1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineJumpTableInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/CodeGen/ValueTypes.h"
31 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/IR/DiagnosticPrinter.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/IntrinsicsRISCV.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/KnownBits.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/raw_ostream.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "riscv-lower"
45 
46 STATISTIC(NumTailCalls, "Number of tail calls");
47 
48 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
49                                          const RISCVSubtarget &STI)
50     : TargetLowering(TM), Subtarget(STI) {
51 
52   if (Subtarget.isRV32E())
53     report_fatal_error("Codegen not yet implemented for RV32E");
54 
55   RISCVABI::ABI ABI = Subtarget.getTargetABI();
56   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
57 
58   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
59       !Subtarget.hasStdExtF()) {
60     errs() << "Hard-float 'f' ABI can't be used for a target that "
61                 "doesn't support the F instruction set extension (ignoring "
62                           "target-abi)\n";
63     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
64   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
65              !Subtarget.hasStdExtD()) {
66     errs() << "Hard-float 'd' ABI can't be used for a target that "
67               "doesn't support the D instruction set extension (ignoring "
68               "target-abi)\n";
69     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
70   }
71 
72   switch (ABI) {
73   default:
74     report_fatal_error("Don't know how to lower this ABI");
75   case RISCVABI::ABI_ILP32:
76   case RISCVABI::ABI_ILP32F:
77   case RISCVABI::ABI_ILP32D:
78   case RISCVABI::ABI_LP64:
79   case RISCVABI::ABI_LP64F:
80   case RISCVABI::ABI_LP64D:
81     break;
82   }
83 
84   MVT XLenVT = Subtarget.getXLenVT();
85 
86   // Set up the register classes.
87   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
88 
89   if (Subtarget.hasStdExtZfh())
90     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
91   if (Subtarget.hasStdExtF())
92     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
93   if (Subtarget.hasStdExtD())
94     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
95 
96   static const MVT::SimpleValueType BoolVecVTs[] = {
97       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
98       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
99   static const MVT::SimpleValueType IntVecVTs[] = {
100       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
101       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
102       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
103       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
104       MVT::nxv4i64, MVT::nxv8i64};
105   static const MVT::SimpleValueType F16VecVTs[] = {
106       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
107       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
108   static const MVT::SimpleValueType F32VecVTs[] = {
109       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
110   static const MVT::SimpleValueType F64VecVTs[] = {
111       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
112 
113   if (Subtarget.hasVInstructions()) {
114     auto addRegClassForRVV = [this](MVT VT) {
115       unsigned Size = VT.getSizeInBits().getKnownMinValue();
116       const TargetRegisterClass *RC;
117       if (Size <= RISCV::RVVBitsPerBlock)
118         RC = &RISCV::VRRegClass;
119       else if (Size == 2 * RISCV::RVVBitsPerBlock)
120         RC = &RISCV::VRM2RegClass;
121       else if (Size == 4 * RISCV::RVVBitsPerBlock)
122         RC = &RISCV::VRM4RegClass;
123       else if (Size == 8 * RISCV::RVVBitsPerBlock)
124         RC = &RISCV::VRM8RegClass;
125       else
126         llvm_unreachable("Unexpected size");
127 
128       addRegisterClass(VT, RC);
129     };
130 
131     for (MVT VT : BoolVecVTs)
132       addRegClassForRVV(VT);
133     for (MVT VT : IntVecVTs) {
134       if (VT.getVectorElementType() == MVT::i64 &&
135           !Subtarget.hasVInstructionsI64())
136         continue;
137       addRegClassForRVV(VT);
138     }
139 
140     if (Subtarget.hasVInstructionsF16())
141       for (MVT VT : F16VecVTs)
142         addRegClassForRVV(VT);
143 
144     if (Subtarget.hasVInstructionsF32())
145       for (MVT VT : F32VecVTs)
146         addRegClassForRVV(VT);
147 
148     if (Subtarget.hasVInstructionsF64())
149       for (MVT VT : F64VecVTs)
150         addRegClassForRVV(VT);
151 
152     if (Subtarget.useRVVForFixedLengthVectors()) {
153       auto addRegClassForFixedVectors = [this](MVT VT) {
154         MVT ContainerVT = getContainerForFixedLengthVector(VT);
155         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
156         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
157         addRegisterClass(VT, TRI.getRegClass(RCID));
158       };
159       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
160         if (useRVVForFixedLengthVectorVT(VT))
161           addRegClassForFixedVectors(VT);
162 
163       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
164         if (useRVVForFixedLengthVectorVT(VT))
165           addRegClassForFixedVectors(VT);
166     }
167   }
168 
169   // Compute derived properties from the register classes.
170   computeRegisterProperties(STI.getRegisterInfo());
171 
172   setStackPointerRegisterToSaveRestore(RISCV::X2);
173 
174   setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, XLenVT,
175                    MVT::i1, Promote);
176 
177   // TODO: add all necessary setOperationAction calls.
178   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
179 
180   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
181   setOperationAction(ISD::BR_CC, XLenVT, Expand);
182   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
183   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
184 
185   setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
186 
187   setOperationAction(ISD::VASTART, MVT::Other, Custom);
188   setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);
189 
190   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
191 
192   setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
193 
194   if (!Subtarget.hasStdExtZbb())
195     setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand);
196 
197   if (Subtarget.is64Bit()) {
198     setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
199 
200     setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
201                        MVT::i32, Custom);
202 
203     setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
204                        MVT::i32, Custom);
205   } else {
206     setLibcallName(
207         {RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, RTLIB::MUL_I128},
208         nullptr);
209     setLibcallName(RTLIB::MULO_I64, nullptr);
210   }
211 
212   if (!Subtarget.hasStdExtM()) {
213     setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU, ISD::SDIV, ISD::UDIV,
214                         ISD::SREM, ISD::UREM},
215                        XLenVT, Expand);
216   } else {
217     if (Subtarget.is64Bit()) {
218       setOperationAction(ISD::MUL, {MVT::i32, MVT::i128}, Custom);
219 
220       setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
221                          {MVT::i8, MVT::i16, MVT::i32}, Custom);
222     } else {
223       setOperationAction(ISD::MUL, MVT::i64, Custom);
224     }
225   }
226 
227   setOperationAction(
228       {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, XLenVT,
229       Expand);
230 
231   setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, XLenVT,
232                      Custom);
233 
234   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
235       Subtarget.hasStdExtZbkb()) {
236     if (Subtarget.is64Bit())
237       setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
238   } else {
239     setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
240   }
241 
242   if (Subtarget.hasStdExtZbp()) {
243     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
244     // more combining.
245     setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, XLenVT, Custom);
246 
247     // BSWAP i8 doesn't exist.
248     setOperationAction(ISD::BITREVERSE, MVT::i8, Custom);
249 
250     setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i16, Custom);
251 
252     if (Subtarget.is64Bit())
253       setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i32, Custom);
254   } else {
255     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
256     // pattern match it directly in isel.
257     setOperationAction(ISD::BSWAP, XLenVT,
258                        (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
259                            ? Legal
260                            : Expand);
261     // Zbkb can use rev8+brev8 to implement bitreverse.
262     setOperationAction(ISD::BITREVERSE, XLenVT,
263                        Subtarget.hasStdExtZbkb() ? Custom : Expand);
264   }
265 
266   if (Subtarget.hasStdExtZbb()) {
267     setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
268                        Legal);
269 
270     if (Subtarget.is64Bit())
271       setOperationAction(
272           {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
273           MVT::i32, Custom);
274   } else {
275     setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, XLenVT, Expand);
276 
277     if (Subtarget.is64Bit())
278       setOperationAction(ISD::ABS, MVT::i32, Custom);
279   }
280 
281   if (Subtarget.hasStdExtZbt()) {
282     setOperationAction({ISD::FSHL, ISD::FSHR}, XLenVT, Custom);
283     setOperationAction(ISD::SELECT, XLenVT, Legal);
284 
285     if (Subtarget.is64Bit())
286       setOperationAction({ISD::FSHL, ISD::FSHR}, MVT::i32, Custom);
287   } else {
288     setOperationAction(ISD::SELECT, XLenVT, Custom);
289   }
290 
291   static constexpr ISD::NodeType FPLegalNodeTypes[] = {
292       ISD::FMINNUM,        ISD::FMAXNUM,       ISD::LRINT,
293       ISD::LLRINT,         ISD::LROUND,        ISD::LLROUND,
294       ISD::STRICT_LRINT,   ISD::STRICT_LLRINT, ISD::STRICT_LROUND,
295       ISD::STRICT_LLROUND, ISD::STRICT_FMA,    ISD::STRICT_FADD,
296       ISD::STRICT_FSUB,    ISD::STRICT_FMUL,   ISD::STRICT_FDIV,
297       ISD::STRICT_FSQRT,   ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS};
298 
299   static const ISD::CondCode FPCCToExpand[] = {
300       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
301       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
302       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
303 
304   static const ISD::NodeType FPOpToExpand[] = {
305       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
306       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
307 
308   if (Subtarget.hasStdExtZfh())
309     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
310 
311   if (Subtarget.hasStdExtZfh()) {
312     for (auto NT : FPLegalNodeTypes)
313       setOperationAction(NT, MVT::f16, Legal);
314     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
315     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
316     setCondCodeAction(FPCCToExpand, MVT::f16, Expand);
317     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
318     setOperationAction(ISD::SELECT, MVT::f16, Custom);
319     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
320 
321     setOperationAction({ISD::FREM, ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT,
322                         ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC,
323                         ISD::FPOW, ISD::FPOWI, ISD::FCOS, ISD::FSIN,
324                         ISD::FSINCOS, ISD::FEXP, ISD::FEXP2, ISD::FLOG,
325                         ISD::FLOG2, ISD::FLOG10},
326                        MVT::f16, Promote);
327 
328     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
329     // complete support for all operations in LegalizeDAG.
330 
331     // We need to custom promote this.
332     if (Subtarget.is64Bit())
333       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
334   }
335 
336   if (Subtarget.hasStdExtF()) {
337     for (auto NT : FPLegalNodeTypes)
338       setOperationAction(NT, MVT::f32, Legal);
339     setCondCodeAction(FPCCToExpand, MVT::f32, Expand);
340     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
341     setOperationAction(ISD::SELECT, MVT::f32, Custom);
342     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
343     for (auto Op : FPOpToExpand)
344       setOperationAction(Op, MVT::f32, Expand);
345     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
346     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
347   }
348 
349   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
350     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
351 
352   if (Subtarget.hasStdExtD()) {
353     for (auto NT : FPLegalNodeTypes)
354       setOperationAction(NT, MVT::f64, Legal);
355     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
356     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
357     setCondCodeAction(FPCCToExpand, MVT::f64, Expand);
358     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
359     setOperationAction(ISD::SELECT, MVT::f64, Custom);
360     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
361     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
362     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
363     for (auto Op : FPOpToExpand)
364       setOperationAction(Op, MVT::f64, Expand);
365     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
366     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
367   }
368 
369   if (Subtarget.is64Bit())
370     setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT,
371                         ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT},
372                        MVT::i32, Custom);
373 
374   if (Subtarget.hasStdExtF()) {
375     setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, XLenVT,
376                        Custom);
377 
378     setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT,
379                         ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
380                        XLenVT, Legal);
381 
382     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
383     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
384   }
385 
386   setOperationAction({ISD::GlobalAddress, ISD::BlockAddress, ISD::ConstantPool,
387                       ISD::JumpTable},
388                      XLenVT, Custom);
389 
390   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
391 
392   if (Subtarget.is64Bit())
393     setOperationAction(ISD::Constant, MVT::i64, Custom);
394 
395   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
396   // Unfortunately this can't be determined just from the ISA naming string.
397   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
398                      Subtarget.is64Bit() ? Legal : Custom);
399 
400   setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal);
401   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
402   if (Subtarget.is64Bit())
403     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
404 
405   if (Subtarget.hasStdExtA()) {
406     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
407     setMinCmpXchgSizeInBits(32);
408   } else {
409     setMaxAtomicSizeInBitsSupported(0);
410   }
411 
412   setBooleanContents(ZeroOrOneBooleanContent);
413 
414   if (Subtarget.hasVInstructions()) {
415     setBooleanVectorContents(ZeroOrOneBooleanContent);
416 
417     setOperationAction(ISD::VSCALE, XLenVT, Custom);
418 
419     // RVV intrinsics may have illegal operands.
420     // We also need to custom legalize vmv.x.s.
421     setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
422                        {MVT::i8, MVT::i16}, Custom);
423     if (Subtarget.is64Bit())
424       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
425     else
426       setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
427                          MVT::i64, Custom);
428 
429     setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
430                        MVT::Other, Custom);
431 
432     static const unsigned IntegerVPOps[] = {
433         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
434         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
435         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
436         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
437         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
438         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
439         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
440         ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FPTOSI,
441         ISD::VP_FPTOUI,      ISD::VP_SETCC,       ISD::VP_SIGN_EXTEND,
442         ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE};
443 
444     static const unsigned FloatingPointVPOps[] = {
445         ISD::VP_FADD,        ISD::VP_FSUB,
446         ISD::VP_FMUL,        ISD::VP_FDIV,
447         ISD::VP_FNEG,        ISD::VP_FMA,
448         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
449         ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX,
450         ISD::VP_MERGE,       ISD::VP_SELECT,
451         ISD::VP_SITOFP,      ISD::VP_UITOFP,
452         ISD::VP_SETCC,       ISD::VP_FP_ROUND,
453         ISD::VP_FP_EXTEND};
454 
455     if (!Subtarget.is64Bit()) {
456       // We must custom-lower certain vXi64 operations on RV32 due to the vector
457       // element type being illegal.
458       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
459                          MVT::i64, Custom);
460 
461       setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND,
462                           ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR,
463                           ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
464                           ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN},
465                          MVT::i64, Custom);
466 
467       setOperationAction({ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
468                           ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR,
469                           ISD::VP_REDUCE_SMAX, ISD::VP_REDUCE_SMIN,
470                           ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN},
471                          MVT::i64, Custom);
472     }
473 
474     for (MVT VT : BoolVecVTs) {
475       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
476 
477       // Mask VTs are custom-expanded into a series of standard nodes
478       setOperationAction({ISD::TRUNCATE, ISD::CONCAT_VECTORS,
479                           ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
480                          VT, Custom);
481 
482       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
483                          Custom);
484 
485       setOperationAction(ISD::SELECT, VT, Custom);
486       setOperationAction(
487           {ISD::SELECT_CC, ISD::VSELECT, ISD::VP_MERGE, ISD::VP_SELECT}, VT,
488           Expand);
489 
490       setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR}, VT, Custom);
491 
492       setOperationAction(
493           {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
494           Custom);
495 
496       setOperationAction(
497           {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
498           Custom);
499 
500       // RVV has native int->float & float->int conversions where the
501       // element type sizes are within one power-of-two of each other. Any
502       // wider distances between type sizes have to be lowered as sequences
503       // which progressively narrow the gap in stages.
504       setOperationAction(
505           {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
506           VT, Custom);
507 
508       // Expand all extending loads to types larger than this, and truncating
509       // stores from types larger than this.
510       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
511         setTruncStoreAction(OtherVT, VT, Expand);
512         setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
513                          VT, Expand);
514       }
515 
516       setOperationAction(
517           {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_TRUNCATE, ISD::VP_SETCC}, VT,
518           Custom);
519     }
520 
521     for (MVT VT : IntVecVTs) {
522       if (VT.getVectorElementType() == MVT::i64 &&
523           !Subtarget.hasVInstructionsI64())
524         continue;
525 
526       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
527       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
528 
529       // Vectors implement MULHS/MULHU.
530       setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);
531 
532       // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
533       if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV())
534         setOperationAction({ISD::MULHU, ISD::MULHS}, VT, Expand);
535 
536       setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
537                          Legal);
538 
539       setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand);
540 
541       setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP, ISD::BSWAP}, VT,
542                          Expand);
543 
544       setOperationAction(ISD::BSWAP, VT, Expand);
545 
546       // Custom-lower extensions and truncations from/to mask types.
547       setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
548                          VT, Custom);
549 
550       // RVV has native int->float & float->int conversions where the
551       // element type sizes are within one power-of-two of each other. Any
552       // wider distances between type sizes have to be lowered as sequences
553       // which progressively narrow the gap in stages.
554       setOperationAction(
555           {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
556           VT, Custom);
557 
558       setOperationAction(
559           {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT, Legal);
560 
561       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
562       // nodes which truncate by one power of two at a time.
563       setOperationAction(ISD::TRUNCATE, VT, Custom);
564 
565       // Custom-lower insert/extract operations to simplify patterns.
566       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
567                          Custom);
568 
569       // Custom-lower reduction operations to set up the corresponding custom
570       // nodes' operands.
571       setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND,
572                           ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR,
573                           ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
574                           ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN},
575                          VT, Custom);
576 
577       setOperationAction(IntegerVPOps, VT, Custom);
578 
579       setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
580 
581       setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
582                          VT, Custom);
583 
584       setOperationAction(
585           {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
586           Custom);
587 
588       setOperationAction(
589           {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
590           VT, Custom);
591 
592       setOperationAction(ISD::SELECT, VT, Custom);
593       setOperationAction(ISD::SELECT_CC, VT, Expand);
594 
595       setOperationAction({ISD::STEP_VECTOR, ISD::VECTOR_REVERSE}, VT, Custom);
596 
597       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
598         setTruncStoreAction(VT, OtherVT, Expand);
599         setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
600                          VT, Expand);
601       }
602 
603       // Splice
604       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
605 
606       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
607       // type that can represent the value exactly.
608       if (VT.getVectorElementType() != MVT::i64) {
609         MVT FloatEltVT =
610             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
611         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
612         if (isTypeLegal(FloatVT)) {
613           setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
614                              Custom);
615         }
616       }
617     }
618 
619     // Expand various CCs to best match the RVV ISA, which natively supports UNE
620     // but no other unordered comparisons, and supports all ordered comparisons
621     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
622     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
623     // and we pattern-match those back to the "original", swapping operands once
624     // more. This way we catch both operations and both "vf" and "fv" forms with
625     // fewer patterns.
626     static const ISD::CondCode VFPCCToExpand[] = {
627         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
628         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
629         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
630     };
631 
632     // Sets common operation actions on RVV floating-point vector types.
633     const auto SetCommonVFPActions = [&](MVT VT) {
634       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
635       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
636       // sizes are within one power-of-two of each other. Therefore conversions
637       // between vXf16 and vXf64 must be lowered as sequences which convert via
638       // vXf32.
639       setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
640       // Custom-lower insert/extract operations to simplify patterns.
641       setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
642                          Custom);
643       // Expand various condition codes (explained above).
644       setCondCodeAction(VFPCCToExpand, VT, Expand);
645 
646       setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal);
647 
648       setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
649                          VT, Custom);
650 
651       setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD,
652                           ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX},
653                          VT, Custom);
654 
655       // Expand FP operations that need libcalls.
656       setOperationAction(ISD::FREM, VT, Expand);
657       setOperationAction(ISD::FPOW, VT, Expand);
658       setOperationAction(ISD::FCOS, VT, Expand);
659       setOperationAction(ISD::FSIN, VT, Expand);
660       setOperationAction(ISD::FSINCOS, VT, Expand);
661       setOperationAction(ISD::FEXP, VT, Expand);
662       setOperationAction(ISD::FEXP2, VT, Expand);
663       setOperationAction(ISD::FLOG, VT, Expand);
664       setOperationAction(ISD::FLOG2, VT, Expand);
665       setOperationAction(ISD::FLOG10, VT, Expand);
666       setOperationAction(ISD::FRINT, VT, Expand);
667       setOperationAction(ISD::FNEARBYINT, VT, Expand);
668 
669       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
670       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
671       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
672       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
673 
674       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
675 
676       setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
677 
678       setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
679                          VT, Custom);
680 
681       setOperationAction(
682           {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
683           Custom);
684 
685       setOperationAction(ISD::SELECT, VT, Custom);
686       setOperationAction(ISD::SELECT_CC, VT, Expand);
687 
688       setOperationAction(
689           {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
690           VT, Custom);
691 
692       setOperationAction({ISD::VECTOR_REVERSE, ISD::VECTOR_SPLICE}, VT, Custom);
693 
694       setOperationAction(FloatingPointVPOps, VT, Custom);
695     };
696 
697     // Sets common extload/truncstore actions on RVV floating-point vector
698     // types.
699     const auto SetCommonVFPExtLoadTruncStoreActions =
700         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
701           for (auto SmallVT : SmallerVTs) {
702             setTruncStoreAction(VT, SmallVT, Expand);
703             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
704           }
705         };
706 
707     if (Subtarget.hasVInstructionsF16())
708       for (MVT VT : F16VecVTs)
709         SetCommonVFPActions(VT);
710 
711     for (MVT VT : F32VecVTs) {
712       if (Subtarget.hasVInstructionsF32())
713         SetCommonVFPActions(VT);
714       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
715     }
716 
717     for (MVT VT : F64VecVTs) {
718       if (Subtarget.hasVInstructionsF64())
719         SetCommonVFPActions(VT);
720       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
721       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
722     }
723 
724     if (Subtarget.useRVVForFixedLengthVectors()) {
725       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
726         if (!useRVVForFixedLengthVectorVT(VT))
727           continue;
728 
729         // By default everything must be expanded.
730         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
731           setOperationAction(Op, VT, Expand);
732         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
733           setTruncStoreAction(VT, OtherVT, Expand);
734           setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD},
735                            OtherVT, VT, Expand);
736         }
737 
738         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
739         setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
740                            Custom);
741 
742         setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS}, VT,
743                            Custom);
744 
745         setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
746                            VT, Custom);
747 
748         setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);
749 
750         setOperationAction(ISD::SETCC, VT, Custom);
751 
752         setOperationAction(ISD::SELECT, VT, Custom);
753 
754         setOperationAction(ISD::TRUNCATE, VT, Custom);
755 
756         setOperationAction(ISD::BITCAST, VT, Custom);
757 
758         setOperationAction(
759             {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
760             Custom);
761 
762         setOperationAction(
763             {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
764             Custom);
765 
766         setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
767                             ISD::FP_TO_UINT},
768                            VT, Custom);
769 
770         // Operations below are different for between masks and other vectors.
771         if (VT.getVectorElementType() == MVT::i1) {
772           setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, ISD::AND,
773                               ISD::OR, ISD::XOR},
774                              VT, Custom);
775 
776           setOperationAction(
777               {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_SETCC, ISD::VP_TRUNCATE},
778               VT, Custom);
779           continue;
780         }
781 
782         // Make SPLAT_VECTOR Legal so DAGCombine will convert splat vectors to
783         // it before type legalization for i64 vectors on RV32. It will then be
784         // type legalized to SPLAT_VECTOR_PARTS which we need to Custom handle.
785         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
786         // improvements first.
787         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
788           setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
789           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
790         }
791 
792         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
793         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
794 
795         setOperationAction(
796             {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER}, VT, Custom);
797 
798         setOperationAction(
799             {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
800             Custom);
801 
802         setOperationAction({ISD::ADD, ISD::MUL, ISD::SUB, ISD::AND, ISD::OR,
803                             ISD::XOR, ISD::SDIV, ISD::SREM, ISD::UDIV,
804                             ISD::UREM, ISD::SHL, ISD::SRA, ISD::SRL},
805                            VT, Custom);
806 
807         setOperationAction(
808             {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Custom);
809 
810         // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
811         if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV())
812           setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Custom);
813 
814         setOperationAction(
815             {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT,
816             Custom);
817 
818         setOperationAction(ISD::VSELECT, VT, Custom);
819         setOperationAction(ISD::SELECT_CC, VT, Expand);
820 
821         setOperationAction(
822             {ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, VT, Custom);
823 
824         // Custom-lower reduction operations to set up the corresponding custom
825         // nodes' operands.
826         setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_SMAX,
827                             ISD::VECREDUCE_SMIN, ISD::VECREDUCE_UMAX,
828                             ISD::VECREDUCE_UMIN},
829                            VT, Custom);
830 
831         setOperationAction(IntegerVPOps, VT, Custom);
832 
833         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
834         // type that can represent the value exactly.
835         if (VT.getVectorElementType() != MVT::i64) {
836           MVT FloatEltVT =
837               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
838           EVT FloatVT =
839               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
840           if (isTypeLegal(FloatVT))
841             setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
842                                Custom);
843         }
844       }
845 
846       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
847         if (!useRVVForFixedLengthVectorVT(VT))
848           continue;
849 
850         // By default everything must be expanded.
851         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
852           setOperationAction(Op, VT, Expand);
853         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
854           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
855           setTruncStoreAction(VT, OtherVT, Expand);
856         }
857 
858         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
859         setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
860                            Custom);
861 
862         setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
863                             ISD::VECTOR_SHUFFLE, ISD::INSERT_VECTOR_ELT,
864                             ISD::EXTRACT_VECTOR_ELT},
865                            VT, Custom);
866 
867         setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
868                             ISD::MGATHER, ISD::MSCATTER},
869                            VT, Custom);
870 
871         setOperationAction(
872             {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
873             Custom);
874 
875         setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
876                             ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
877                             ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM},
878                            VT, Custom);
879 
880         setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
881 
882         setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
883                            VT, Custom);
884 
885         for (auto CC : VFPCCToExpand)
886           setCondCodeAction(CC, VT, Expand);
887 
888         setOperationAction({ISD::VSELECT, ISD::SELECT}, VT, Custom);
889         setOperationAction(ISD::SELECT_CC, VT, Expand);
890 
891         setOperationAction(ISD::BITCAST, VT, Custom);
892 
893         setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD,
894                             ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX},
895                            VT, Custom);
896 
897         setOperationAction(FloatingPointVPOps, VT, Custom);
898       }
899 
900       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
901       setOperationAction(ISD::BITCAST, {MVT::i8, MVT::i16, MVT::i32, MVT::i64},
902                          Custom);
903       if (Subtarget.hasStdExtZfh())
904         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
905       if (Subtarget.hasStdExtF())
906         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
907       if (Subtarget.hasStdExtD())
908         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
909     }
910   }
911 
912   // Function alignments.
913   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
914   setMinFunctionAlignment(FunctionAlignment);
915   setPrefFunctionAlignment(FunctionAlignment);
916 
917   setMinimumJumpTableEntries(5);
918 
919   // Jumps are expensive, compared to logic
920   setJumpIsExpensive();
921 
922   setTargetDAGCombine({ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND,
923                        ISD::OR, ISD::XOR});
924 
925   if (Subtarget.hasStdExtF())
926     setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM});
927 
928   if (Subtarget.hasStdExtZbp())
929     setTargetDAGCombine({ISD::ROTL, ISD::ROTR});
930 
931   if (Subtarget.hasStdExtZbb())
932     setTargetDAGCombine({ISD::UMAX, ISD::UMIN, ISD::SMAX, ISD::SMIN});
933 
934   if (Subtarget.hasStdExtZbkb())
935     setTargetDAGCombine(ISD::BITREVERSE);
936   if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZbb())
937     setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
938   if (Subtarget.hasStdExtF())
939     setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
940                          ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
941   if (Subtarget.hasVInstructions())
942     setTargetDAGCombine({ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
943                          ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL,
944                          ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR});
945 
946   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
947   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
948 }
949 
950 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
951                                             LLVMContext &Context,
952                                             EVT VT) const {
953   if (!VT.isVector())
954     return getPointerTy(DL);
955   if (Subtarget.hasVInstructions() &&
956       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
957     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
958   return VT.changeVectorElementTypeToInteger();
959 }
960 
961 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
962   return Subtarget.getXLenVT();
963 }
964 
965 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
966                                              const CallInst &I,
967                                              MachineFunction &MF,
968                                              unsigned Intrinsic) const {
969   auto &DL = I.getModule()->getDataLayout();
970   switch (Intrinsic) {
971   default:
972     return false;
973   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
974   case Intrinsic::riscv_masked_atomicrmw_add_i32:
975   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
976   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
977   case Intrinsic::riscv_masked_atomicrmw_max_i32:
978   case Intrinsic::riscv_masked_atomicrmw_min_i32:
979   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
980   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
981   case Intrinsic::riscv_masked_cmpxchg_i32:
982     Info.opc = ISD::INTRINSIC_W_CHAIN;
983     Info.memVT = MVT::i32;
984     Info.ptrVal = I.getArgOperand(0);
985     Info.offset = 0;
986     Info.align = Align(4);
987     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
988                  MachineMemOperand::MOVolatile;
989     return true;
990   case Intrinsic::riscv_masked_strided_load:
991     Info.opc = ISD::INTRINSIC_W_CHAIN;
992     Info.ptrVal = I.getArgOperand(1);
993     Info.memVT = getValueType(DL, I.getType()->getScalarType());
994     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
995     Info.size = MemoryLocation::UnknownSize;
996     Info.flags |= MachineMemOperand::MOLoad;
997     return true;
998   case Intrinsic::riscv_masked_strided_store:
999     Info.opc = ISD::INTRINSIC_VOID;
1000     Info.ptrVal = I.getArgOperand(1);
1001     Info.memVT =
1002         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
1003     Info.align = Align(
1004         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
1005         8);
1006     Info.size = MemoryLocation::UnknownSize;
1007     Info.flags |= MachineMemOperand::MOStore;
1008     return true;
1009   case Intrinsic::riscv_seg2_load:
1010   case Intrinsic::riscv_seg3_load:
1011   case Intrinsic::riscv_seg4_load:
1012   case Intrinsic::riscv_seg5_load:
1013   case Intrinsic::riscv_seg6_load:
1014   case Intrinsic::riscv_seg7_load:
1015   case Intrinsic::riscv_seg8_load:
1016     Info.opc = ISD::INTRINSIC_W_CHAIN;
1017     Info.ptrVal = I.getArgOperand(0);
1018     Info.memVT =
1019         getValueType(DL, I.getType()->getStructElementType(0)->getScalarType());
1020     Info.align =
1021         Align(DL.getTypeSizeInBits(
1022                   I.getType()->getStructElementType(0)->getScalarType()) /
1023               8);
1024     Info.size = MemoryLocation::UnknownSize;
1025     Info.flags |= MachineMemOperand::MOLoad;
1026     return true;
1027   }
1028 }
1029 
1030 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1031                                                 const AddrMode &AM, Type *Ty,
1032                                                 unsigned AS,
1033                                                 Instruction *I) const {
1034   // No global is ever allowed as a base.
1035   if (AM.BaseGV)
1036     return false;
1037 
1038   // RVV instructions only support register addressing.
1039   if (Subtarget.hasVInstructions() && isa<VectorType>(Ty))
1040     return AM.HasBaseReg && AM.Scale == 0 && !AM.BaseOffs;
1041 
1042   // Require a 12-bit signed offset.
1043   if (!isInt<12>(AM.BaseOffs))
1044     return false;
1045 
1046   switch (AM.Scale) {
1047   case 0: // "r+i" or just "i", depending on HasBaseReg.
1048     break;
1049   case 1:
1050     if (!AM.HasBaseReg) // allow "r+i".
1051       break;
1052     return false; // disallow "r+r" or "r+r+i".
1053   default:
1054     return false;
1055   }
1056 
1057   return true;
1058 }
1059 
1060 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1061   return isInt<12>(Imm);
1062 }
1063 
1064 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1065   return isInt<12>(Imm);
1066 }
1067 
1068 // On RV32, 64-bit integers are split into their high and low parts and held
1069 // in two different registers, so the trunc is free since the low register can
1070 // just be used.
1071 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1072   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1073     return false;
1074   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1075   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1076   return (SrcBits == 64 && DestBits == 32);
1077 }
1078 
1079 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1080   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1081       !SrcVT.isInteger() || !DstVT.isInteger())
1082     return false;
1083   unsigned SrcBits = SrcVT.getSizeInBits();
1084   unsigned DestBits = DstVT.getSizeInBits();
1085   return (SrcBits == 64 && DestBits == 32);
1086 }
1087 
1088 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1089   // Zexts are free if they can be combined with a load.
1090   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1091   // poorly with type legalization of compares preferring sext.
1092   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1093     EVT MemVT = LD->getMemoryVT();
1094     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1095         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1096          LD->getExtensionType() == ISD::ZEXTLOAD))
1097       return true;
1098   }
1099 
1100   return TargetLowering::isZExtFree(Val, VT2);
1101 }
1102 
1103 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1104   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1105 }
1106 
1107 bool RISCVTargetLowering::signExtendConstant(const ConstantInt *CI) const {
1108   return Subtarget.is64Bit() && CI->getType()->isIntegerTy(32);
1109 }
1110 
1111 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1112   return Subtarget.hasStdExtZbb();
1113 }
1114 
1115 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1116   return Subtarget.hasStdExtZbb();
1117 }
1118 
1119 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1120   EVT VT = Y.getValueType();
1121 
1122   // FIXME: Support vectors once we have tests.
1123   if (VT.isVector())
1124     return false;
1125 
1126   return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
1127           Subtarget.hasStdExtZbkb()) &&
1128          !isa<ConstantSDNode>(Y);
1129 }
1130 
1131 bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
1132   // We can use ANDI+SEQZ/SNEZ as a bit test. Y contains the bit position.
1133   auto *C = dyn_cast<ConstantSDNode>(Y);
1134   return C && C->getAPIntValue().ule(10);
1135 }
1136 
1137 bool RISCVTargetLowering::
1138     shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
1139         SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
1140         unsigned OldShiftOpcode, unsigned NewShiftOpcode,
1141         SelectionDAG &DAG) const {
1142   // One interesting pattern that we'd want to form is 'bit extract':
1143   //   ((1 >> Y) & 1) ==/!= 0
1144   // But we also need to be careful not to try to reverse that fold.
1145 
1146   // Is this '((1 >> Y) & 1)'?
1147   if (XC && OldShiftOpcode == ISD::SRL && XC->isOne())
1148     return false; // Keep the 'bit extract' pattern.
1149 
1150   // Will this be '((1 >> Y) & 1)' after the transform?
1151   if (NewShiftOpcode == ISD::SRL && CC->isOne())
1152     return true; // Do form the 'bit extract' pattern.
1153 
1154   // If 'X' is a constant, and we transform, then we will immediately
1155   // try to undo the fold, thus causing endless combine loop.
1156   // So only do the transform if X is not a constant. This matches the default
1157   // implementation of this function.
1158   return !XC;
1159 }
1160 
1161 /// Check if sinking \p I's operands to I's basic block is profitable, because
1162 /// the operands can be folded into a target instruction, e.g.
1163 /// splats of scalars can fold into vector instructions.
1164 bool RISCVTargetLowering::shouldSinkOperands(
1165     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1166   using namespace llvm::PatternMatch;
1167 
1168   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1169     return false;
1170 
1171   auto IsSinker = [&](Instruction *I, int Operand) {
1172     switch (I->getOpcode()) {
1173     case Instruction::Add:
1174     case Instruction::Sub:
1175     case Instruction::Mul:
1176     case Instruction::And:
1177     case Instruction::Or:
1178     case Instruction::Xor:
1179     case Instruction::FAdd:
1180     case Instruction::FSub:
1181     case Instruction::FMul:
1182     case Instruction::FDiv:
1183     case Instruction::ICmp:
1184     case Instruction::FCmp:
1185       return true;
1186     case Instruction::Shl:
1187     case Instruction::LShr:
1188     case Instruction::AShr:
1189     case Instruction::UDiv:
1190     case Instruction::SDiv:
1191     case Instruction::URem:
1192     case Instruction::SRem:
1193       return Operand == 1;
1194     case Instruction::Call:
1195       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1196         switch (II->getIntrinsicID()) {
1197         case Intrinsic::fma:
1198         case Intrinsic::vp_fma:
1199           return Operand == 0 || Operand == 1;
1200         // FIXME: Our patterns can only match vx/vf instructions when the splat
1201         // it on the RHS, because TableGen doesn't recognize our VP operations
1202         // as commutative.
1203         case Intrinsic::vp_add:
1204         case Intrinsic::vp_mul:
1205         case Intrinsic::vp_and:
1206         case Intrinsic::vp_or:
1207         case Intrinsic::vp_xor:
1208         case Intrinsic::vp_fadd:
1209         case Intrinsic::vp_fmul:
1210         case Intrinsic::vp_shl:
1211         case Intrinsic::vp_lshr:
1212         case Intrinsic::vp_ashr:
1213         case Intrinsic::vp_udiv:
1214         case Intrinsic::vp_sdiv:
1215         case Intrinsic::vp_urem:
1216         case Intrinsic::vp_srem:
1217           return Operand == 1;
1218         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1219         // explicit patterns for both LHS and RHS (as 'vr' versions).
1220         case Intrinsic::vp_sub:
1221         case Intrinsic::vp_fsub:
1222         case Intrinsic::vp_fdiv:
1223           return Operand == 0 || Operand == 1;
1224         default:
1225           return false;
1226         }
1227       }
1228       return false;
1229     default:
1230       return false;
1231     }
1232   };
1233 
1234   for (auto OpIdx : enumerate(I->operands())) {
1235     if (!IsSinker(I, OpIdx.index()))
1236       continue;
1237 
1238     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1239     // Make sure we are not already sinking this operand
1240     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1241       continue;
1242 
1243     // We are looking for a splat that can be sunk.
1244     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1245                              m_Undef(), m_ZeroMask())))
1246       continue;
1247 
1248     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1249     // and vector registers
1250     for (Use &U : Op->uses()) {
1251       Instruction *Insn = cast<Instruction>(U.getUser());
1252       if (!IsSinker(Insn, U.getOperandNo()))
1253         return false;
1254     }
1255 
1256     Ops.push_back(&Op->getOperandUse(0));
1257     Ops.push_back(&OpIdx.value());
1258   }
1259   return true;
1260 }
1261 
1262 bool RISCVTargetLowering::isOffsetFoldingLegal(
1263     const GlobalAddressSDNode *GA) const {
1264   // In order to maximise the opportunity for common subexpression elimination,
1265   // keep a separate ADD node for the global address offset instead of folding
1266   // it in the global address node. Later peephole optimisations may choose to
1267   // fold it back in when profitable.
1268   return false;
1269 }
1270 
1271 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1272                                        bool ForCodeSize) const {
1273   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1274   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1275     return false;
1276   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1277     return false;
1278   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1279     return false;
1280   return Imm.isZero();
1281 }
1282 
1283 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1284   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1285          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1286          (VT == MVT::f64 && Subtarget.hasStdExtD());
1287 }
1288 
1289 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1290                                                       CallingConv::ID CC,
1291                                                       EVT VT) const {
1292   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1293   // We might still end up using a GPR but that will be decided based on ABI.
1294   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1295   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1296     return MVT::f32;
1297 
1298   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1299 }
1300 
1301 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1302                                                            CallingConv::ID CC,
1303                                                            EVT VT) const {
1304   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1305   // We might still end up using a GPR but that will be decided based on ABI.
1306   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1307   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1308     return 1;
1309 
1310   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1311 }
1312 
1313 // Changes the condition code and swaps operands if necessary, so the SetCC
1314 // operation matches one of the comparisons supported directly by branches
1315 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1316 // with 1/-1.
1317 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1318                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1319   // Convert X > -1 to X >= 0.
1320   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1321     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1322     CC = ISD::SETGE;
1323     return;
1324   }
1325   // Convert X < 1 to 0 >= X.
1326   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1327     RHS = LHS;
1328     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1329     CC = ISD::SETGE;
1330     return;
1331   }
1332 
1333   switch (CC) {
1334   default:
1335     break;
1336   case ISD::SETGT:
1337   case ISD::SETLE:
1338   case ISD::SETUGT:
1339   case ISD::SETULE:
1340     CC = ISD::getSetCCSwappedOperands(CC);
1341     std::swap(LHS, RHS);
1342     break;
1343   }
1344 }
1345 
1346 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1347   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1348   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1349   if (VT.getVectorElementType() == MVT::i1)
1350     KnownSize *= 8;
1351 
1352   switch (KnownSize) {
1353   default:
1354     llvm_unreachable("Invalid LMUL.");
1355   case 8:
1356     return RISCVII::VLMUL::LMUL_F8;
1357   case 16:
1358     return RISCVII::VLMUL::LMUL_F4;
1359   case 32:
1360     return RISCVII::VLMUL::LMUL_F2;
1361   case 64:
1362     return RISCVII::VLMUL::LMUL_1;
1363   case 128:
1364     return RISCVII::VLMUL::LMUL_2;
1365   case 256:
1366     return RISCVII::VLMUL::LMUL_4;
1367   case 512:
1368     return RISCVII::VLMUL::LMUL_8;
1369   }
1370 }
1371 
1372 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1373   switch (LMul) {
1374   default:
1375     llvm_unreachable("Invalid LMUL.");
1376   case RISCVII::VLMUL::LMUL_F8:
1377   case RISCVII::VLMUL::LMUL_F4:
1378   case RISCVII::VLMUL::LMUL_F2:
1379   case RISCVII::VLMUL::LMUL_1:
1380     return RISCV::VRRegClassID;
1381   case RISCVII::VLMUL::LMUL_2:
1382     return RISCV::VRM2RegClassID;
1383   case RISCVII::VLMUL::LMUL_4:
1384     return RISCV::VRM4RegClassID;
1385   case RISCVII::VLMUL::LMUL_8:
1386     return RISCV::VRM8RegClassID;
1387   }
1388 }
1389 
1390 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1391   RISCVII::VLMUL LMUL = getLMUL(VT);
1392   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1393       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1394       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1395       LMUL == RISCVII::VLMUL::LMUL_1) {
1396     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1397                   "Unexpected subreg numbering");
1398     return RISCV::sub_vrm1_0 + Index;
1399   }
1400   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1401     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1402                   "Unexpected subreg numbering");
1403     return RISCV::sub_vrm2_0 + Index;
1404   }
1405   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1406     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1407                   "Unexpected subreg numbering");
1408     return RISCV::sub_vrm4_0 + Index;
1409   }
1410   llvm_unreachable("Invalid vector type.");
1411 }
1412 
1413 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1414   if (VT.getVectorElementType() == MVT::i1)
1415     return RISCV::VRRegClassID;
1416   return getRegClassIDForLMUL(getLMUL(VT));
1417 }
1418 
1419 // Attempt to decompose a subvector insert/extract between VecVT and
1420 // SubVecVT via subregister indices. Returns the subregister index that
1421 // can perform the subvector insert/extract with the given element index, as
1422 // well as the index corresponding to any leftover subvectors that must be
1423 // further inserted/extracted within the register class for SubVecVT.
1424 std::pair<unsigned, unsigned>
1425 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1426     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1427     const RISCVRegisterInfo *TRI) {
1428   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1429                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1430                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1431                 "Register classes not ordered");
1432   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1433   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1434   // Try to compose a subregister index that takes us from the incoming
1435   // LMUL>1 register class down to the outgoing one. At each step we half
1436   // the LMUL:
1437   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1438   // Note that this is not guaranteed to find a subregister index, such as
1439   // when we are extracting from one VR type to another.
1440   unsigned SubRegIdx = RISCV::NoSubRegister;
1441   for (const unsigned RCID :
1442        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1443     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1444       VecVT = VecVT.getHalfNumVectorElementsVT();
1445       bool IsHi =
1446           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1447       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1448                                             getSubregIndexByMVT(VecVT, IsHi));
1449       if (IsHi)
1450         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1451     }
1452   return {SubRegIdx, InsertExtractIdx};
1453 }
1454 
1455 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1456 // stores for those types.
1457 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1458   return !Subtarget.useRVVForFixedLengthVectors() ||
1459          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1460 }
1461 
1462 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1463   if (ScalarTy->isPointerTy())
1464     return true;
1465 
1466   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1467       ScalarTy->isIntegerTy(32))
1468     return true;
1469 
1470   if (ScalarTy->isIntegerTy(64))
1471     return Subtarget.hasVInstructionsI64();
1472 
1473   if (ScalarTy->isHalfTy())
1474     return Subtarget.hasVInstructionsF16();
1475   if (ScalarTy->isFloatTy())
1476     return Subtarget.hasVInstructionsF32();
1477   if (ScalarTy->isDoubleTy())
1478     return Subtarget.hasVInstructionsF64();
1479 
1480   return false;
1481 }
1482 
1483 static SDValue getVLOperand(SDValue Op) {
1484   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1485           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
1486          "Unexpected opcode");
1487   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
1488   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
1489   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1490       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
1491   if (!II)
1492     return SDValue();
1493   return Op.getOperand(II->VLOperand + 1 + HasChain);
1494 }
1495 
1496 static bool useRVVForFixedLengthVectorVT(MVT VT,
1497                                          const RISCVSubtarget &Subtarget) {
1498   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1499   if (!Subtarget.useRVVForFixedLengthVectors())
1500     return false;
1501 
1502   // We only support a set of vector types with a consistent maximum fixed size
1503   // across all supported vector element types to avoid legalization issues.
1504   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1505   // fixed-length vector type we support is 1024 bytes.
1506   if (VT.getFixedSizeInBits() > 1024 * 8)
1507     return false;
1508 
1509   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1510 
1511   MVT EltVT = VT.getVectorElementType();
1512 
1513   // Don't use RVV for vectors we cannot scalarize if required.
1514   switch (EltVT.SimpleTy) {
1515   // i1 is supported but has different rules.
1516   default:
1517     return false;
1518   case MVT::i1:
1519     // Masks can only use a single register.
1520     if (VT.getVectorNumElements() > MinVLen)
1521       return false;
1522     MinVLen /= 8;
1523     break;
1524   case MVT::i8:
1525   case MVT::i16:
1526   case MVT::i32:
1527     break;
1528   case MVT::i64:
1529     if (!Subtarget.hasVInstructionsI64())
1530       return false;
1531     break;
1532   case MVT::f16:
1533     if (!Subtarget.hasVInstructionsF16())
1534       return false;
1535     break;
1536   case MVT::f32:
1537     if (!Subtarget.hasVInstructionsF32())
1538       return false;
1539     break;
1540   case MVT::f64:
1541     if (!Subtarget.hasVInstructionsF64())
1542       return false;
1543     break;
1544   }
1545 
1546   // Reject elements larger than ELEN.
1547   if (EltVT.getSizeInBits() > Subtarget.getELEN())
1548     return false;
1549 
1550   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1551   // Don't use RVV for types that don't fit.
1552   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1553     return false;
1554 
1555   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1556   // the base fixed length RVV support in place.
1557   if (!VT.isPow2VectorType())
1558     return false;
1559 
1560   return true;
1561 }
1562 
1563 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1564   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1565 }
1566 
1567 // Return the largest legal scalable vector type that matches VT's element type.
1568 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1569                                             const RISCVSubtarget &Subtarget) {
1570   // This may be called before legal types are setup.
1571   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1572           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1573          "Expected legal fixed length vector!");
1574 
1575   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1576   unsigned MaxELen = Subtarget.getELEN();
1577 
1578   MVT EltVT = VT.getVectorElementType();
1579   switch (EltVT.SimpleTy) {
1580   default:
1581     llvm_unreachable("unexpected element type for RVV container");
1582   case MVT::i1:
1583   case MVT::i8:
1584   case MVT::i16:
1585   case MVT::i32:
1586   case MVT::i64:
1587   case MVT::f16:
1588   case MVT::f32:
1589   case MVT::f64: {
1590     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1591     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1592     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1593     unsigned NumElts =
1594         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1595     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1596     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1597     return MVT::getScalableVectorVT(EltVT, NumElts);
1598   }
1599   }
1600 }
1601 
1602 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1603                                             const RISCVSubtarget &Subtarget) {
1604   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1605                                           Subtarget);
1606 }
1607 
1608 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1609   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1610 }
1611 
1612 // Grow V to consume an entire RVV register.
1613 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1614                                        const RISCVSubtarget &Subtarget) {
1615   assert(VT.isScalableVector() &&
1616          "Expected to convert into a scalable vector!");
1617   assert(V.getValueType().isFixedLengthVector() &&
1618          "Expected a fixed length vector operand!");
1619   SDLoc DL(V);
1620   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1621   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1622 }
1623 
1624 // Shrink V so it's just big enough to maintain a VT's worth of data.
1625 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1626                                          const RISCVSubtarget &Subtarget) {
1627   assert(VT.isFixedLengthVector() &&
1628          "Expected to convert into a fixed length vector!");
1629   assert(V.getValueType().isScalableVector() &&
1630          "Expected a scalable vector operand!");
1631   SDLoc DL(V);
1632   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1633   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1634 }
1635 
1636 /// Return the type of the mask type suitable for masking the provided
1637 /// vector type.  This is simply an i1 element type vector of the same
1638 /// (possibly scalable) length.
1639 static MVT getMaskTypeFor(EVT VecVT) {
1640   assert(VecVT.isVector());
1641   ElementCount EC = VecVT.getVectorElementCount();
1642   return MVT::getVectorVT(MVT::i1, EC);
1643 }
1644 
1645 /// Creates an all ones mask suitable for masking a vector of type VecTy with
1646 /// vector length VL.  .
1647 static SDValue getAllOnesMask(MVT VecVT, SDValue VL, SDLoc DL,
1648                               SelectionDAG &DAG) {
1649   MVT MaskVT = getMaskTypeFor(VecVT);
1650   return DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1651 }
1652 
1653 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1654 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1655 // the vector type that it is contained in.
1656 static std::pair<SDValue, SDValue>
1657 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1658                 const RISCVSubtarget &Subtarget) {
1659   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1660   MVT XLenVT = Subtarget.getXLenVT();
1661   SDValue VL = VecVT.isFixedLengthVector()
1662                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1663                    : DAG.getRegister(RISCV::X0, XLenVT);
1664   SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
1665   return {Mask, VL};
1666 }
1667 
1668 // As above but assuming the given type is a scalable vector type.
1669 static std::pair<SDValue, SDValue>
1670 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1671                         const RISCVSubtarget &Subtarget) {
1672   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1673   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1674 }
1675 
1676 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1677 // of either is (currently) supported. This can get us into an infinite loop
1678 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1679 // as a ..., etc.
1680 // Until either (or both) of these can reliably lower any node, reporting that
1681 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1682 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1683 // which is not desirable.
1684 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1685     EVT VT, unsigned DefinedValues) const {
1686   return false;
1687 }
1688 
1689 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1690                                   const RISCVSubtarget &Subtarget) {
1691   // RISCV FP-to-int conversions saturate to the destination register size, but
1692   // don't produce 0 for nan. We can use a conversion instruction and fix the
1693   // nan case with a compare and a select.
1694   SDValue Src = Op.getOperand(0);
1695 
1696   EVT DstVT = Op.getValueType();
1697   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1698 
1699   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1700   unsigned Opc;
1701   if (SatVT == DstVT)
1702     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1703   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1704     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1705   else
1706     return SDValue();
1707   // FIXME: Support other SatVTs by clamping before or after the conversion.
1708 
1709   SDLoc DL(Op);
1710   SDValue FpToInt = DAG.getNode(
1711       Opc, DL, DstVT, Src,
1712       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1713 
1714   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1715   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1716 }
1717 
1718 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1719 // and back. Taking care to avoid converting values that are nan or already
1720 // correct.
1721 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1722 // have FRM dependencies modeled yet.
1723 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1724   MVT VT = Op.getSimpleValueType();
1725   assert(VT.isVector() && "Unexpected type");
1726 
1727   SDLoc DL(Op);
1728 
1729   // Freeze the source since we are increasing the number of uses.
1730   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1731 
1732   // Truncate to integer and convert back to FP.
1733   MVT IntVT = VT.changeVectorElementTypeToInteger();
1734   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1735   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1736 
1737   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1738 
1739   if (Op.getOpcode() == ISD::FCEIL) {
1740     // If the truncated value is the greater than or equal to the original
1741     // value, we've computed the ceil. Otherwise, we went the wrong way and
1742     // need to increase by 1.
1743     // FIXME: This should use a masked operation. Handle here or in isel?
1744     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1745                                  DAG.getConstantFP(1.0, DL, VT));
1746     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1747     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1748   } else if (Op.getOpcode() == ISD::FFLOOR) {
1749     // If the truncated value is the less than or equal to the original value,
1750     // we've computed the floor. Otherwise, we went the wrong way and need to
1751     // decrease by 1.
1752     // FIXME: This should use a masked operation. Handle here or in isel?
1753     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1754                                  DAG.getConstantFP(1.0, DL, VT));
1755     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1756     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1757   }
1758 
1759   // Restore the original sign so that -0.0 is preserved.
1760   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1761 
1762   // Determine the largest integer that can be represented exactly. This and
1763   // values larger than it don't have any fractional bits so don't need to
1764   // be converted.
1765   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1766   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1767   APFloat MaxVal = APFloat(FltSem);
1768   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1769                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1770   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1771 
1772   // If abs(Src) was larger than MaxVal or nan, keep it.
1773   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1774   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1775   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1776 }
1777 
1778 // ISD::FROUND is defined to round to nearest with ties rounding away from 0.
1779 // This mode isn't supported in vector hardware on RISCV. But as long as we
1780 // aren't compiling with trapping math, we can emulate this with
1781 // floor(X + copysign(nextafter(0.5, 0.0), X)).
1782 // FIXME: Could be shorter by changing rounding mode, but we don't have FRM
1783 // dependencies modeled yet.
1784 // FIXME: Use masked operations to avoid final merge.
1785 static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
1786   MVT VT = Op.getSimpleValueType();
1787   assert(VT.isVector() && "Unexpected type");
1788 
1789   SDLoc DL(Op);
1790 
1791   // Freeze the source since we are increasing the number of uses.
1792   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1793 
1794   // We do the conversion on the absolute value and fix the sign at the end.
1795   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1796 
1797   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1798   bool Ignored;
1799   APFloat Point5Pred = APFloat(0.5f);
1800   Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
1801   Point5Pred.next(/*nextDown*/ true);
1802 
1803   // Add the adjustment.
1804   SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
1805                                DAG.getConstantFP(Point5Pred, DL, VT));
1806 
1807   // Truncate to integer and convert back to fp.
1808   MVT IntVT = VT.changeVectorElementTypeToInteger();
1809   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
1810   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1811 
1812   // Restore the original sign.
1813   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1814 
1815   // Determine the largest integer that can be represented exactly. This and
1816   // values larger than it don't have any fractional bits so don't need to
1817   // be converted.
1818   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1819   APFloat MaxVal = APFloat(FltSem);
1820   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1821                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1822   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1823 
1824   // If abs(Src) was larger than MaxVal or nan, keep it.
1825   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1826   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1827   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1828 }
1829 
1830 struct VIDSequence {
1831   int64_t StepNumerator;
1832   unsigned StepDenominator;
1833   int64_t Addend;
1834 };
1835 
1836 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1837 // to the (non-zero) step S and start value X. This can be then lowered as the
1838 // RVV sequence (VID * S) + X, for example.
1839 // The step S is represented as an integer numerator divided by a positive
1840 // denominator. Note that the implementation currently only identifies
1841 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1842 // cannot detect 2/3, for example.
1843 // Note that this method will also match potentially unappealing index
1844 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1845 // determine whether this is worth generating code for.
1846 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1847   unsigned NumElts = Op.getNumOperands();
1848   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1849   if (!Op.getValueType().isInteger())
1850     return None;
1851 
1852   Optional<unsigned> SeqStepDenom;
1853   Optional<int64_t> SeqStepNum, SeqAddend;
1854   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1855   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1856   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1857     // Assume undef elements match the sequence; we just have to be careful
1858     // when interpolating across them.
1859     if (Op.getOperand(Idx).isUndef())
1860       continue;
1861     // The BUILD_VECTOR must be all constants.
1862     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1863       return None;
1864 
1865     uint64_t Val = Op.getConstantOperandVal(Idx) &
1866                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1867 
1868     if (PrevElt) {
1869       // Calculate the step since the last non-undef element, and ensure
1870       // it's consistent across the entire sequence.
1871       unsigned IdxDiff = Idx - PrevElt->second;
1872       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1873 
1874       // A zero-value value difference means that we're somewhere in the middle
1875       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1876       // step change before evaluating the sequence.
1877       if (ValDiff == 0)
1878         continue;
1879 
1880       int64_t Remainder = ValDiff % IdxDiff;
1881       // Normalize the step if it's greater than 1.
1882       if (Remainder != ValDiff) {
1883         // The difference must cleanly divide the element span.
1884         if (Remainder != 0)
1885           return None;
1886         ValDiff /= IdxDiff;
1887         IdxDiff = 1;
1888       }
1889 
1890       if (!SeqStepNum)
1891         SeqStepNum = ValDiff;
1892       else if (ValDiff != SeqStepNum)
1893         return None;
1894 
1895       if (!SeqStepDenom)
1896         SeqStepDenom = IdxDiff;
1897       else if (IdxDiff != *SeqStepDenom)
1898         return None;
1899     }
1900 
1901     // Record this non-undef element for later.
1902     if (!PrevElt || PrevElt->first != Val)
1903       PrevElt = std::make_pair(Val, Idx);
1904   }
1905 
1906   // We need to have logged a step for this to count as a legal index sequence.
1907   if (!SeqStepNum || !SeqStepDenom)
1908     return None;
1909 
1910   // Loop back through the sequence and validate elements we might have skipped
1911   // while waiting for a valid step. While doing this, log any sequence addend.
1912   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1913     if (Op.getOperand(Idx).isUndef())
1914       continue;
1915     uint64_t Val = Op.getConstantOperandVal(Idx) &
1916                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1917     uint64_t ExpectedVal =
1918         (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1919     int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1920     if (!SeqAddend)
1921       SeqAddend = Addend;
1922     else if (Addend != SeqAddend)
1923       return None;
1924   }
1925 
1926   assert(SeqAddend && "Must have an addend if we have a step");
1927 
1928   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1929 }
1930 
1931 // Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
1932 // and lower it as a VRGATHER_VX_VL from the source vector.
1933 static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
1934                                   SelectionDAG &DAG,
1935                                   const RISCVSubtarget &Subtarget) {
1936   if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1937     return SDValue();
1938   SDValue Vec = SplatVal.getOperand(0);
1939   // Only perform this optimization on vectors of the same size for simplicity.
1940   // Don't perform this optimization for i1 vectors.
1941   // FIXME: Support i1 vectors, maybe by promoting to i8?
1942   if (Vec.getValueType() != VT || VT.getVectorElementType() == MVT::i1)
1943     return SDValue();
1944   SDValue Idx = SplatVal.getOperand(1);
1945   // The index must be a legal type.
1946   if (Idx.getValueType() != Subtarget.getXLenVT())
1947     return SDValue();
1948 
1949   MVT ContainerVT = VT;
1950   if (VT.isFixedLengthVector()) {
1951     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1952     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
1953   }
1954 
1955   SDValue Mask, VL;
1956   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1957 
1958   SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
1959                                Idx, Mask, VL);
1960 
1961   if (!VT.isFixedLengthVector())
1962     return Gather;
1963 
1964   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1965 }
1966 
1967 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1968                                  const RISCVSubtarget &Subtarget) {
1969   MVT VT = Op.getSimpleValueType();
1970   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1971 
1972   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1973 
1974   SDLoc DL(Op);
1975   SDValue Mask, VL;
1976   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1977 
1978   MVT XLenVT = Subtarget.getXLenVT();
1979   unsigned NumElts = Op.getNumOperands();
1980 
1981   if (VT.getVectorElementType() == MVT::i1) {
1982     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1983       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1984       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1985     }
1986 
1987     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1988       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1989       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1990     }
1991 
1992     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1993     // scalar integer chunks whose bit-width depends on the number of mask
1994     // bits and XLEN.
1995     // First, determine the most appropriate scalar integer type to use. This
1996     // is at most XLenVT, but may be shrunk to a smaller vector element type
1997     // according to the size of the final vector - use i8 chunks rather than
1998     // XLenVT if we're producing a v8i1. This results in more consistent
1999     // codegen across RV32 and RV64.
2000     unsigned NumViaIntegerBits =
2001         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
2002     NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELEN());
2003     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
2004       // If we have to use more than one INSERT_VECTOR_ELT then this
2005       // optimization is likely to increase code size; avoid peforming it in
2006       // such a case. We can use a load from a constant pool in this case.
2007       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
2008         return SDValue();
2009       // Now we can create our integer vector type. Note that it may be larger
2010       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
2011       MVT IntegerViaVecVT =
2012           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
2013                            divideCeil(NumElts, NumViaIntegerBits));
2014 
2015       uint64_t Bits = 0;
2016       unsigned BitPos = 0, IntegerEltIdx = 0;
2017       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
2018 
2019       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
2020         // Once we accumulate enough bits to fill our scalar type, insert into
2021         // our vector and clear our accumulated data.
2022         if (I != 0 && I % NumViaIntegerBits == 0) {
2023           if (NumViaIntegerBits <= 32)
2024             Bits = SignExtend64<32>(Bits);
2025           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2026           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
2027                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2028           Bits = 0;
2029           BitPos = 0;
2030           IntegerEltIdx++;
2031         }
2032         SDValue V = Op.getOperand(I);
2033         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
2034         Bits |= ((uint64_t)BitValue << BitPos);
2035       }
2036 
2037       // Insert the (remaining) scalar value into position in our integer
2038       // vector type.
2039       if (NumViaIntegerBits <= 32)
2040         Bits = SignExtend64<32>(Bits);
2041       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2042       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
2043                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2044 
2045       if (NumElts < NumViaIntegerBits) {
2046         // If we're producing a smaller vector than our minimum legal integer
2047         // type, bitcast to the equivalent (known-legal) mask type, and extract
2048         // our final mask.
2049         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
2050         Vec = DAG.getBitcast(MVT::v8i1, Vec);
2051         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
2052                           DAG.getConstant(0, DL, XLenVT));
2053       } else {
2054         // Else we must have produced an integer type with the same size as the
2055         // mask type; bitcast for the final result.
2056         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
2057         Vec = DAG.getBitcast(VT, Vec);
2058       }
2059 
2060       return Vec;
2061     }
2062 
2063     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
2064     // vector type, we have a legal equivalently-sized i8 type, so we can use
2065     // that.
2066     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
2067     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
2068 
2069     SDValue WideVec;
2070     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2071       // For a splat, perform a scalar truncate before creating the wider
2072       // vector.
2073       assert(Splat.getValueType() == XLenVT &&
2074              "Unexpected type for i1 splat value");
2075       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2076                           DAG.getConstant(1, DL, XLenVT));
2077       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2078     } else {
2079       SmallVector<SDValue, 8> Ops(Op->op_values());
2080       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2081       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2082       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2083     }
2084 
2085     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2086   }
2087 
2088   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2089     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
2090       return Gather;
2091     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2092                                         : RISCVISD::VMV_V_X_VL;
2093     Splat =
2094         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
2095     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2096   }
2097 
2098   // Try and match index sequences, which we can lower to the vid instruction
2099   // with optional modifications. An all-undef vector is matched by
2100   // getSplatValue, above.
2101   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2102     int64_t StepNumerator = SimpleVID->StepNumerator;
2103     unsigned StepDenominator = SimpleVID->StepDenominator;
2104     int64_t Addend = SimpleVID->Addend;
2105 
2106     assert(StepNumerator != 0 && "Invalid step");
2107     bool Negate = false;
2108     int64_t SplatStepVal = StepNumerator;
2109     unsigned StepOpcode = ISD::MUL;
2110     if (StepNumerator != 1) {
2111       if (isPowerOf2_64(std::abs(StepNumerator))) {
2112         Negate = StepNumerator < 0;
2113         StepOpcode = ISD::SHL;
2114         SplatStepVal = Log2_64(std::abs(StepNumerator));
2115       }
2116     }
2117 
2118     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
2119     // threshold since it's the immediate value many RVV instructions accept.
2120     // There is no vmul.vi instruction so ensure multiply constant can fit in
2121     // a single addi instruction.
2122     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2123          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2124         isPowerOf2_32(StepDenominator) &&
2125         (SplatStepVal >= 0 || StepDenominator == 1) && isInt<5>(Addend)) {
2126       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2127       // Convert right out of the scalable type so we can use standard ISD
2128       // nodes for the rest of the computation. If we used scalable types with
2129       // these, we'd lose the fixed-length vector info and generate worse
2130       // vsetvli code.
2131       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2132       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2133           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2134         SDValue SplatStep = DAG.getSplatBuildVector(
2135             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2136         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2137       }
2138       if (StepDenominator != 1) {
2139         SDValue SplatStep = DAG.getSplatBuildVector(
2140             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2141         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2142       }
2143       if (Addend != 0 || Negate) {
2144         SDValue SplatAddend = DAG.getSplatBuildVector(
2145             VT, DL, DAG.getConstant(Addend, DL, XLenVT));
2146         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
2147       }
2148       return VID;
2149     }
2150   }
2151 
2152   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2153   // when re-interpreted as a vector with a larger element type. For example,
2154   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2155   // could be instead splat as
2156   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2157   // TODO: This optimization could also work on non-constant splats, but it
2158   // would require bit-manipulation instructions to construct the splat value.
2159   SmallVector<SDValue> Sequence;
2160   unsigned EltBitSize = VT.getScalarSizeInBits();
2161   const auto *BV = cast<BuildVectorSDNode>(Op);
2162   if (VT.isInteger() && EltBitSize < 64 &&
2163       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2164       BV->getRepeatedSequence(Sequence) &&
2165       (Sequence.size() * EltBitSize) <= 64) {
2166     unsigned SeqLen = Sequence.size();
2167     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2168     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2169     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2170             ViaIntVT == MVT::i64) &&
2171            "Unexpected sequence type");
2172 
2173     unsigned EltIdx = 0;
2174     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2175     uint64_t SplatValue = 0;
2176     // Construct the amalgamated value which can be splatted as this larger
2177     // vector type.
2178     for (const auto &SeqV : Sequence) {
2179       if (!SeqV.isUndef())
2180         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2181                        << (EltIdx * EltBitSize));
2182       EltIdx++;
2183     }
2184 
2185     // On RV64, sign-extend from 32 to 64 bits where possible in order to
2186     // achieve better constant materializion.
2187     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2188       SplatValue = SignExtend64<32>(SplatValue);
2189 
2190     // Since we can't introduce illegal i64 types at this stage, we can only
2191     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2192     // way we can use RVV instructions to splat.
2193     assert((ViaIntVT.bitsLE(XLenVT) ||
2194             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2195            "Unexpected bitcast sequence");
2196     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2197       SDValue ViaVL =
2198           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2199       MVT ViaContainerVT =
2200           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2201       SDValue Splat =
2202           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2203                       DAG.getUNDEF(ViaContainerVT),
2204                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2205       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2206       return DAG.getBitcast(VT, Splat);
2207     }
2208   }
2209 
2210   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2211   // which constitute a large proportion of the elements. In such cases we can
2212   // splat a vector with the dominant element and make up the shortfall with
2213   // INSERT_VECTOR_ELTs.
2214   // Note that this includes vectors of 2 elements by association. The
2215   // upper-most element is the "dominant" one, allowing us to use a splat to
2216   // "insert" the upper element, and an insert of the lower element at position
2217   // 0, which improves codegen.
2218   SDValue DominantValue;
2219   unsigned MostCommonCount = 0;
2220   DenseMap<SDValue, unsigned> ValueCounts;
2221   unsigned NumUndefElts =
2222       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2223 
2224   // Track the number of scalar loads we know we'd be inserting, estimated as
2225   // any non-zero floating-point constant. Other kinds of element are either
2226   // already in registers or are materialized on demand. The threshold at which
2227   // a vector load is more desirable than several scalar materializion and
2228   // vector-insertion instructions is not known.
2229   unsigned NumScalarLoads = 0;
2230 
2231   for (SDValue V : Op->op_values()) {
2232     if (V.isUndef())
2233       continue;
2234 
2235     ValueCounts.insert(std::make_pair(V, 0));
2236     unsigned &Count = ValueCounts[V];
2237 
2238     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2239       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2240 
2241     // Is this value dominant? In case of a tie, prefer the highest element as
2242     // it's cheaper to insert near the beginning of a vector than it is at the
2243     // end.
2244     if (++Count >= MostCommonCount) {
2245       DominantValue = V;
2246       MostCommonCount = Count;
2247     }
2248   }
2249 
2250   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2251   unsigned NumDefElts = NumElts - NumUndefElts;
2252   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2253 
2254   // Don't perform this optimization when optimizing for size, since
2255   // materializing elements and inserting them tends to cause code bloat.
2256   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2257       ((MostCommonCount > DominantValueCountThreshold) ||
2258        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2259     // Start by splatting the most common element.
2260     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2261 
2262     DenseSet<SDValue> Processed{DominantValue};
2263     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2264     for (const auto &OpIdx : enumerate(Op->ops())) {
2265       const SDValue &V = OpIdx.value();
2266       if (V.isUndef() || !Processed.insert(V).second)
2267         continue;
2268       if (ValueCounts[V] == 1) {
2269         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2270                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2271       } else {
2272         // Blend in all instances of this value using a VSELECT, using a
2273         // mask where each bit signals whether that element is the one
2274         // we're after.
2275         SmallVector<SDValue> Ops;
2276         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2277           return DAG.getConstant(V == V1, DL, XLenVT);
2278         });
2279         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2280                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2281                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2282       }
2283     }
2284 
2285     return Vec;
2286   }
2287 
2288   return SDValue();
2289 }
2290 
2291 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2292                                    SDValue Lo, SDValue Hi, SDValue VL,
2293                                    SelectionDAG &DAG) {
2294   if (!Passthru)
2295     Passthru = DAG.getUNDEF(VT);
2296   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2297     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2298     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2299     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2300     // node in order to try and match RVV vector/scalar instructions.
2301     if ((LoC >> 31) == HiC)
2302       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
2303 
2304     // If vl is equal to XLEN_MAX and Hi constant is equal to Lo, we could use
2305     // vmv.v.x whose EEW = 32 to lower it.
2306     auto *Const = dyn_cast<ConstantSDNode>(VL);
2307     if (LoC == HiC && Const && Const->isAllOnesValue()) {
2308       MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
2309       // TODO: if vl <= min(VLMAX), we can also do this. But we could not
2310       // access the subtarget here now.
2311       auto InterVec = DAG.getNode(
2312           RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT), Lo,
2313                                   DAG.getRegister(RISCV::X0, MVT::i32));
2314       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2315     }
2316   }
2317 
2318   // Fall back to a stack store and stride x0 vector load.
2319   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
2320                      Hi, VL);
2321 }
2322 
2323 // Called by type legalization to handle splat of i64 on RV32.
2324 // FIXME: We can optimize this when the type has sign or zero bits in one
2325 // of the halves.
2326 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2327                                    SDValue Scalar, SDValue VL,
2328                                    SelectionDAG &DAG) {
2329   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2330   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2331                            DAG.getConstant(0, DL, MVT::i32));
2332   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2333                            DAG.getConstant(1, DL, MVT::i32));
2334   return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
2335 }
2336 
2337 // This function lowers a splat of a scalar operand Splat with the vector
2338 // length VL. It ensures the final sequence is type legal, which is useful when
2339 // lowering a splat after type legalization.
2340 static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
2341                                 MVT VT, SDLoc DL, SelectionDAG &DAG,
2342                                 const RISCVSubtarget &Subtarget) {
2343   bool HasPassthru = Passthru && !Passthru.isUndef();
2344   if (!HasPassthru && !Passthru)
2345     Passthru = DAG.getUNDEF(VT);
2346   if (VT.isFloatingPoint()) {
2347     // If VL is 1, we could use vfmv.s.f.
2348     if (isOneConstant(VL))
2349       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
2350     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
2351   }
2352 
2353   MVT XLenVT = Subtarget.getXLenVT();
2354 
2355   // Simplest case is that the operand needs to be promoted to XLenVT.
2356   if (Scalar.getValueType().bitsLE(XLenVT)) {
2357     // If the operand is a constant, sign extend to increase our chances
2358     // of being able to use a .vi instruction. ANY_EXTEND would become a
2359     // a zero extend and the simm5 check in isel would fail.
2360     // FIXME: Should we ignore the upper bits in isel instead?
2361     unsigned ExtOpc =
2362         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2363     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2364     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2365     // If VL is 1 and the scalar value won't benefit from immediate, we could
2366     // use vmv.s.x.
2367     if (isOneConstant(VL) &&
2368         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2369       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
2370     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
2371   }
2372 
2373   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2374          "Unexpected scalar for splat lowering!");
2375 
2376   if (isOneConstant(VL) && isNullConstant(Scalar))
2377     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
2378                        DAG.getConstant(0, DL, XLenVT), VL);
2379 
2380   // Otherwise use the more complicated splatting algorithm.
2381   return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
2382 }
2383 
2384 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
2385                                 const RISCVSubtarget &Subtarget) {
2386   // We need to be able to widen elements to the next larger integer type.
2387   if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
2388     return false;
2389 
2390   int Size = Mask.size();
2391   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
2392 
2393   int Srcs[] = {-1, -1};
2394   for (int i = 0; i != Size; ++i) {
2395     // Ignore undef elements.
2396     if (Mask[i] < 0)
2397       continue;
2398 
2399     // Is this an even or odd element.
2400     int Pol = i % 2;
2401 
2402     // Ensure we consistently use the same source for this element polarity.
2403     int Src = Mask[i] / Size;
2404     if (Srcs[Pol] < 0)
2405       Srcs[Pol] = Src;
2406     if (Srcs[Pol] != Src)
2407       return false;
2408 
2409     // Make sure the element within the source is appropriate for this element
2410     // in the destination.
2411     int Elt = Mask[i] % Size;
2412     if (Elt != i / 2)
2413       return false;
2414   }
2415 
2416   // We need to find a source for each polarity and they can't be the same.
2417   if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
2418     return false;
2419 
2420   // Swap the sources if the second source was in the even polarity.
2421   SwapSources = Srcs[0] > Srcs[1];
2422 
2423   return true;
2424 }
2425 
2426 /// Match shuffles that concatenate two vectors, rotate the concatenation,
2427 /// and then extract the original number of elements from the rotated result.
2428 /// This is equivalent to vector.splice or X86's PALIGNR instruction. The
2429 /// returned rotation amount is for a rotate right, where elements move from
2430 /// higher elements to lower elements. \p LoSrc indicates the first source
2431 /// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
2432 /// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
2433 /// 0 or 1 if a rotation is found.
2434 ///
2435 /// NOTE: We talk about rotate to the right which matches how bit shift and
2436 /// rotate instructions are described where LSBs are on the right, but LLVM IR
2437 /// and the table below write vectors with the lowest elements on the left.
2438 static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
2439   int Size = Mask.size();
2440 
2441   // We need to detect various ways of spelling a rotation:
2442   //   [11, 12, 13, 14, 15,  0,  1,  2]
2443   //   [-1, 12, 13, 14, -1, -1,  1, -1]
2444   //   [-1, -1, -1, -1, -1, -1,  1,  2]
2445   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
2446   //   [-1,  4,  5,  6, -1, -1,  9, -1]
2447   //   [-1,  4,  5,  6, -1, -1, -1, -1]
2448   int Rotation = 0;
2449   LoSrc = -1;
2450   HiSrc = -1;
2451   for (int i = 0; i != Size; ++i) {
2452     int M = Mask[i];
2453     if (M < 0)
2454       continue;
2455 
2456     // Determine where a rotate vector would have started.
2457     int StartIdx = i - (M % Size);
2458     // The identity rotation isn't interesting, stop.
2459     if (StartIdx == 0)
2460       return -1;
2461 
2462     // If we found the tail of a vector the rotation must be the missing
2463     // front. If we found the head of a vector, it must be how much of the
2464     // head.
2465     int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
2466 
2467     if (Rotation == 0)
2468       Rotation = CandidateRotation;
2469     else if (Rotation != CandidateRotation)
2470       // The rotations don't match, so we can't match this mask.
2471       return -1;
2472 
2473     // Compute which value this mask is pointing at.
2474     int MaskSrc = M < Size ? 0 : 1;
2475 
2476     // Compute which of the two target values this index should be assigned to.
2477     // This reflects whether the high elements are remaining or the low elemnts
2478     // are remaining.
2479     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
2480 
2481     // Either set up this value if we've not encountered it before, or check
2482     // that it remains consistent.
2483     if (TargetSrc < 0)
2484       TargetSrc = MaskSrc;
2485     else if (TargetSrc != MaskSrc)
2486       // This may be a rotation, but it pulls from the inputs in some
2487       // unsupported interleaving.
2488       return -1;
2489   }
2490 
2491   // Check that we successfully analyzed the mask, and normalize the results.
2492   assert(Rotation != 0 && "Failed to locate a viable rotation!");
2493   assert((LoSrc >= 0 || HiSrc >= 0) &&
2494          "Failed to find a rotated input vector!");
2495 
2496   return Rotation;
2497 }
2498 
2499 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2500                                    const RISCVSubtarget &Subtarget) {
2501   SDValue V1 = Op.getOperand(0);
2502   SDValue V2 = Op.getOperand(1);
2503   SDLoc DL(Op);
2504   MVT XLenVT = Subtarget.getXLenVT();
2505   MVT VT = Op.getSimpleValueType();
2506   unsigned NumElts = VT.getVectorNumElements();
2507   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2508 
2509   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2510 
2511   SDValue TrueMask, VL;
2512   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2513 
2514   if (SVN->isSplat()) {
2515     const int Lane = SVN->getSplatIndex();
2516     if (Lane >= 0) {
2517       MVT SVT = VT.getVectorElementType();
2518 
2519       // Turn splatted vector load into a strided load with an X0 stride.
2520       SDValue V = V1;
2521       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2522       // with undef.
2523       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2524       int Offset = Lane;
2525       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2526         int OpElements =
2527             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2528         V = V.getOperand(Offset / OpElements);
2529         Offset %= OpElements;
2530       }
2531 
2532       // We need to ensure the load isn't atomic or volatile.
2533       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2534         auto *Ld = cast<LoadSDNode>(V);
2535         Offset *= SVT.getStoreSize();
2536         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2537                                                    TypeSize::Fixed(Offset), DL);
2538 
2539         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2540         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2541           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2542           SDValue IntID =
2543               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2544           SDValue Ops[] = {Ld->getChain(),
2545                            IntID,
2546                            DAG.getUNDEF(ContainerVT),
2547                            NewAddr,
2548                            DAG.getRegister(RISCV::X0, XLenVT),
2549                            VL};
2550           SDValue NewLoad = DAG.getMemIntrinsicNode(
2551               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2552               DAG.getMachineFunction().getMachineMemOperand(
2553                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2554           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2555           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2556         }
2557 
2558         // Otherwise use a scalar load and splat. This will give the best
2559         // opportunity to fold a splat into the operation. ISel can turn it into
2560         // the x0 strided load if we aren't able to fold away the select.
2561         if (SVT.isFloatingPoint())
2562           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2563                           Ld->getPointerInfo().getWithOffset(Offset),
2564                           Ld->getOriginalAlign(),
2565                           Ld->getMemOperand()->getFlags());
2566         else
2567           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2568                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2569                              Ld->getOriginalAlign(),
2570                              Ld->getMemOperand()->getFlags());
2571         DAG.makeEquivalentMemoryOrdering(Ld, V);
2572 
2573         unsigned Opc =
2574             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2575         SDValue Splat =
2576             DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
2577         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2578       }
2579 
2580       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2581       assert(Lane < (int)NumElts && "Unexpected lane!");
2582       SDValue Gather =
2583           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2584                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2585       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2586     }
2587   }
2588 
2589   ArrayRef<int> Mask = SVN->getMask();
2590 
2591   // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors may
2592   // be undef which can be handled with a single SLIDEDOWN/UP.
2593   int LoSrc, HiSrc;
2594   int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
2595   if (Rotation > 0) {
2596     SDValue LoV, HiV;
2597     if (LoSrc >= 0) {
2598       LoV = LoSrc == 0 ? V1 : V2;
2599       LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
2600     }
2601     if (HiSrc >= 0) {
2602       HiV = HiSrc == 0 ? V1 : V2;
2603       HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
2604     }
2605 
2606     // We found a rotation. We need to slide HiV down by Rotation. Then we need
2607     // to slide LoV up by (NumElts - Rotation).
2608     unsigned InvRotate = NumElts - Rotation;
2609 
2610     SDValue Res = DAG.getUNDEF(ContainerVT);
2611     if (HiV) {
2612       // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
2613       // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
2614       // causes multiple vsetvlis in some test cases such as lowering
2615       // reduce.mul
2616       SDValue DownVL = VL;
2617       if (LoV)
2618         DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
2619       Res =
2620           DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, Res, HiV,
2621                       DAG.getConstant(Rotation, DL, XLenVT), TrueMask, DownVL);
2622     }
2623     if (LoV)
2624       Res = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Res, LoV,
2625                         DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL);
2626 
2627     return convertFromScalableVector(VT, Res, DAG, Subtarget);
2628   }
2629 
2630   // Detect an interleave shuffle and lower to
2631   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
2632   bool SwapSources;
2633   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2634     // Swap sources if needed.
2635     if (SwapSources)
2636       std::swap(V1, V2);
2637 
2638     // Extract the lower half of the vectors.
2639     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2640     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2641                      DAG.getConstant(0, DL, XLenVT));
2642     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2643                      DAG.getConstant(0, DL, XLenVT));
2644 
2645     // Double the element width and halve the number of elements in an int type.
2646     unsigned EltBits = VT.getScalarSizeInBits();
2647     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2648     MVT WideIntVT =
2649         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2650     // Convert this to a scalable vector. We need to base this on the
2651     // destination size to ensure there's always a type with a smaller LMUL.
2652     MVT WideIntContainerVT =
2653         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2654 
2655     // Convert sources to scalable vectors with the same element count as the
2656     // larger type.
2657     MVT HalfContainerVT = MVT::getVectorVT(
2658         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2659     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2660     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2661 
2662     // Cast sources to integer.
2663     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2664     MVT IntHalfVT =
2665         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2666     V1 = DAG.getBitcast(IntHalfVT, V1);
2667     V2 = DAG.getBitcast(IntHalfVT, V2);
2668 
2669     // Freeze V2 since we use it twice and we need to be sure that the add and
2670     // multiply see the same value.
2671     V2 = DAG.getFreeze(V2);
2672 
2673     // Recreate TrueMask using the widened type's element count.
2674     TrueMask = getAllOnesMask(HalfContainerVT, VL, DL, DAG);
2675 
2676     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2677     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2678                               V2, TrueMask, VL);
2679     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2680     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2681                                      DAG.getUNDEF(IntHalfVT),
2682                                      DAG.getAllOnesConstant(DL, XLenVT));
2683     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2684                                    V2, Multiplier, TrueMask, VL);
2685     // Add the new copies to our previous addition giving us 2^eltbits copies of
2686     // V2. This is equivalent to shifting V2 left by eltbits. This should
2687     // combine with the vwmulu.vv above to form vwmaccu.vv.
2688     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2689                       TrueMask, VL);
2690     // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
2691     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2692     // vector VT.
2693     ContainerVT =
2694         MVT::getVectorVT(VT.getVectorElementType(),
2695                          WideIntContainerVT.getVectorElementCount() * 2);
2696     Add = DAG.getBitcast(ContainerVT, Add);
2697     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2698   }
2699 
2700   // Detect shuffles which can be re-expressed as vector selects; these are
2701   // shuffles in which each element in the destination is taken from an element
2702   // at the corresponding index in either source vectors.
2703   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2704     int MaskIndex = MaskIdx.value();
2705     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2706   });
2707 
2708   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2709 
2710   SmallVector<SDValue> MaskVals;
2711   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2712   // merged with a second vrgather.
2713   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2714 
2715   // By default we preserve the original operand order, and use a mask to
2716   // select LHS as true and RHS as false. However, since RVV vector selects may
2717   // feature splats but only on the LHS, we may choose to invert our mask and
2718   // instead select between RHS and LHS.
2719   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2720   bool InvertMask = IsSelect == SwapOps;
2721 
2722   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2723   // half.
2724   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2725 
2726   // Now construct the mask that will be used by the vselect or blended
2727   // vrgather operation. For vrgathers, construct the appropriate indices into
2728   // each vector.
2729   for (int MaskIndex : Mask) {
2730     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2731     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2732     if (!IsSelect) {
2733       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2734       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2735                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2736                                      : DAG.getUNDEF(XLenVT));
2737       GatherIndicesRHS.push_back(
2738           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2739                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2740       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2741         ++LHSIndexCounts[MaskIndex];
2742       if (!IsLHSOrUndefIndex)
2743         ++RHSIndexCounts[MaskIndex - NumElts];
2744     }
2745   }
2746 
2747   if (SwapOps) {
2748     std::swap(V1, V2);
2749     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2750   }
2751 
2752   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2753   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2754   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2755 
2756   if (IsSelect)
2757     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2758 
2759   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2760     // On such a large vector we're unable to use i8 as the index type.
2761     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2762     // may involve vector splitting if we're already at LMUL=8, or our
2763     // user-supplied maximum fixed-length LMUL.
2764     return SDValue();
2765   }
2766 
2767   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2768   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2769   MVT IndexVT = VT.changeTypeToInteger();
2770   // Since we can't introduce illegal index types at this stage, use i16 and
2771   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2772   // than XLenVT.
2773   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2774     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2775     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2776   }
2777 
2778   MVT IndexContainerVT =
2779       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2780 
2781   SDValue Gather;
2782   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2783   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2784   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2785     Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
2786                               Subtarget);
2787   } else {
2788     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2789     // If only one index is used, we can use a "splat" vrgather.
2790     // TODO: We can splat the most-common index and fix-up any stragglers, if
2791     // that's beneficial.
2792     if (LHSIndexCounts.size() == 1) {
2793       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2794       Gather =
2795           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2796                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2797     } else {
2798       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2799       LHSIndices =
2800           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2801 
2802       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2803                            TrueMask, VL);
2804     }
2805   }
2806 
2807   // If a second vector operand is used by this shuffle, blend it in with an
2808   // additional vrgather.
2809   if (!V2.isUndef()) {
2810     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2811     // If only one index is used, we can use a "splat" vrgather.
2812     // TODO: We can splat the most-common index and fix-up any stragglers, if
2813     // that's beneficial.
2814     if (RHSIndexCounts.size() == 1) {
2815       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2816       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2817                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2818     } else {
2819       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2820       RHSIndices =
2821           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2822       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2823                        VL);
2824     }
2825 
2826     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2827     SelectMask =
2828         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2829 
2830     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2831                          Gather, VL);
2832   }
2833 
2834   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2835 }
2836 
2837 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
2838   // Support splats for any type. These should type legalize well.
2839   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
2840     return true;
2841 
2842   // Only support legal VTs for other shuffles for now.
2843   if (!isTypeLegal(VT))
2844     return false;
2845 
2846   MVT SVT = VT.getSimpleVT();
2847 
2848   bool SwapSources;
2849   int LoSrc, HiSrc;
2850   return (isElementRotate(LoSrc, HiSrc, M) > 0) ||
2851          isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
2852 }
2853 
2854 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2855 // the exponent.
2856 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2857   MVT VT = Op.getSimpleValueType();
2858   unsigned EltSize = VT.getScalarSizeInBits();
2859   SDValue Src = Op.getOperand(0);
2860   SDLoc DL(Op);
2861 
2862   // We need a FP type that can represent the value.
2863   // TODO: Use f16 for i8 when possible?
2864   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2865   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2866 
2867   // Legal types should have been checked in the RISCVTargetLowering
2868   // constructor.
2869   // TODO: Splitting may make sense in some cases.
2870   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2871          "Expected legal float type!");
2872 
2873   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2874   // The trailing zero count is equal to log2 of this single bit value.
2875   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2876     SDValue Neg =
2877         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2878     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2879   }
2880 
2881   // We have a legal FP type, convert to it.
2882   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2883   // Bitcast to integer and shift the exponent to the LSB.
2884   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2885   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2886   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2887   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2888                               DAG.getConstant(ShiftAmt, DL, IntVT));
2889   // Truncate back to original type to allow vnsrl.
2890   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2891   // The exponent contains log2 of the value in biased form.
2892   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2893 
2894   // For trailing zeros, we just need to subtract the bias.
2895   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2896     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2897                        DAG.getConstant(ExponentBias, DL, VT));
2898 
2899   // For leading zeros, we need to remove the bias and convert from log2 to
2900   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2901   unsigned Adjust = ExponentBias + (EltSize - 1);
2902   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2903 }
2904 
2905 // While RVV has alignment restrictions, we should always be able to load as a
2906 // legal equivalently-sized byte-typed vector instead. This method is
2907 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
2908 // the load is already correctly-aligned, it returns SDValue().
2909 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2910                                                     SelectionDAG &DAG) const {
2911   auto *Load = cast<LoadSDNode>(Op);
2912   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2913 
2914   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2915                                      Load->getMemoryVT(),
2916                                      *Load->getMemOperand()))
2917     return SDValue();
2918 
2919   SDLoc DL(Op);
2920   MVT VT = Op.getSimpleValueType();
2921   unsigned EltSizeBits = VT.getScalarSizeInBits();
2922   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2923          "Unexpected unaligned RVV load type");
2924   MVT NewVT =
2925       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2926   assert(NewVT.isValid() &&
2927          "Expecting equally-sized RVV vector types to be legal");
2928   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2929                           Load->getPointerInfo(), Load->getOriginalAlign(),
2930                           Load->getMemOperand()->getFlags());
2931   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2932 }
2933 
2934 // While RVV has alignment restrictions, we should always be able to store as a
2935 // legal equivalently-sized byte-typed vector instead. This method is
2936 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
2937 // returns SDValue() if the store is already correctly aligned.
2938 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2939                                                      SelectionDAG &DAG) const {
2940   auto *Store = cast<StoreSDNode>(Op);
2941   assert(Store && Store->getValue().getValueType().isVector() &&
2942          "Expected vector store");
2943 
2944   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2945                                      Store->getMemoryVT(),
2946                                      *Store->getMemOperand()))
2947     return SDValue();
2948 
2949   SDLoc DL(Op);
2950   SDValue StoredVal = Store->getValue();
2951   MVT VT = StoredVal.getSimpleValueType();
2952   unsigned EltSizeBits = VT.getScalarSizeInBits();
2953   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2954          "Unexpected unaligned RVV store type");
2955   MVT NewVT =
2956       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2957   assert(NewVT.isValid() &&
2958          "Expecting equally-sized RVV vector types to be legal");
2959   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2960   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2961                       Store->getPointerInfo(), Store->getOriginalAlign(),
2962                       Store->getMemOperand()->getFlags());
2963 }
2964 
2965 static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG,
2966                              const RISCVSubtarget &Subtarget) {
2967   assert(Op.getValueType() == MVT::i64 && "Unexpected VT");
2968 
2969   int64_t Imm = cast<ConstantSDNode>(Op)->getSExtValue();
2970 
2971   // All simm32 constants should be handled by isel.
2972   // NOTE: The getMaxBuildIntsCost call below should return a value >= 2 making
2973   // this check redundant, but small immediates are common so this check
2974   // should have better compile time.
2975   if (isInt<32>(Imm))
2976     return Op;
2977 
2978   // We only need to cost the immediate, if constant pool lowering is enabled.
2979   if (!Subtarget.useConstantPoolForLargeInts())
2980     return Op;
2981 
2982   RISCVMatInt::InstSeq Seq =
2983       RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());
2984   if (Seq.size() <= Subtarget.getMaxBuildIntsCost())
2985     return Op;
2986 
2987   // Expand to a constant pool using the default expansion code.
2988   return SDValue();
2989 }
2990 
2991 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2992                                             SelectionDAG &DAG) const {
2993   switch (Op.getOpcode()) {
2994   default:
2995     report_fatal_error("unimplemented operand");
2996   case ISD::GlobalAddress:
2997     return lowerGlobalAddress(Op, DAG);
2998   case ISD::BlockAddress:
2999     return lowerBlockAddress(Op, DAG);
3000   case ISD::ConstantPool:
3001     return lowerConstantPool(Op, DAG);
3002   case ISD::JumpTable:
3003     return lowerJumpTable(Op, DAG);
3004   case ISD::GlobalTLSAddress:
3005     return lowerGlobalTLSAddress(Op, DAG);
3006   case ISD::Constant:
3007     return lowerConstant(Op, DAG, Subtarget);
3008   case ISD::SELECT:
3009     return lowerSELECT(Op, DAG);
3010   case ISD::BRCOND:
3011     return lowerBRCOND(Op, DAG);
3012   case ISD::VASTART:
3013     return lowerVASTART(Op, DAG);
3014   case ISD::FRAMEADDR:
3015     return lowerFRAMEADDR(Op, DAG);
3016   case ISD::RETURNADDR:
3017     return lowerRETURNADDR(Op, DAG);
3018   case ISD::SHL_PARTS:
3019     return lowerShiftLeftParts(Op, DAG);
3020   case ISD::SRA_PARTS:
3021     return lowerShiftRightParts(Op, DAG, true);
3022   case ISD::SRL_PARTS:
3023     return lowerShiftRightParts(Op, DAG, false);
3024   case ISD::BITCAST: {
3025     SDLoc DL(Op);
3026     EVT VT = Op.getValueType();
3027     SDValue Op0 = Op.getOperand(0);
3028     EVT Op0VT = Op0.getValueType();
3029     MVT XLenVT = Subtarget.getXLenVT();
3030     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
3031       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
3032       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
3033       return FPConv;
3034     }
3035     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
3036         Subtarget.hasStdExtF()) {
3037       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
3038       SDValue FPConv =
3039           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
3040       return FPConv;
3041     }
3042 
3043     // Consider other scalar<->scalar casts as legal if the types are legal.
3044     // Otherwise expand them.
3045     if (!VT.isVector() && !Op0VT.isVector()) {
3046       if (isTypeLegal(VT) && isTypeLegal(Op0VT))
3047         return Op;
3048       return SDValue();
3049     }
3050 
3051     assert(!VT.isScalableVector() && !Op0VT.isScalableVector() &&
3052            "Unexpected types");
3053 
3054     if (VT.isFixedLengthVector()) {
3055       // We can handle fixed length vector bitcasts with a simple replacement
3056       // in isel.
3057       if (Op0VT.isFixedLengthVector())
3058         return Op;
3059       // When bitcasting from scalar to fixed-length vector, insert the scalar
3060       // into a one-element vector of the result type, and perform a vector
3061       // bitcast.
3062       if (!Op0VT.isVector()) {
3063         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
3064         if (!isTypeLegal(BVT))
3065           return SDValue();
3066         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
3067                                               DAG.getUNDEF(BVT), Op0,
3068                                               DAG.getConstant(0, DL, XLenVT)));
3069       }
3070       return SDValue();
3071     }
3072     // Custom-legalize bitcasts from fixed-length vector types to scalar types
3073     // thus: bitcast the vector to a one-element vector type whose element type
3074     // is the same as the result type, and extract the first element.
3075     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
3076       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
3077       if (!isTypeLegal(BVT))
3078         return SDValue();
3079       SDValue BVec = DAG.getBitcast(BVT, Op0);
3080       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
3081                          DAG.getConstant(0, DL, XLenVT));
3082     }
3083     return SDValue();
3084   }
3085   case ISD::INTRINSIC_WO_CHAIN:
3086     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3087   case ISD::INTRINSIC_W_CHAIN:
3088     return LowerINTRINSIC_W_CHAIN(Op, DAG);
3089   case ISD::INTRINSIC_VOID:
3090     return LowerINTRINSIC_VOID(Op, DAG);
3091   case ISD::BSWAP:
3092   case ISD::BITREVERSE: {
3093     MVT VT = Op.getSimpleValueType();
3094     SDLoc DL(Op);
3095     if (Subtarget.hasStdExtZbp()) {
3096       // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
3097       // Start with the maximum immediate value which is the bitwidth - 1.
3098       unsigned Imm = VT.getSizeInBits() - 1;
3099       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
3100       if (Op.getOpcode() == ISD::BSWAP)
3101         Imm &= ~0x7U;
3102       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
3103                          DAG.getConstant(Imm, DL, VT));
3104     }
3105     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
3106     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
3107     // Expand bitreverse to a bswap(rev8) followed by brev8.
3108     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
3109     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
3110     // as brev8 by an isel pattern.
3111     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
3112                        DAG.getConstant(7, DL, VT));
3113   }
3114   case ISD::FSHL:
3115   case ISD::FSHR: {
3116     MVT VT = Op.getSimpleValueType();
3117     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
3118     SDLoc DL(Op);
3119     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
3120     // use log(XLen) bits. Mask the shift amount accordingly to prevent
3121     // accidentally setting the extra bit.
3122     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
3123     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
3124                                 DAG.getConstant(ShAmtWidth, DL, VT));
3125     // fshl and fshr concatenate their operands in the same order. fsr and fsl
3126     // instruction use different orders. fshl will return its first operand for
3127     // shift of zero, fshr will return its second operand. fsl and fsr both
3128     // return rs1 so the ISD nodes need to have different operand orders.
3129     // Shift amount is in rs2.
3130     SDValue Op0 = Op.getOperand(0);
3131     SDValue Op1 = Op.getOperand(1);
3132     unsigned Opc = RISCVISD::FSL;
3133     if (Op.getOpcode() == ISD::FSHR) {
3134       std::swap(Op0, Op1);
3135       Opc = RISCVISD::FSR;
3136     }
3137     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3138   }
3139   case ISD::TRUNCATE:
3140     // Only custom-lower vector truncates
3141     if (!Op.getSimpleValueType().isVector())
3142       return Op;
3143     return lowerVectorTruncLike(Op, DAG);
3144   case ISD::ANY_EXTEND:
3145   case ISD::ZERO_EXTEND:
3146     if (Op.getOperand(0).getValueType().isVector() &&
3147         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3148       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3149     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3150   case ISD::SIGN_EXTEND:
3151     if (Op.getOperand(0).getValueType().isVector() &&
3152         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3153       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3154     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3155   case ISD::SPLAT_VECTOR_PARTS:
3156     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3157   case ISD::INSERT_VECTOR_ELT:
3158     return lowerINSERT_VECTOR_ELT(Op, DAG);
3159   case ISD::EXTRACT_VECTOR_ELT:
3160     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3161   case ISD::VSCALE: {
3162     MVT VT = Op.getSimpleValueType();
3163     SDLoc DL(Op);
3164     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
3165     // We define our scalable vector types for lmul=1 to use a 64 bit known
3166     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
3167     // vscale as VLENB / 8.
3168     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3169     if (Subtarget.getMinVLen() < RISCV::RVVBitsPerBlock)
3170       report_fatal_error("Support for VLEN==32 is incomplete.");
3171     // We assume VLENB is a multiple of 8. We manually choose the best shift
3172     // here because SimplifyDemandedBits isn't always able to simplify it.
3173     uint64_t Val = Op.getConstantOperandVal(0);
3174     if (isPowerOf2_64(Val)) {
3175       uint64_t Log2 = Log2_64(Val);
3176       if (Log2 < 3)
3177         return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3178                            DAG.getConstant(3 - Log2, DL, VT));
3179       if (Log2 > 3)
3180         return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3181                            DAG.getConstant(Log2 - 3, DL, VT));
3182       return VLENB;
3183     }
3184     // If the multiplier is a multiple of 8, scale it down to avoid needing
3185     // to shift the VLENB value.
3186     if ((Val % 8) == 0)
3187       return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3188                          DAG.getConstant(Val / 8, DL, VT));
3189 
3190     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3191                                  DAG.getConstant(3, DL, VT));
3192     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3193   }
3194   case ISD::FPOWI: {
3195     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
3196     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
3197     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3198         Op.getOperand(1).getValueType() == MVT::i32) {
3199       SDLoc DL(Op);
3200       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3201       SDValue Powi =
3202           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3203       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3204                          DAG.getIntPtrConstant(0, DL));
3205     }
3206     return SDValue();
3207   }
3208   case ISD::FP_EXTEND:
3209   case ISD::FP_ROUND:
3210     if (!Op.getValueType().isVector())
3211       return Op;
3212     return lowerVectorFPExtendOrRoundLike(Op, DAG);
3213   case ISD::FP_TO_SINT:
3214   case ISD::FP_TO_UINT:
3215   case ISD::SINT_TO_FP:
3216   case ISD::UINT_TO_FP: {
3217     // RVV can only do fp<->int conversions to types half/double the size as
3218     // the source. We custom-lower any conversions that do two hops into
3219     // sequences.
3220     MVT VT = Op.getSimpleValueType();
3221     if (!VT.isVector())
3222       return Op;
3223     SDLoc DL(Op);
3224     SDValue Src = Op.getOperand(0);
3225     MVT EltVT = VT.getVectorElementType();
3226     MVT SrcVT = Src.getSimpleValueType();
3227     MVT SrcEltVT = SrcVT.getVectorElementType();
3228     unsigned EltSize = EltVT.getSizeInBits();
3229     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3230     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3231            "Unexpected vector element types");
3232 
3233     bool IsInt2FP = SrcEltVT.isInteger();
3234     // Widening conversions
3235     if (EltSize > (2 * SrcEltSize)) {
3236       if (IsInt2FP) {
3237         // Do a regular integer sign/zero extension then convert to float.
3238         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize),
3239                                       VT.getVectorElementCount());
3240         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3241                                  ? ISD::ZERO_EXTEND
3242                                  : ISD::SIGN_EXTEND;
3243         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3244         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3245       }
3246       // FP2Int
3247       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3248       // Do one doubling fp_extend then complete the operation by converting
3249       // to int.
3250       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3251       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3252       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3253     }
3254 
3255     // Narrowing conversions
3256     if (SrcEltSize > (2 * EltSize)) {
3257       if (IsInt2FP) {
3258         // One narrowing int_to_fp, then an fp_round.
3259         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3260         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3261         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3262         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3263       }
3264       // FP2Int
3265       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3266       // representable by the integer, the result is poison.
3267       MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
3268                                     VT.getVectorElementCount());
3269       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3270       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3271     }
3272 
3273     // Scalable vectors can exit here. Patterns will handle equally-sized
3274     // conversions halving/doubling ones.
3275     if (!VT.isFixedLengthVector())
3276       return Op;
3277 
3278     // For fixed-length vectors we lower to a custom "VL" node.
3279     unsigned RVVOpc = 0;
3280     switch (Op.getOpcode()) {
3281     default:
3282       llvm_unreachable("Impossible opcode");
3283     case ISD::FP_TO_SINT:
3284       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3285       break;
3286     case ISD::FP_TO_UINT:
3287       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3288       break;
3289     case ISD::SINT_TO_FP:
3290       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3291       break;
3292     case ISD::UINT_TO_FP:
3293       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3294       break;
3295     }
3296 
3297     MVT ContainerVT, SrcContainerVT;
3298     // Derive the reference container type from the larger vector type.
3299     if (SrcEltSize > EltSize) {
3300       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3301       ContainerVT =
3302           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3303     } else {
3304       ContainerVT = getContainerForFixedLengthVector(VT);
3305       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3306     }
3307 
3308     SDValue Mask, VL;
3309     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3310 
3311     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3312     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3313     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3314   }
3315   case ISD::FP_TO_SINT_SAT:
3316   case ISD::FP_TO_UINT_SAT:
3317     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3318   case ISD::FTRUNC:
3319   case ISD::FCEIL:
3320   case ISD::FFLOOR:
3321     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3322   case ISD::FROUND:
3323     return lowerFROUND(Op, DAG);
3324   case ISD::VECREDUCE_ADD:
3325   case ISD::VECREDUCE_UMAX:
3326   case ISD::VECREDUCE_SMAX:
3327   case ISD::VECREDUCE_UMIN:
3328   case ISD::VECREDUCE_SMIN:
3329     return lowerVECREDUCE(Op, DAG);
3330   case ISD::VECREDUCE_AND:
3331   case ISD::VECREDUCE_OR:
3332   case ISD::VECREDUCE_XOR:
3333     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3334       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3335     return lowerVECREDUCE(Op, DAG);
3336   case ISD::VECREDUCE_FADD:
3337   case ISD::VECREDUCE_SEQ_FADD:
3338   case ISD::VECREDUCE_FMIN:
3339   case ISD::VECREDUCE_FMAX:
3340     return lowerFPVECREDUCE(Op, DAG);
3341   case ISD::VP_REDUCE_ADD:
3342   case ISD::VP_REDUCE_UMAX:
3343   case ISD::VP_REDUCE_SMAX:
3344   case ISD::VP_REDUCE_UMIN:
3345   case ISD::VP_REDUCE_SMIN:
3346   case ISD::VP_REDUCE_FADD:
3347   case ISD::VP_REDUCE_SEQ_FADD:
3348   case ISD::VP_REDUCE_FMIN:
3349   case ISD::VP_REDUCE_FMAX:
3350     return lowerVPREDUCE(Op, DAG);
3351   case ISD::VP_REDUCE_AND:
3352   case ISD::VP_REDUCE_OR:
3353   case ISD::VP_REDUCE_XOR:
3354     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3355       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3356     return lowerVPREDUCE(Op, DAG);
3357   case ISD::INSERT_SUBVECTOR:
3358     return lowerINSERT_SUBVECTOR(Op, DAG);
3359   case ISD::EXTRACT_SUBVECTOR:
3360     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3361   case ISD::STEP_VECTOR:
3362     return lowerSTEP_VECTOR(Op, DAG);
3363   case ISD::VECTOR_REVERSE:
3364     return lowerVECTOR_REVERSE(Op, DAG);
3365   case ISD::VECTOR_SPLICE:
3366     return lowerVECTOR_SPLICE(Op, DAG);
3367   case ISD::BUILD_VECTOR:
3368     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3369   case ISD::SPLAT_VECTOR:
3370     if (Op.getValueType().getVectorElementType() == MVT::i1)
3371       return lowerVectorMaskSplat(Op, DAG);
3372     return SDValue();
3373   case ISD::VECTOR_SHUFFLE:
3374     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3375   case ISD::CONCAT_VECTORS: {
3376     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3377     // better than going through the stack, as the default expansion does.
3378     SDLoc DL(Op);
3379     MVT VT = Op.getSimpleValueType();
3380     unsigned NumOpElts =
3381         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3382     SDValue Vec = DAG.getUNDEF(VT);
3383     for (const auto &OpIdx : enumerate(Op->ops())) {
3384       SDValue SubVec = OpIdx.value();
3385       // Don't insert undef subvectors.
3386       if (SubVec.isUndef())
3387         continue;
3388       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3389                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3390     }
3391     return Vec;
3392   }
3393   case ISD::LOAD:
3394     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3395       return V;
3396     if (Op.getValueType().isFixedLengthVector())
3397       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3398     return Op;
3399   case ISD::STORE:
3400     if (auto V = expandUnalignedRVVStore(Op, DAG))
3401       return V;
3402     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3403       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3404     return Op;
3405   case ISD::MLOAD:
3406   case ISD::VP_LOAD:
3407     return lowerMaskedLoad(Op, DAG);
3408   case ISD::MSTORE:
3409   case ISD::VP_STORE:
3410     return lowerMaskedStore(Op, DAG);
3411   case ISD::SETCC:
3412     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3413   case ISD::ADD:
3414     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3415   case ISD::SUB:
3416     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3417   case ISD::MUL:
3418     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3419   case ISD::MULHS:
3420     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3421   case ISD::MULHU:
3422     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3423   case ISD::AND:
3424     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3425                                               RISCVISD::AND_VL);
3426   case ISD::OR:
3427     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3428                                               RISCVISD::OR_VL);
3429   case ISD::XOR:
3430     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3431                                               RISCVISD::XOR_VL);
3432   case ISD::SDIV:
3433     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3434   case ISD::SREM:
3435     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3436   case ISD::UDIV:
3437     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3438   case ISD::UREM:
3439     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3440   case ISD::SHL:
3441   case ISD::SRA:
3442   case ISD::SRL:
3443     if (Op.getSimpleValueType().isFixedLengthVector())
3444       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3445     // This can be called for an i32 shift amount that needs to be promoted.
3446     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3447            "Unexpected custom legalisation");
3448     return SDValue();
3449   case ISD::SADDSAT:
3450     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3451   case ISD::UADDSAT:
3452     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3453   case ISD::SSUBSAT:
3454     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3455   case ISD::USUBSAT:
3456     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3457   case ISD::FADD:
3458     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3459   case ISD::FSUB:
3460     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3461   case ISD::FMUL:
3462     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3463   case ISD::FDIV:
3464     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3465   case ISD::FNEG:
3466     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3467   case ISD::FABS:
3468     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3469   case ISD::FSQRT:
3470     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3471   case ISD::FMA:
3472     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3473   case ISD::SMIN:
3474     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3475   case ISD::SMAX:
3476     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3477   case ISD::UMIN:
3478     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3479   case ISD::UMAX:
3480     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3481   case ISD::FMINNUM:
3482     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3483   case ISD::FMAXNUM:
3484     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3485   case ISD::ABS:
3486     return lowerABS(Op, DAG);
3487   case ISD::CTLZ_ZERO_UNDEF:
3488   case ISD::CTTZ_ZERO_UNDEF:
3489     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3490   case ISD::VSELECT:
3491     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3492   case ISD::FCOPYSIGN:
3493     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3494   case ISD::MGATHER:
3495   case ISD::VP_GATHER:
3496     return lowerMaskedGather(Op, DAG);
3497   case ISD::MSCATTER:
3498   case ISD::VP_SCATTER:
3499     return lowerMaskedScatter(Op, DAG);
3500   case ISD::FLT_ROUNDS_:
3501     return lowerGET_ROUNDING(Op, DAG);
3502   case ISD::SET_ROUNDING:
3503     return lowerSET_ROUNDING(Op, DAG);
3504   case ISD::EH_DWARF_CFA:
3505     return lowerEH_DWARF_CFA(Op, DAG);
3506   case ISD::VP_SELECT:
3507     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3508   case ISD::VP_MERGE:
3509     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3510   case ISD::VP_ADD:
3511     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3512   case ISD::VP_SUB:
3513     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3514   case ISD::VP_MUL:
3515     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3516   case ISD::VP_SDIV:
3517     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3518   case ISD::VP_UDIV:
3519     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3520   case ISD::VP_SREM:
3521     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3522   case ISD::VP_UREM:
3523     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3524   case ISD::VP_AND:
3525     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3526   case ISD::VP_OR:
3527     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3528   case ISD::VP_XOR:
3529     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3530   case ISD::VP_ASHR:
3531     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3532   case ISD::VP_LSHR:
3533     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3534   case ISD::VP_SHL:
3535     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3536   case ISD::VP_FADD:
3537     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3538   case ISD::VP_FSUB:
3539     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3540   case ISD::VP_FMUL:
3541     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3542   case ISD::VP_FDIV:
3543     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3544   case ISD::VP_FNEG:
3545     return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
3546   case ISD::VP_FMA:
3547     return lowerVPOp(Op, DAG, RISCVISD::FMA_VL);
3548   case ISD::VP_SIGN_EXTEND:
3549   case ISD::VP_ZERO_EXTEND:
3550     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
3551       return lowerVPExtMaskOp(Op, DAG);
3552     return lowerVPOp(Op, DAG,
3553                      Op.getOpcode() == ISD::VP_SIGN_EXTEND
3554                          ? RISCVISD::VSEXT_VL
3555                          : RISCVISD::VZEXT_VL);
3556   case ISD::VP_TRUNCATE:
3557     return lowerVectorTruncLike(Op, DAG);
3558   case ISD::VP_FP_EXTEND:
3559   case ISD::VP_FP_ROUND:
3560     return lowerVectorFPExtendOrRoundLike(Op, DAG);
3561   case ISD::VP_FPTOSI:
3562     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_SINT_VL);
3563   case ISD::VP_FPTOUI:
3564     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_UINT_VL);
3565   case ISD::VP_SITOFP:
3566     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::SINT_TO_FP_VL);
3567   case ISD::VP_UITOFP:
3568     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::UINT_TO_FP_VL);
3569   case ISD::VP_SETCC:
3570     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
3571       return lowerVPSetCCMaskOp(Op, DAG);
3572     return lowerVPOp(Op, DAG, RISCVISD::SETCC_VL);
3573   }
3574 }
3575 
3576 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3577                              SelectionDAG &DAG, unsigned Flags) {
3578   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3579 }
3580 
3581 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3582                              SelectionDAG &DAG, unsigned Flags) {
3583   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3584                                    Flags);
3585 }
3586 
3587 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3588                              SelectionDAG &DAG, unsigned Flags) {
3589   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3590                                    N->getOffset(), Flags);
3591 }
3592 
3593 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3594                              SelectionDAG &DAG, unsigned Flags) {
3595   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3596 }
3597 
3598 template <class NodeTy>
3599 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3600                                      bool IsLocal) const {
3601   SDLoc DL(N);
3602   EVT Ty = getPointerTy(DAG.getDataLayout());
3603 
3604   if (isPositionIndependent()) {
3605     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3606     if (IsLocal)
3607       // Use PC-relative addressing to access the symbol. This generates the
3608       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3609       // %pcrel_lo(auipc)).
3610       return DAG.getNode(RISCVISD::LLA, DL, Ty, Addr);
3611 
3612     // Use PC-relative addressing to access the GOT for this symbol, then load
3613     // the address from the GOT. This generates the pattern (PseudoLA sym),
3614     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3615     SDValue Load =
3616         SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3617     MachineFunction &MF = DAG.getMachineFunction();
3618     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3619         MachinePointerInfo::getGOT(MF),
3620         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3621             MachineMemOperand::MOInvariant,
3622         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3623     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3624     return Load;
3625   }
3626 
3627   switch (getTargetMachine().getCodeModel()) {
3628   default:
3629     report_fatal_error("Unsupported code model for lowering");
3630   case CodeModel::Small: {
3631     // Generate a sequence for accessing addresses within the first 2 GiB of
3632     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3633     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3634     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3635     SDValue MNHi = DAG.getNode(RISCVISD::HI, DL, Ty, AddrHi);
3636     return DAG.getNode(RISCVISD::ADD_LO, DL, Ty, MNHi, AddrLo);
3637   }
3638   case CodeModel::Medium: {
3639     // Generate a sequence for accessing addresses within any 2GiB range within
3640     // the address space. This generates the pattern (PseudoLLA sym), which
3641     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3642     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3643     return DAG.getNode(RISCVISD::LLA, DL, Ty, Addr);
3644   }
3645   }
3646 }
3647 
3648 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3649                                                 SelectionDAG &DAG) const {
3650   SDLoc DL(Op);
3651   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3652   assert(N->getOffset() == 0 && "unexpected offset in global node");
3653 
3654   const GlobalValue *GV = N->getGlobal();
3655   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3656   return getAddr(N, DAG, IsLocal);
3657 }
3658 
3659 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3660                                                SelectionDAG &DAG) const {
3661   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3662 
3663   return getAddr(N, DAG);
3664 }
3665 
3666 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3667                                                SelectionDAG &DAG) const {
3668   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3669 
3670   return getAddr(N, DAG);
3671 }
3672 
3673 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3674                                             SelectionDAG &DAG) const {
3675   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3676 
3677   return getAddr(N, DAG);
3678 }
3679 
3680 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3681                                               SelectionDAG &DAG,
3682                                               bool UseGOT) const {
3683   SDLoc DL(N);
3684   EVT Ty = getPointerTy(DAG.getDataLayout());
3685   const GlobalValue *GV = N->getGlobal();
3686   MVT XLenVT = Subtarget.getXLenVT();
3687 
3688   if (UseGOT) {
3689     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3690     // load the address from the GOT and add the thread pointer. This generates
3691     // the pattern (PseudoLA_TLS_IE sym), which expands to
3692     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3693     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3694     SDValue Load =
3695         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3696     MachineFunction &MF = DAG.getMachineFunction();
3697     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3698         MachinePointerInfo::getGOT(MF),
3699         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3700             MachineMemOperand::MOInvariant,
3701         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3702     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3703 
3704     // Add the thread pointer.
3705     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3706     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3707   }
3708 
3709   // Generate a sequence for accessing the address relative to the thread
3710   // pointer, with the appropriate adjustment for the thread pointer offset.
3711   // This generates the pattern
3712   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3713   SDValue AddrHi =
3714       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3715   SDValue AddrAdd =
3716       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3717   SDValue AddrLo =
3718       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3719 
3720   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3721   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3722   SDValue MNAdd = SDValue(
3723       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3724       0);
3725   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3726 }
3727 
3728 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3729                                                SelectionDAG &DAG) const {
3730   SDLoc DL(N);
3731   EVT Ty = getPointerTy(DAG.getDataLayout());
3732   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3733   const GlobalValue *GV = N->getGlobal();
3734 
3735   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3736   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3737   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3738   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3739   SDValue Load =
3740       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3741 
3742   // Prepare argument list to generate call.
3743   ArgListTy Args;
3744   ArgListEntry Entry;
3745   Entry.Node = Load;
3746   Entry.Ty = CallTy;
3747   Args.push_back(Entry);
3748 
3749   // Setup call to __tls_get_addr.
3750   TargetLowering::CallLoweringInfo CLI(DAG);
3751   CLI.setDebugLoc(DL)
3752       .setChain(DAG.getEntryNode())
3753       .setLibCallee(CallingConv::C, CallTy,
3754                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3755                     std::move(Args));
3756 
3757   return LowerCallTo(CLI).first;
3758 }
3759 
3760 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3761                                                    SelectionDAG &DAG) const {
3762   SDLoc DL(Op);
3763   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3764   assert(N->getOffset() == 0 && "unexpected offset in global node");
3765 
3766   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3767 
3768   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3769       CallingConv::GHC)
3770     report_fatal_error("In GHC calling convention TLS is not supported");
3771 
3772   SDValue Addr;
3773   switch (Model) {
3774   case TLSModel::LocalExec:
3775     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3776     break;
3777   case TLSModel::InitialExec:
3778     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3779     break;
3780   case TLSModel::LocalDynamic:
3781   case TLSModel::GeneralDynamic:
3782     Addr = getDynamicTLSAddr(N, DAG);
3783     break;
3784   }
3785 
3786   return Addr;
3787 }
3788 
3789 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3790   SDValue CondV = Op.getOperand(0);
3791   SDValue TrueV = Op.getOperand(1);
3792   SDValue FalseV = Op.getOperand(2);
3793   SDLoc DL(Op);
3794   MVT VT = Op.getSimpleValueType();
3795   MVT XLenVT = Subtarget.getXLenVT();
3796 
3797   // Lower vector SELECTs to VSELECTs by splatting the condition.
3798   if (VT.isVector()) {
3799     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3800     SDValue CondSplat = VT.isScalableVector()
3801                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3802                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3803     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3804   }
3805 
3806   // If the result type is XLenVT and CondV is the output of a SETCC node
3807   // which also operated on XLenVT inputs, then merge the SETCC node into the
3808   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3809   // compare+branch instructions. i.e.:
3810   // (select (setcc lhs, rhs, cc), truev, falsev)
3811   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3812   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3813       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3814     SDValue LHS = CondV.getOperand(0);
3815     SDValue RHS = CondV.getOperand(1);
3816     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3817     ISD::CondCode CCVal = CC->get();
3818 
3819     // Special case for a select of 2 constants that have a diffence of 1.
3820     // Normally this is done by DAGCombine, but if the select is introduced by
3821     // type legalization or op legalization, we miss it. Restricting to SETLT
3822     // case for now because that is what signed saturating add/sub need.
3823     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3824     // but we would probably want to swap the true/false values if the condition
3825     // is SETGE/SETLE to avoid an XORI.
3826     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3827         CCVal == ISD::SETLT) {
3828       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3829       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3830       if (TrueVal - 1 == FalseVal)
3831         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3832       if (TrueVal + 1 == FalseVal)
3833         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3834     }
3835 
3836     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3837 
3838     SDValue TargetCC = DAG.getCondCode(CCVal);
3839     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3840     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3841   }
3842 
3843   // Otherwise:
3844   // (select condv, truev, falsev)
3845   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3846   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3847   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3848 
3849   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3850 
3851   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3852 }
3853 
3854 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3855   SDValue CondV = Op.getOperand(1);
3856   SDLoc DL(Op);
3857   MVT XLenVT = Subtarget.getXLenVT();
3858 
3859   if (CondV.getOpcode() == ISD::SETCC &&
3860       CondV.getOperand(0).getValueType() == XLenVT) {
3861     SDValue LHS = CondV.getOperand(0);
3862     SDValue RHS = CondV.getOperand(1);
3863     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3864 
3865     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3866 
3867     SDValue TargetCC = DAG.getCondCode(CCVal);
3868     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3869                        LHS, RHS, TargetCC, Op.getOperand(2));
3870   }
3871 
3872   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3873                      CondV, DAG.getConstant(0, DL, XLenVT),
3874                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3875 }
3876 
3877 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3878   MachineFunction &MF = DAG.getMachineFunction();
3879   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3880 
3881   SDLoc DL(Op);
3882   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3883                                  getPointerTy(MF.getDataLayout()));
3884 
3885   // vastart just stores the address of the VarArgsFrameIndex slot into the
3886   // memory location argument.
3887   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3888   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3889                       MachinePointerInfo(SV));
3890 }
3891 
3892 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3893                                             SelectionDAG &DAG) const {
3894   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3895   MachineFunction &MF = DAG.getMachineFunction();
3896   MachineFrameInfo &MFI = MF.getFrameInfo();
3897   MFI.setFrameAddressIsTaken(true);
3898   Register FrameReg = RI.getFrameRegister(MF);
3899   int XLenInBytes = Subtarget.getXLen() / 8;
3900 
3901   EVT VT = Op.getValueType();
3902   SDLoc DL(Op);
3903   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3904   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3905   while (Depth--) {
3906     int Offset = -(XLenInBytes * 2);
3907     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3908                               DAG.getIntPtrConstant(Offset, DL));
3909     FrameAddr =
3910         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3911   }
3912   return FrameAddr;
3913 }
3914 
3915 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3916                                              SelectionDAG &DAG) const {
3917   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3918   MachineFunction &MF = DAG.getMachineFunction();
3919   MachineFrameInfo &MFI = MF.getFrameInfo();
3920   MFI.setReturnAddressIsTaken(true);
3921   MVT XLenVT = Subtarget.getXLenVT();
3922   int XLenInBytes = Subtarget.getXLen() / 8;
3923 
3924   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3925     return SDValue();
3926 
3927   EVT VT = Op.getValueType();
3928   SDLoc DL(Op);
3929   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3930   if (Depth) {
3931     int Off = -XLenInBytes;
3932     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3933     SDValue Offset = DAG.getConstant(Off, DL, VT);
3934     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3935                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3936                        MachinePointerInfo());
3937   }
3938 
3939   // Return the value of the return address register, marking it an implicit
3940   // live-in.
3941   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3942   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3943 }
3944 
3945 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3946                                                  SelectionDAG &DAG) const {
3947   SDLoc DL(Op);
3948   SDValue Lo = Op.getOperand(0);
3949   SDValue Hi = Op.getOperand(1);
3950   SDValue Shamt = Op.getOperand(2);
3951   EVT VT = Lo.getValueType();
3952 
3953   // if Shamt-XLEN < 0: // Shamt < XLEN
3954   //   Lo = Lo << Shamt
3955   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 ^ Shamt))
3956   // else:
3957   //   Lo = 0
3958   //   Hi = Lo << (Shamt-XLEN)
3959 
3960   SDValue Zero = DAG.getConstant(0, DL, VT);
3961   SDValue One = DAG.getConstant(1, DL, VT);
3962   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3963   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3964   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3965   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
3966 
3967   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3968   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3969   SDValue ShiftRightLo =
3970       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3971   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3972   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3973   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3974 
3975   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3976 
3977   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3978   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3979 
3980   SDValue Parts[2] = {Lo, Hi};
3981   return DAG.getMergeValues(Parts, DL);
3982 }
3983 
3984 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3985                                                   bool IsSRA) const {
3986   SDLoc DL(Op);
3987   SDValue Lo = Op.getOperand(0);
3988   SDValue Hi = Op.getOperand(1);
3989   SDValue Shamt = Op.getOperand(2);
3990   EVT VT = Lo.getValueType();
3991 
3992   // SRA expansion:
3993   //   if Shamt-XLEN < 0: // Shamt < XLEN
3994   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
3995   //     Hi = Hi >>s Shamt
3996   //   else:
3997   //     Lo = Hi >>s (Shamt-XLEN);
3998   //     Hi = Hi >>s (XLEN-1)
3999   //
4000   // SRL expansion:
4001   //   if Shamt-XLEN < 0: // Shamt < XLEN
4002   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
4003   //     Hi = Hi >>u Shamt
4004   //   else:
4005   //     Lo = Hi >>u (Shamt-XLEN);
4006   //     Hi = 0;
4007 
4008   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
4009 
4010   SDValue Zero = DAG.getConstant(0, DL, VT);
4011   SDValue One = DAG.getConstant(1, DL, VT);
4012   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4013   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4014   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4015   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
4016 
4017   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
4018   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
4019   SDValue ShiftLeftHi =
4020       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
4021   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
4022   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
4023   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
4024   SDValue HiFalse =
4025       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
4026 
4027   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4028 
4029   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
4030   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4031 
4032   SDValue Parts[2] = {Lo, Hi};
4033   return DAG.getMergeValues(Parts, DL);
4034 }
4035 
4036 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
4037 // legal equivalently-sized i8 type, so we can use that as a go-between.
4038 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
4039                                                   SelectionDAG &DAG) const {
4040   SDLoc DL(Op);
4041   MVT VT = Op.getSimpleValueType();
4042   SDValue SplatVal = Op.getOperand(0);
4043   // All-zeros or all-ones splats are handled specially.
4044   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
4045     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4046     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
4047   }
4048   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
4049     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4050     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
4051   }
4052   MVT XLenVT = Subtarget.getXLenVT();
4053   assert(SplatVal.getValueType() == XLenVT &&
4054          "Unexpected type for i1 splat value");
4055   MVT InterVT = VT.changeVectorElementType(MVT::i8);
4056   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
4057                          DAG.getConstant(1, DL, XLenVT));
4058   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
4059   SDValue Zero = DAG.getConstant(0, DL, InterVT);
4060   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
4061 }
4062 
4063 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
4064 // illegal (currently only vXi64 RV32).
4065 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4066 // them to VMV_V_X_VL.
4067 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4068                                                      SelectionDAG &DAG) const {
4069   SDLoc DL(Op);
4070   MVT VecVT = Op.getSimpleValueType();
4071   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4072          "Unexpected SPLAT_VECTOR_PARTS lowering");
4073 
4074   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4075   SDValue Lo = Op.getOperand(0);
4076   SDValue Hi = Op.getOperand(1);
4077 
4078   if (VecVT.isFixedLengthVector()) {
4079     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4080     SDLoc DL(Op);
4081     SDValue Mask, VL;
4082     std::tie(Mask, VL) =
4083         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4084 
4085     SDValue Res =
4086         splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
4087     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4088   }
4089 
4090   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4091     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4092     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
4093     // If Hi constant is all the same sign bit as Lo, lower this as a custom
4094     // node in order to try and match RVV vector/scalar instructions.
4095     if ((LoC >> 31) == HiC)
4096       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4097                          Lo, DAG.getRegister(RISCV::X0, MVT::i32));
4098   }
4099 
4100   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
4101   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4102       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4103       Hi.getConstantOperandVal(1) == 31)
4104     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
4105                        DAG.getRegister(RISCV::X0, MVT::i32));
4106 
4107   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
4108   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
4109                      DAG.getUNDEF(VecVT), Lo, Hi,
4110                      DAG.getRegister(RISCV::X0, MVT::i32));
4111 }
4112 
4113 // Custom-lower extensions from mask vectors by using a vselect either with 1
4114 // for zero/any-extension or -1 for sign-extension:
4115 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4116 // Note that any-extension is lowered identically to zero-extension.
4117 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4118                                                 int64_t ExtTrueVal) const {
4119   SDLoc DL(Op);
4120   MVT VecVT = Op.getSimpleValueType();
4121   SDValue Src = Op.getOperand(0);
4122   // Only custom-lower extensions from mask types
4123   assert(Src.getValueType().isVector() &&
4124          Src.getValueType().getVectorElementType() == MVT::i1);
4125 
4126   if (VecVT.isScalableVector()) {
4127     SDValue SplatZero = DAG.getConstant(0, DL, VecVT);
4128     SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, VecVT);
4129     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4130   }
4131 
4132   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4133   MVT I1ContainerVT =
4134       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4135 
4136   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4137 
4138   SDValue Mask, VL;
4139   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4140 
4141   MVT XLenVT = Subtarget.getXLenVT();
4142   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4143   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4144 
4145   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4146                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4147   SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4148                              DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
4149   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4150                                SplatTrueVal, SplatZero, VL);
4151 
4152   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4153 }
4154 
4155 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4156     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4157   MVT ExtVT = Op.getSimpleValueType();
4158   // Only custom-lower extensions from fixed-length vector types.
4159   if (!ExtVT.isFixedLengthVector())
4160     return Op;
4161   MVT VT = Op.getOperand(0).getSimpleValueType();
4162   // Grab the canonical container type for the extended type. Infer the smaller
4163   // type from that to ensure the same number of vector elements, as we know
4164   // the LMUL will be sufficient to hold the smaller type.
4165   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
4166   // Get the extended container type manually to ensure the same number of
4167   // vector elements between source and dest.
4168   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4169                                      ContainerExtVT.getVectorElementCount());
4170 
4171   SDValue Op1 =
4172       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4173 
4174   SDLoc DL(Op);
4175   SDValue Mask, VL;
4176   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4177 
4178   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4179 
4180   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4181 }
4182 
4183 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4184 // setcc operation:
4185 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
4186 SDValue RISCVTargetLowering::lowerVectorMaskTruncLike(SDValue Op,
4187                                                       SelectionDAG &DAG) const {
4188   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
4189   SDLoc DL(Op);
4190   EVT MaskVT = Op.getValueType();
4191   // Only expect to custom-lower truncations to mask types
4192   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4193          "Unexpected type for vector mask lowering");
4194   SDValue Src = Op.getOperand(0);
4195   MVT VecVT = Src.getSimpleValueType();
4196   SDValue Mask, VL;
4197   if (IsVPTrunc) {
4198     Mask = Op.getOperand(1);
4199     VL = Op.getOperand(2);
4200   }
4201   // If this is a fixed vector, we need to convert it to a scalable vector.
4202   MVT ContainerVT = VecVT;
4203 
4204   if (VecVT.isFixedLengthVector()) {
4205     ContainerVT = getContainerForFixedLengthVector(VecVT);
4206     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4207     if (IsVPTrunc) {
4208       MVT MaskContainerVT =
4209           getContainerForFixedLengthVector(Mask.getSimpleValueType());
4210       Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
4211     }
4212   }
4213 
4214   if (!IsVPTrunc) {
4215     std::tie(Mask, VL) =
4216         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4217   }
4218 
4219   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4220   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4221 
4222   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4223                          DAG.getUNDEF(ContainerVT), SplatOne, VL);
4224   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4225                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4226 
4227   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4228   SDValue Trunc =
4229       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4230   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4231                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4232   if (MaskVT.isFixedLengthVector())
4233     Trunc = convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4234   return Trunc;
4235 }
4236 
4237 SDValue RISCVTargetLowering::lowerVectorTruncLike(SDValue Op,
4238                                                   SelectionDAG &DAG) const {
4239   bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
4240   SDLoc DL(Op);
4241 
4242   MVT VT = Op.getSimpleValueType();
4243   // Only custom-lower vector truncates
4244   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
4245 
4246   // Truncates to mask types are handled differently
4247   if (VT.getVectorElementType() == MVT::i1)
4248     return lowerVectorMaskTruncLike(Op, DAG);
4249 
4250   // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
4251   // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
4252   // truncate by one power of two at a time.
4253   MVT DstEltVT = VT.getVectorElementType();
4254 
4255   SDValue Src = Op.getOperand(0);
4256   MVT SrcVT = Src.getSimpleValueType();
4257   MVT SrcEltVT = SrcVT.getVectorElementType();
4258 
4259   assert(DstEltVT.bitsLT(SrcEltVT) && isPowerOf2_64(DstEltVT.getSizeInBits()) &&
4260          isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
4261          "Unexpected vector truncate lowering");
4262 
4263   MVT ContainerVT = SrcVT;
4264   SDValue Mask, VL;
4265   if (IsVPTrunc) {
4266     Mask = Op.getOperand(1);
4267     VL = Op.getOperand(2);
4268   }
4269   if (SrcVT.isFixedLengthVector()) {
4270     ContainerVT = getContainerForFixedLengthVector(SrcVT);
4271     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4272     if (IsVPTrunc) {
4273       MVT MaskVT = getMaskTypeFor(ContainerVT);
4274       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4275     }
4276   }
4277 
4278   SDValue Result = Src;
4279   if (!IsVPTrunc) {
4280     std::tie(Mask, VL) =
4281         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
4282   }
4283 
4284   LLVMContext &Context = *DAG.getContext();
4285   const ElementCount Count = ContainerVT.getVectorElementCount();
4286   do {
4287     SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
4288     EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
4289     Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
4290                          Mask, VL);
4291   } while (SrcEltVT != DstEltVT);
4292 
4293   if (SrcVT.isFixedLengthVector())
4294     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4295 
4296   return Result;
4297 }
4298 
4299 SDValue
4300 RISCVTargetLowering::lowerVectorFPExtendOrRoundLike(SDValue Op,
4301                                                     SelectionDAG &DAG) const {
4302   bool IsVP =
4303       Op.getOpcode() == ISD::VP_FP_ROUND || Op.getOpcode() == ISD::VP_FP_EXTEND;
4304   bool IsExtend =
4305       Op.getOpcode() == ISD::VP_FP_EXTEND || Op.getOpcode() == ISD::FP_EXTEND;
4306   // RVV can only do truncate fp to types half the size as the source. We
4307   // custom-lower f64->f16 rounds via RVV's round-to-odd float
4308   // conversion instruction.
4309   SDLoc DL(Op);
4310   MVT VT = Op.getSimpleValueType();
4311 
4312   assert(VT.isVector() && "Unexpected type for vector truncate lowering");
4313 
4314   SDValue Src = Op.getOperand(0);
4315   MVT SrcVT = Src.getSimpleValueType();
4316 
4317   bool IsDirectExtend = IsExtend && (VT.getVectorElementType() != MVT::f64 ||
4318                                      SrcVT.getVectorElementType() != MVT::f16);
4319   bool IsDirectTrunc = !IsExtend && (VT.getVectorElementType() != MVT::f16 ||
4320                                      SrcVT.getVectorElementType() != MVT::f64);
4321 
4322   bool IsDirectConv = IsDirectExtend || IsDirectTrunc;
4323 
4324   // Prepare any fixed-length vector operands.
4325   MVT ContainerVT = VT;
4326   SDValue Mask, VL;
4327   if (IsVP) {
4328     Mask = Op.getOperand(1);
4329     VL = Op.getOperand(2);
4330   }
4331   if (VT.isFixedLengthVector()) {
4332     MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
4333     ContainerVT =
4334         SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
4335     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
4336     if (IsVP) {
4337       MVT MaskVT = getMaskTypeFor(ContainerVT);
4338       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4339     }
4340   }
4341 
4342   if (!IsVP)
4343     std::tie(Mask, VL) =
4344         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
4345 
4346   unsigned ConvOpc = IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::FP_ROUND_VL;
4347 
4348   if (IsDirectConv) {
4349     Src = DAG.getNode(ConvOpc, DL, ContainerVT, Src, Mask, VL);
4350     if (VT.isFixedLengthVector())
4351       Src = convertFromScalableVector(VT, Src, DAG, Subtarget);
4352     return Src;
4353   }
4354 
4355   unsigned InterConvOpc =
4356       IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::VFNCVT_ROD_VL;
4357 
4358   MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
4359   SDValue IntermediateConv =
4360       DAG.getNode(InterConvOpc, DL, InterVT, Src, Mask, VL);
4361   SDValue Result =
4362       DAG.getNode(ConvOpc, DL, ContainerVT, IntermediateConv, Mask, VL);
4363   if (VT.isFixedLengthVector())
4364     return convertFromScalableVector(VT, Result, DAG, Subtarget);
4365   return Result;
4366 }
4367 
4368 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4369 // first position of a vector, and that vector is slid up to the insert index.
4370 // By limiting the active vector length to index+1 and merging with the
4371 // original vector (with an undisturbed tail policy for elements >= VL), we
4372 // achieve the desired result of leaving all elements untouched except the one
4373 // at VL-1, which is replaced with the desired value.
4374 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4375                                                     SelectionDAG &DAG) const {
4376   SDLoc DL(Op);
4377   MVT VecVT = Op.getSimpleValueType();
4378   SDValue Vec = Op.getOperand(0);
4379   SDValue Val = Op.getOperand(1);
4380   SDValue Idx = Op.getOperand(2);
4381 
4382   if (VecVT.getVectorElementType() == MVT::i1) {
4383     // FIXME: For now we just promote to an i8 vector and insert into that,
4384     // but this is probably not optimal.
4385     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4386     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4387     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4388     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4389   }
4390 
4391   MVT ContainerVT = VecVT;
4392   // If the operand is a fixed-length vector, convert to a scalable one.
4393   if (VecVT.isFixedLengthVector()) {
4394     ContainerVT = getContainerForFixedLengthVector(VecVT);
4395     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4396   }
4397 
4398   MVT XLenVT = Subtarget.getXLenVT();
4399 
4400   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4401   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4402   // Even i64-element vectors on RV32 can be lowered without scalar
4403   // legalization if the most-significant 32 bits of the value are not affected
4404   // by the sign-extension of the lower 32 bits.
4405   // TODO: We could also catch sign extensions of a 32-bit value.
4406   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4407     const auto *CVal = cast<ConstantSDNode>(Val);
4408     if (isInt<32>(CVal->getSExtValue())) {
4409       IsLegalInsert = true;
4410       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4411     }
4412   }
4413 
4414   SDValue Mask, VL;
4415   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4416 
4417   SDValue ValInVec;
4418 
4419   if (IsLegalInsert) {
4420     unsigned Opc =
4421         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4422     if (isNullConstant(Idx)) {
4423       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4424       if (!VecVT.isFixedLengthVector())
4425         return Vec;
4426       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4427     }
4428     ValInVec =
4429         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4430   } else {
4431     // On RV32, i64-element vectors must be specially handled to place the
4432     // value at element 0, by using two vslide1up instructions in sequence on
4433     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4434     // this.
4435     SDValue One = DAG.getConstant(1, DL, XLenVT);
4436     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4437     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4438     MVT I32ContainerVT =
4439         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4440     SDValue I32Mask =
4441         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4442     // Limit the active VL to two.
4443     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
4444     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
4445     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4446     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT,
4447                            DAG.getUNDEF(I32ContainerVT), Zero, InsertI64VL);
4448     // First slide in the hi value, then the lo in underneath it.
4449     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4450                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
4451                            I32Mask, InsertI64VL);
4452     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4453                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo,
4454                            I32Mask, InsertI64VL);
4455     // Bitcast back to the right container type.
4456     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4457   }
4458 
4459   // Now that the value is in a vector, slide it into position.
4460   SDValue InsertVL =
4461       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4462   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4463                                 ValInVec, Idx, Mask, InsertVL);
4464   if (!VecVT.isFixedLengthVector())
4465     return Slideup;
4466   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4467 }
4468 
4469 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4470 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4471 // types this is done using VMV_X_S to allow us to glean information about the
4472 // sign bits of the result.
4473 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4474                                                      SelectionDAG &DAG) const {
4475   SDLoc DL(Op);
4476   SDValue Idx = Op.getOperand(1);
4477   SDValue Vec = Op.getOperand(0);
4478   EVT EltVT = Op.getValueType();
4479   MVT VecVT = Vec.getSimpleValueType();
4480   MVT XLenVT = Subtarget.getXLenVT();
4481 
4482   if (VecVT.getVectorElementType() == MVT::i1) {
4483     if (VecVT.isFixedLengthVector()) {
4484       unsigned NumElts = VecVT.getVectorNumElements();
4485       if (NumElts >= 8) {
4486         MVT WideEltVT;
4487         unsigned WidenVecLen;
4488         SDValue ExtractElementIdx;
4489         SDValue ExtractBitIdx;
4490         unsigned MaxEEW = Subtarget.getELEN();
4491         MVT LargestEltVT = MVT::getIntegerVT(
4492             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4493         if (NumElts <= LargestEltVT.getSizeInBits()) {
4494           assert(isPowerOf2_32(NumElts) &&
4495                  "the number of elements should be power of 2");
4496           WideEltVT = MVT::getIntegerVT(NumElts);
4497           WidenVecLen = 1;
4498           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4499           ExtractBitIdx = Idx;
4500         } else {
4501           WideEltVT = LargestEltVT;
4502           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4503           // extract element index = index / element width
4504           ExtractElementIdx = DAG.getNode(
4505               ISD::SRL, DL, XLenVT, Idx,
4506               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4507           // mask bit index = index % element width
4508           ExtractBitIdx = DAG.getNode(
4509               ISD::AND, DL, XLenVT, Idx,
4510               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4511         }
4512         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4513         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4514         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4515                                          Vec, ExtractElementIdx);
4516         // Extract the bit from GPR.
4517         SDValue ShiftRight =
4518             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4519         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4520                            DAG.getConstant(1, DL, XLenVT));
4521       }
4522     }
4523     // Otherwise, promote to an i8 vector and extract from that.
4524     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4525     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4526     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4527   }
4528 
4529   // If this is a fixed vector, we need to convert it to a scalable vector.
4530   MVT ContainerVT = VecVT;
4531   if (VecVT.isFixedLengthVector()) {
4532     ContainerVT = getContainerForFixedLengthVector(VecVT);
4533     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4534   }
4535 
4536   // If the index is 0, the vector is already in the right position.
4537   if (!isNullConstant(Idx)) {
4538     // Use a VL of 1 to avoid processing more elements than we need.
4539     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4540     SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
4541     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4542                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4543   }
4544 
4545   if (!EltVT.isInteger()) {
4546     // Floating-point extracts are handled in TableGen.
4547     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4548                        DAG.getConstant(0, DL, XLenVT));
4549   }
4550 
4551   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4552   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4553 }
4554 
4555 // Some RVV intrinsics may claim that they want an integer operand to be
4556 // promoted or expanded.
4557 static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
4558                                            const RISCVSubtarget &Subtarget) {
4559   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4560           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4561          "Unexpected opcode");
4562 
4563   if (!Subtarget.hasVInstructions())
4564     return SDValue();
4565 
4566   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4567   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4568   SDLoc DL(Op);
4569 
4570   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4571       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4572   if (!II || !II->hasScalarOperand())
4573     return SDValue();
4574 
4575   unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
4576   assert(SplatOp < Op.getNumOperands());
4577 
4578   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4579   SDValue &ScalarOp = Operands[SplatOp];
4580   MVT OpVT = ScalarOp.getSimpleValueType();
4581   MVT XLenVT = Subtarget.getXLenVT();
4582 
4583   // If this isn't a scalar, or its type is XLenVT we're done.
4584   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4585     return SDValue();
4586 
4587   // Simplest case is that the operand needs to be promoted to XLenVT.
4588   if (OpVT.bitsLT(XLenVT)) {
4589     // If the operand is a constant, sign extend to increase our chances
4590     // of being able to use a .vi instruction. ANY_EXTEND would become a
4591     // a zero extend and the simm5 check in isel would fail.
4592     // FIXME: Should we ignore the upper bits in isel instead?
4593     unsigned ExtOpc =
4594         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4595     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4596     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4597   }
4598 
4599   // Use the previous operand to get the vXi64 VT. The result might be a mask
4600   // VT for compares. Using the previous operand assumes that the previous
4601   // operand will never have a smaller element size than a scalar operand and
4602   // that a widening operation never uses SEW=64.
4603   // NOTE: If this fails the below assert, we can probably just find the
4604   // element count from any operand or result and use it to construct the VT.
4605   assert(II->ScalarOperand > 0 && "Unexpected splat operand!");
4606   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4607 
4608   // The more complex case is when the scalar is larger than XLenVT.
4609   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4610          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4611 
4612   // If this is a sign-extended 32-bit value, we can truncate it and rely on the
4613   // instruction to sign-extend since SEW>XLEN.
4614   if (DAG.ComputeNumSignBits(ScalarOp) > 32) {
4615     ScalarOp = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, ScalarOp);
4616     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4617   }
4618 
4619   switch (IntNo) {
4620   case Intrinsic::riscv_vslide1up:
4621   case Intrinsic::riscv_vslide1down:
4622   case Intrinsic::riscv_vslide1up_mask:
4623   case Intrinsic::riscv_vslide1down_mask: {
4624     // We need to special case these when the scalar is larger than XLen.
4625     unsigned NumOps = Op.getNumOperands();
4626     bool IsMasked = NumOps == 7;
4627 
4628     // Convert the vector source to the equivalent nxvXi32 vector.
4629     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4630     SDValue Vec = DAG.getBitcast(I32VT, Operands[2]);
4631 
4632     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4633                                    DAG.getConstant(0, DL, XLenVT));
4634     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4635                                    DAG.getConstant(1, DL, XLenVT));
4636 
4637     // Double the VL since we halved SEW.
4638     SDValue AVL = getVLOperand(Op);
4639     SDValue I32VL;
4640 
4641     // Optimize for constant AVL
4642     if (isa<ConstantSDNode>(AVL)) {
4643       unsigned EltSize = VT.getScalarSizeInBits();
4644       unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
4645 
4646       unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
4647       unsigned MaxVLMAX =
4648           RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
4649 
4650       unsigned VectorBitsMin = Subtarget.getRealMinVLen();
4651       unsigned MinVLMAX =
4652           RISCVTargetLowering::computeVLMAX(VectorBitsMin, EltSize, MinSize);
4653 
4654       uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
4655       if (AVLInt <= MinVLMAX) {
4656         I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
4657       } else if (AVLInt >= 2 * MaxVLMAX) {
4658         // Just set vl to VLMAX in this situation
4659         RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(I32VT);
4660         SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4661         unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
4662         SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4663         SDValue SETVLMAX = DAG.getTargetConstant(
4664             Intrinsic::riscv_vsetvlimax_opt, DL, MVT::i32);
4665         I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
4666                             LMUL);
4667       } else {
4668         // For AVL between (MinVLMAX, 2 * MaxVLMAX), the actual working vl
4669         // is related to the hardware implementation.
4670         // So let the following code handle
4671       }
4672     }
4673     if (!I32VL) {
4674       RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
4675       SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4676       unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
4677       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4678       SDValue SETVL =
4679           DAG.getTargetConstant(Intrinsic::riscv_vsetvli_opt, DL, MVT::i32);
4680       // Using vsetvli instruction to get actually used length which related to
4681       // the hardware implementation
4682       SDValue VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVL, AVL,
4683                                SEW, LMUL);
4684       I32VL =
4685           DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4686     }
4687 
4688     SDValue I32Mask = getAllOnesMask(I32VT, I32VL, DL, DAG);
4689 
4690     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4691     // instructions.
4692     SDValue Passthru;
4693     if (IsMasked)
4694       Passthru = DAG.getUNDEF(I32VT);
4695     else
4696       Passthru = DAG.getBitcast(I32VT, Operands[1]);
4697 
4698     if (IntNo == Intrinsic::riscv_vslide1up ||
4699         IntNo == Intrinsic::riscv_vslide1up_mask) {
4700       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4701                         ScalarHi, I32Mask, I32VL);
4702       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4703                         ScalarLo, I32Mask, I32VL);
4704     } else {
4705       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4706                         ScalarLo, I32Mask, I32VL);
4707       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4708                         ScalarHi, I32Mask, I32VL);
4709     }
4710 
4711     // Convert back to nxvXi64.
4712     Vec = DAG.getBitcast(VT, Vec);
4713 
4714     if (!IsMasked)
4715       return Vec;
4716     // Apply mask after the operation.
4717     SDValue Mask = Operands[NumOps - 3];
4718     SDValue MaskedOff = Operands[1];
4719     // Assume Policy operand is the last operand.
4720     uint64_t Policy =
4721         cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
4722     // We don't need to select maskedoff if it's undef.
4723     if (MaskedOff.isUndef())
4724       return Vec;
4725     // TAMU
4726     if (Policy == RISCVII::TAIL_AGNOSTIC)
4727       return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
4728                          AVL);
4729     // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
4730     // It's fine because vmerge does not care mask policy.
4731     return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff,
4732                        AVL);
4733   }
4734   }
4735 
4736   // We need to convert the scalar to a splat vector.
4737   SDValue VL = getVLOperand(Op);
4738   assert(VL.getValueType() == XLenVT);
4739   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
4740   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4741 }
4742 
4743 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4744                                                      SelectionDAG &DAG) const {
4745   unsigned IntNo = Op.getConstantOperandVal(0);
4746   SDLoc DL(Op);
4747   MVT XLenVT = Subtarget.getXLenVT();
4748 
4749   switch (IntNo) {
4750   default:
4751     break; // Don't custom lower most intrinsics.
4752   case Intrinsic::thread_pointer: {
4753     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4754     return DAG.getRegister(RISCV::X4, PtrVT);
4755   }
4756   case Intrinsic::riscv_orc_b:
4757   case Intrinsic::riscv_brev8: {
4758     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
4759     unsigned Opc =
4760         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4761     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4762                        DAG.getConstant(7, DL, XLenVT));
4763   }
4764   case Intrinsic::riscv_grev:
4765   case Intrinsic::riscv_gorc: {
4766     unsigned Opc =
4767         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4768     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4769   }
4770   case Intrinsic::riscv_zip:
4771   case Intrinsic::riscv_unzip: {
4772     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4773     // For i32 the immediate is 15. For i64 the immediate is 31.
4774     unsigned Opc =
4775         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4776     unsigned BitWidth = Op.getValueSizeInBits();
4777     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4778     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4779                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4780   }
4781   case Intrinsic::riscv_shfl:
4782   case Intrinsic::riscv_unshfl: {
4783     unsigned Opc =
4784         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4785     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4786   }
4787   case Intrinsic::riscv_bcompress:
4788   case Intrinsic::riscv_bdecompress: {
4789     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4790                                                        : RISCVISD::BDECOMPRESS;
4791     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4792   }
4793   case Intrinsic::riscv_bfp:
4794     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4795                        Op.getOperand(2));
4796   case Intrinsic::riscv_fsl:
4797     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4798                        Op.getOperand(2), Op.getOperand(3));
4799   case Intrinsic::riscv_fsr:
4800     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4801                        Op.getOperand(2), Op.getOperand(3));
4802   case Intrinsic::riscv_vmv_x_s:
4803     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4804     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4805                        Op.getOperand(1));
4806   case Intrinsic::riscv_vmv_v_x:
4807     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4808                             Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
4809                             Subtarget);
4810   case Intrinsic::riscv_vfmv_v_f:
4811     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4812                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4813   case Intrinsic::riscv_vmv_s_x: {
4814     SDValue Scalar = Op.getOperand(2);
4815 
4816     if (Scalar.getValueType().bitsLE(XLenVT)) {
4817       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4818       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4819                          Op.getOperand(1), Scalar, Op.getOperand(3));
4820     }
4821 
4822     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4823 
4824     // This is an i64 value that lives in two scalar registers. We have to
4825     // insert this in a convoluted way. First we build vXi64 splat containing
4826     // the two values that we assemble using some bit math. Next we'll use
4827     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4828     // to merge element 0 from our splat into the source vector.
4829     // FIXME: This is probably not the best way to do this, but it is
4830     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4831     // point.
4832     //   sw lo, (a0)
4833     //   sw hi, 4(a0)
4834     //   vlse vX, (a0)
4835     //
4836     //   vid.v      vVid
4837     //   vmseq.vx   mMask, vVid, 0
4838     //   vmerge.vvm vDest, vSrc, vVal, mMask
4839     MVT VT = Op.getSimpleValueType();
4840     SDValue Vec = Op.getOperand(1);
4841     SDValue VL = getVLOperand(Op);
4842 
4843     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
4844     if (Op.getOperand(1).isUndef())
4845       return SplattedVal;
4846     SDValue SplattedIdx =
4847         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
4848                     DAG.getConstant(0, DL, MVT::i32), VL);
4849 
4850     MVT MaskVT = getMaskTypeFor(VT);
4851     SDValue Mask = getAllOnesMask(VT, VL, DL, DAG);
4852     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4853     SDValue SelectCond =
4854         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4855                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4856     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4857                        Vec, VL);
4858   }
4859   }
4860 
4861   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4862 }
4863 
4864 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4865                                                     SelectionDAG &DAG) const {
4866   unsigned IntNo = Op.getConstantOperandVal(1);
4867   switch (IntNo) {
4868   default:
4869     break;
4870   case Intrinsic::riscv_masked_strided_load: {
4871     SDLoc DL(Op);
4872     MVT XLenVT = Subtarget.getXLenVT();
4873 
4874     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4875     // the selection of the masked intrinsics doesn't do this for us.
4876     SDValue Mask = Op.getOperand(5);
4877     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4878 
4879     MVT VT = Op->getSimpleValueType(0);
4880     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4881 
4882     SDValue PassThru = Op.getOperand(2);
4883     if (!IsUnmasked) {
4884       MVT MaskVT = getMaskTypeFor(ContainerVT);
4885       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4886       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4887     }
4888 
4889     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4890 
4891     SDValue IntID = DAG.getTargetConstant(
4892         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4893         XLenVT);
4894 
4895     auto *Load = cast<MemIntrinsicSDNode>(Op);
4896     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4897     if (IsUnmasked)
4898       Ops.push_back(DAG.getUNDEF(ContainerVT));
4899     else
4900       Ops.push_back(PassThru);
4901     Ops.push_back(Op.getOperand(3)); // Ptr
4902     Ops.push_back(Op.getOperand(4)); // Stride
4903     if (!IsUnmasked)
4904       Ops.push_back(Mask);
4905     Ops.push_back(VL);
4906     if (!IsUnmasked) {
4907       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4908       Ops.push_back(Policy);
4909     }
4910 
4911     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4912     SDValue Result =
4913         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4914                                 Load->getMemoryVT(), Load->getMemOperand());
4915     SDValue Chain = Result.getValue(1);
4916     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4917     return DAG.getMergeValues({Result, Chain}, DL);
4918   }
4919   case Intrinsic::riscv_seg2_load:
4920   case Intrinsic::riscv_seg3_load:
4921   case Intrinsic::riscv_seg4_load:
4922   case Intrinsic::riscv_seg5_load:
4923   case Intrinsic::riscv_seg6_load:
4924   case Intrinsic::riscv_seg7_load:
4925   case Intrinsic::riscv_seg8_load: {
4926     SDLoc DL(Op);
4927     static const Intrinsic::ID VlsegInts[7] = {
4928         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
4929         Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
4930         Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
4931         Intrinsic::riscv_vlseg8};
4932     unsigned NF = Op->getNumValues() - 1;
4933     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
4934     MVT XLenVT = Subtarget.getXLenVT();
4935     MVT VT = Op->getSimpleValueType(0);
4936     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4937 
4938     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4939     SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT);
4940     auto *Load = cast<MemIntrinsicSDNode>(Op);
4941     SmallVector<EVT, 9> ContainerVTs(NF, ContainerVT);
4942     ContainerVTs.push_back(MVT::Other);
4943     SDVTList VTs = DAG.getVTList(ContainerVTs);
4944     SmallVector<SDValue, 12> Ops = {Load->getChain(), IntID};
4945     Ops.insert(Ops.end(), NF, DAG.getUNDEF(ContainerVT));
4946     Ops.push_back(Op.getOperand(2));
4947     Ops.push_back(VL);
4948     SDValue Result =
4949         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4950                                 Load->getMemoryVT(), Load->getMemOperand());
4951     SmallVector<SDValue, 9> Results;
4952     for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++)
4953       Results.push_back(convertFromScalableVector(VT, Result.getValue(RetIdx),
4954                                                   DAG, Subtarget));
4955     Results.push_back(Result.getValue(NF));
4956     return DAG.getMergeValues(Results, DL);
4957   }
4958   }
4959 
4960   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4961 }
4962 
4963 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4964                                                  SelectionDAG &DAG) const {
4965   unsigned IntNo = Op.getConstantOperandVal(1);
4966   switch (IntNo) {
4967   default:
4968     break;
4969   case Intrinsic::riscv_masked_strided_store: {
4970     SDLoc DL(Op);
4971     MVT XLenVT = Subtarget.getXLenVT();
4972 
4973     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4974     // the selection of the masked intrinsics doesn't do this for us.
4975     SDValue Mask = Op.getOperand(5);
4976     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4977 
4978     SDValue Val = Op.getOperand(2);
4979     MVT VT = Val.getSimpleValueType();
4980     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4981 
4982     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4983     if (!IsUnmasked) {
4984       MVT MaskVT = getMaskTypeFor(ContainerVT);
4985       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4986     }
4987 
4988     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4989 
4990     SDValue IntID = DAG.getTargetConstant(
4991         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4992         XLenVT);
4993 
4994     auto *Store = cast<MemIntrinsicSDNode>(Op);
4995     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4996     Ops.push_back(Val);
4997     Ops.push_back(Op.getOperand(3)); // Ptr
4998     Ops.push_back(Op.getOperand(4)); // Stride
4999     if (!IsUnmasked)
5000       Ops.push_back(Mask);
5001     Ops.push_back(VL);
5002 
5003     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
5004                                    Ops, Store->getMemoryVT(),
5005                                    Store->getMemOperand());
5006   }
5007   }
5008 
5009   return SDValue();
5010 }
5011 
5012 static MVT getLMUL1VT(MVT VT) {
5013   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
5014          "Unexpected vector MVT");
5015   return MVT::getScalableVectorVT(
5016       VT.getVectorElementType(),
5017       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
5018 }
5019 
5020 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
5021   switch (ISDOpcode) {
5022   default:
5023     llvm_unreachable("Unhandled reduction");
5024   case ISD::VECREDUCE_ADD:
5025     return RISCVISD::VECREDUCE_ADD_VL;
5026   case ISD::VECREDUCE_UMAX:
5027     return RISCVISD::VECREDUCE_UMAX_VL;
5028   case ISD::VECREDUCE_SMAX:
5029     return RISCVISD::VECREDUCE_SMAX_VL;
5030   case ISD::VECREDUCE_UMIN:
5031     return RISCVISD::VECREDUCE_UMIN_VL;
5032   case ISD::VECREDUCE_SMIN:
5033     return RISCVISD::VECREDUCE_SMIN_VL;
5034   case ISD::VECREDUCE_AND:
5035     return RISCVISD::VECREDUCE_AND_VL;
5036   case ISD::VECREDUCE_OR:
5037     return RISCVISD::VECREDUCE_OR_VL;
5038   case ISD::VECREDUCE_XOR:
5039     return RISCVISD::VECREDUCE_XOR_VL;
5040   }
5041 }
5042 
5043 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
5044                                                          SelectionDAG &DAG,
5045                                                          bool IsVP) const {
5046   SDLoc DL(Op);
5047   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
5048   MVT VecVT = Vec.getSimpleValueType();
5049   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
5050           Op.getOpcode() == ISD::VECREDUCE_OR ||
5051           Op.getOpcode() == ISD::VECREDUCE_XOR ||
5052           Op.getOpcode() == ISD::VP_REDUCE_AND ||
5053           Op.getOpcode() == ISD::VP_REDUCE_OR ||
5054           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
5055          "Unexpected reduction lowering");
5056 
5057   MVT XLenVT = Subtarget.getXLenVT();
5058   assert(Op.getValueType() == XLenVT &&
5059          "Expected reduction output to be legalized to XLenVT");
5060 
5061   MVT ContainerVT = VecVT;
5062   if (VecVT.isFixedLengthVector()) {
5063     ContainerVT = getContainerForFixedLengthVector(VecVT);
5064     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5065   }
5066 
5067   SDValue Mask, VL;
5068   if (IsVP) {
5069     Mask = Op.getOperand(2);
5070     VL = Op.getOperand(3);
5071   } else {
5072     std::tie(Mask, VL) =
5073         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5074   }
5075 
5076   unsigned BaseOpc;
5077   ISD::CondCode CC;
5078   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
5079 
5080   switch (Op.getOpcode()) {
5081   default:
5082     llvm_unreachable("Unhandled reduction");
5083   case ISD::VECREDUCE_AND:
5084   case ISD::VP_REDUCE_AND: {
5085     // vcpop ~x == 0
5086     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
5087     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
5088     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5089     CC = ISD::SETEQ;
5090     BaseOpc = ISD::AND;
5091     break;
5092   }
5093   case ISD::VECREDUCE_OR:
5094   case ISD::VP_REDUCE_OR:
5095     // vcpop x != 0
5096     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5097     CC = ISD::SETNE;
5098     BaseOpc = ISD::OR;
5099     break;
5100   case ISD::VECREDUCE_XOR:
5101   case ISD::VP_REDUCE_XOR: {
5102     // ((vcpop x) & 1) != 0
5103     SDValue One = DAG.getConstant(1, DL, XLenVT);
5104     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5105     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
5106     CC = ISD::SETNE;
5107     BaseOpc = ISD::XOR;
5108     break;
5109   }
5110   }
5111 
5112   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
5113 
5114   if (!IsVP)
5115     return SetCC;
5116 
5117   // Now include the start value in the operation.
5118   // Note that we must return the start value when no elements are operated
5119   // upon. The vcpop instructions we've emitted in each case above will return
5120   // 0 for an inactive vector, and so we've already received the neutral value:
5121   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
5122   // can simply include the start value.
5123   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
5124 }
5125 
5126 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
5127                                             SelectionDAG &DAG) const {
5128   SDLoc DL(Op);
5129   SDValue Vec = Op.getOperand(0);
5130   EVT VecEVT = Vec.getValueType();
5131 
5132   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
5133 
5134   // Due to ordering in legalize types we may have a vector type that needs to
5135   // be split. Do that manually so we can get down to a legal type.
5136   while (getTypeAction(*DAG.getContext(), VecEVT) ==
5137          TargetLowering::TypeSplitVector) {
5138     SDValue Lo, Hi;
5139     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
5140     VecEVT = Lo.getValueType();
5141     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
5142   }
5143 
5144   // TODO: The type may need to be widened rather than split. Or widened before
5145   // it can be split.
5146   if (!isTypeLegal(VecEVT))
5147     return SDValue();
5148 
5149   MVT VecVT = VecEVT.getSimpleVT();
5150   MVT VecEltVT = VecVT.getVectorElementType();
5151   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
5152 
5153   MVT ContainerVT = VecVT;
5154   if (VecVT.isFixedLengthVector()) {
5155     ContainerVT = getContainerForFixedLengthVector(VecVT);
5156     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5157   }
5158 
5159   MVT M1VT = getLMUL1VT(ContainerVT);
5160   MVT XLenVT = Subtarget.getXLenVT();
5161 
5162   SDValue Mask, VL;
5163   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5164 
5165   SDValue NeutralElem =
5166       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
5167   SDValue IdentitySplat =
5168       lowerScalarSplat(SDValue(), NeutralElem, DAG.getConstant(1, DL, XLenVT),
5169                        M1VT, DL, DAG, Subtarget);
5170   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
5171                                   IdentitySplat, Mask, VL);
5172   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5173                              DAG.getConstant(0, DL, XLenVT));
5174   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5175 }
5176 
5177 // Given a reduction op, this function returns the matching reduction opcode,
5178 // the vector SDValue and the scalar SDValue required to lower this to a
5179 // RISCVISD node.
5180 static std::tuple<unsigned, SDValue, SDValue>
5181 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
5182   SDLoc DL(Op);
5183   auto Flags = Op->getFlags();
5184   unsigned Opcode = Op.getOpcode();
5185   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
5186   switch (Opcode) {
5187   default:
5188     llvm_unreachable("Unhandled reduction");
5189   case ISD::VECREDUCE_FADD: {
5190     // Use positive zero if we can. It is cheaper to materialize.
5191     SDValue Zero =
5192         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
5193     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
5194   }
5195   case ISD::VECREDUCE_SEQ_FADD:
5196     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
5197                            Op.getOperand(0));
5198   case ISD::VECREDUCE_FMIN:
5199     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
5200                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5201   case ISD::VECREDUCE_FMAX:
5202     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
5203                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5204   }
5205 }
5206 
5207 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
5208                                               SelectionDAG &DAG) const {
5209   SDLoc DL(Op);
5210   MVT VecEltVT = Op.getSimpleValueType();
5211 
5212   unsigned RVVOpcode;
5213   SDValue VectorVal, ScalarVal;
5214   std::tie(RVVOpcode, VectorVal, ScalarVal) =
5215       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
5216   MVT VecVT = VectorVal.getSimpleValueType();
5217 
5218   MVT ContainerVT = VecVT;
5219   if (VecVT.isFixedLengthVector()) {
5220     ContainerVT = getContainerForFixedLengthVector(VecVT);
5221     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
5222   }
5223 
5224   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
5225   MVT XLenVT = Subtarget.getXLenVT();
5226 
5227   SDValue Mask, VL;
5228   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5229 
5230   SDValue ScalarSplat =
5231       lowerScalarSplat(SDValue(), ScalarVal, DAG.getConstant(1, DL, XLenVT),
5232                        M1VT, DL, DAG, Subtarget);
5233   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
5234                                   VectorVal, ScalarSplat, Mask, VL);
5235   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5236                      DAG.getConstant(0, DL, XLenVT));
5237 }
5238 
5239 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
5240   switch (ISDOpcode) {
5241   default:
5242     llvm_unreachable("Unhandled reduction");
5243   case ISD::VP_REDUCE_ADD:
5244     return RISCVISD::VECREDUCE_ADD_VL;
5245   case ISD::VP_REDUCE_UMAX:
5246     return RISCVISD::VECREDUCE_UMAX_VL;
5247   case ISD::VP_REDUCE_SMAX:
5248     return RISCVISD::VECREDUCE_SMAX_VL;
5249   case ISD::VP_REDUCE_UMIN:
5250     return RISCVISD::VECREDUCE_UMIN_VL;
5251   case ISD::VP_REDUCE_SMIN:
5252     return RISCVISD::VECREDUCE_SMIN_VL;
5253   case ISD::VP_REDUCE_AND:
5254     return RISCVISD::VECREDUCE_AND_VL;
5255   case ISD::VP_REDUCE_OR:
5256     return RISCVISD::VECREDUCE_OR_VL;
5257   case ISD::VP_REDUCE_XOR:
5258     return RISCVISD::VECREDUCE_XOR_VL;
5259   case ISD::VP_REDUCE_FADD:
5260     return RISCVISD::VECREDUCE_FADD_VL;
5261   case ISD::VP_REDUCE_SEQ_FADD:
5262     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
5263   case ISD::VP_REDUCE_FMAX:
5264     return RISCVISD::VECREDUCE_FMAX_VL;
5265   case ISD::VP_REDUCE_FMIN:
5266     return RISCVISD::VECREDUCE_FMIN_VL;
5267   }
5268 }
5269 
5270 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
5271                                            SelectionDAG &DAG) const {
5272   SDLoc DL(Op);
5273   SDValue Vec = Op.getOperand(1);
5274   EVT VecEVT = Vec.getValueType();
5275 
5276   // TODO: The type may need to be widened rather than split. Or widened before
5277   // it can be split.
5278   if (!isTypeLegal(VecEVT))
5279     return SDValue();
5280 
5281   MVT VecVT = VecEVT.getSimpleVT();
5282   MVT VecEltVT = VecVT.getVectorElementType();
5283   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5284 
5285   MVT ContainerVT = VecVT;
5286   if (VecVT.isFixedLengthVector()) {
5287     ContainerVT = getContainerForFixedLengthVector(VecVT);
5288     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5289   }
5290 
5291   SDValue VL = Op.getOperand(3);
5292   SDValue Mask = Op.getOperand(2);
5293 
5294   MVT M1VT = getLMUL1VT(ContainerVT);
5295   MVT XLenVT = Subtarget.getXLenVT();
5296   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5297 
5298   SDValue StartSplat = lowerScalarSplat(SDValue(), Op.getOperand(0),
5299                                         DAG.getConstant(1, DL, XLenVT), M1VT,
5300                                         DL, DAG, Subtarget);
5301   SDValue Reduction =
5302       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5303   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5304                              DAG.getConstant(0, DL, XLenVT));
5305   if (!VecVT.isInteger())
5306     return Elt0;
5307   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5308 }
5309 
5310 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5311                                                    SelectionDAG &DAG) const {
5312   SDValue Vec = Op.getOperand(0);
5313   SDValue SubVec = Op.getOperand(1);
5314   MVT VecVT = Vec.getSimpleValueType();
5315   MVT SubVecVT = SubVec.getSimpleValueType();
5316 
5317   SDLoc DL(Op);
5318   MVT XLenVT = Subtarget.getXLenVT();
5319   unsigned OrigIdx = Op.getConstantOperandVal(2);
5320   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5321 
5322   // We don't have the ability to slide mask vectors up indexed by their i1
5323   // elements; the smallest we can do is i8. Often we are able to bitcast to
5324   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5325   // into a scalable one, we might not necessarily have enough scalable
5326   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
5327   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5328       (OrigIdx != 0 || !Vec.isUndef())) {
5329     if (VecVT.getVectorMinNumElements() >= 8 &&
5330         SubVecVT.getVectorMinNumElements() >= 8) {
5331       assert(OrigIdx % 8 == 0 && "Invalid index");
5332       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5333              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5334              "Unexpected mask vector lowering");
5335       OrigIdx /= 8;
5336       SubVecVT =
5337           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5338                            SubVecVT.isScalableVector());
5339       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5340                                VecVT.isScalableVector());
5341       Vec = DAG.getBitcast(VecVT, Vec);
5342       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5343     } else {
5344       // We can't slide this mask vector up indexed by its i1 elements.
5345       // This poses a problem when we wish to insert a scalable vector which
5346       // can't be re-expressed as a larger type. Just choose the slow path and
5347       // extend to a larger type, then truncate back down.
5348       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5349       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5350       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5351       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5352       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5353                         Op.getOperand(2));
5354       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5355       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5356     }
5357   }
5358 
5359   // If the subvector vector is a fixed-length type, we cannot use subregister
5360   // manipulation to simplify the codegen; we don't know which register of a
5361   // LMUL group contains the specific subvector as we only know the minimum
5362   // register size. Therefore we must slide the vector group up the full
5363   // amount.
5364   if (SubVecVT.isFixedLengthVector()) {
5365     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5366       return Op;
5367     MVT ContainerVT = VecVT;
5368     if (VecVT.isFixedLengthVector()) {
5369       ContainerVT = getContainerForFixedLengthVector(VecVT);
5370       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5371     }
5372     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5373                          DAG.getUNDEF(ContainerVT), SubVec,
5374                          DAG.getConstant(0, DL, XLenVT));
5375     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5376       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5377       return DAG.getBitcast(Op.getValueType(), SubVec);
5378     }
5379     SDValue Mask =
5380         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5381     // Set the vector length to only the number of elements we care about. Note
5382     // that for slideup this includes the offset.
5383     SDValue VL =
5384         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5385     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5386     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5387                                   SubVec, SlideupAmt, Mask, VL);
5388     if (VecVT.isFixedLengthVector())
5389       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5390     return DAG.getBitcast(Op.getValueType(), Slideup);
5391   }
5392 
5393   unsigned SubRegIdx, RemIdx;
5394   std::tie(SubRegIdx, RemIdx) =
5395       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5396           VecVT, SubVecVT, OrigIdx, TRI);
5397 
5398   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5399   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5400                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5401                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5402 
5403   // 1. If the Idx has been completely eliminated and this subvector's size is
5404   // a vector register or a multiple thereof, or the surrounding elements are
5405   // undef, then this is a subvector insert which naturally aligns to a vector
5406   // register. These can easily be handled using subregister manipulation.
5407   // 2. If the subvector is smaller than a vector register, then the insertion
5408   // must preserve the undisturbed elements of the register. We do this by
5409   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5410   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5411   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5412   // LMUL=1 type back into the larger vector (resolving to another subregister
5413   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
5414   // to avoid allocating a large register group to hold our subvector.
5415   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5416     return Op;
5417 
5418   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
5419   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
5420   // (in our case undisturbed). This means we can set up a subvector insertion
5421   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5422   // size of the subvector.
5423   MVT InterSubVT = VecVT;
5424   SDValue AlignedExtract = Vec;
5425   unsigned AlignedIdx = OrigIdx - RemIdx;
5426   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5427     InterSubVT = getLMUL1VT(VecVT);
5428     // Extract a subvector equal to the nearest full vector register type. This
5429     // should resolve to a EXTRACT_SUBREG instruction.
5430     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5431                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5432   }
5433 
5434   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5435   // For scalable vectors this must be further multiplied by vscale.
5436   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5437 
5438   SDValue Mask, VL;
5439   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5440 
5441   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5442   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5443   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5444   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5445 
5446   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5447                        DAG.getUNDEF(InterSubVT), SubVec,
5448                        DAG.getConstant(0, DL, XLenVT));
5449 
5450   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5451                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5452 
5453   // If required, insert this subvector back into the correct vector register.
5454   // This should resolve to an INSERT_SUBREG instruction.
5455   if (VecVT.bitsGT(InterSubVT))
5456     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5457                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5458 
5459   // We might have bitcast from a mask type: cast back to the original type if
5460   // required.
5461   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5462 }
5463 
5464 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5465                                                     SelectionDAG &DAG) const {
5466   SDValue Vec = Op.getOperand(0);
5467   MVT SubVecVT = Op.getSimpleValueType();
5468   MVT VecVT = Vec.getSimpleValueType();
5469 
5470   SDLoc DL(Op);
5471   MVT XLenVT = Subtarget.getXLenVT();
5472   unsigned OrigIdx = Op.getConstantOperandVal(1);
5473   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5474 
5475   // We don't have the ability to slide mask vectors down indexed by their i1
5476   // elements; the smallest we can do is i8. Often we are able to bitcast to
5477   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5478   // from a scalable one, we might not necessarily have enough scalable
5479   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
5480   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5481     if (VecVT.getVectorMinNumElements() >= 8 &&
5482         SubVecVT.getVectorMinNumElements() >= 8) {
5483       assert(OrigIdx % 8 == 0 && "Invalid index");
5484       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5485              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5486              "Unexpected mask vector lowering");
5487       OrigIdx /= 8;
5488       SubVecVT =
5489           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5490                            SubVecVT.isScalableVector());
5491       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5492                                VecVT.isScalableVector());
5493       Vec = DAG.getBitcast(VecVT, Vec);
5494     } else {
5495       // We can't slide this mask vector down, indexed by its i1 elements.
5496       // This poses a problem when we wish to extract a scalable vector which
5497       // can't be re-expressed as a larger type. Just choose the slow path and
5498       // extend to a larger type, then truncate back down.
5499       // TODO: We could probably improve this when extracting certain fixed
5500       // from fixed, where we can extract as i8 and shift the correct element
5501       // right to reach the desired subvector?
5502       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5503       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5504       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5505       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5506                         Op.getOperand(1));
5507       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5508       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5509     }
5510   }
5511 
5512   // If the subvector vector is a fixed-length type, we cannot use subregister
5513   // manipulation to simplify the codegen; we don't know which register of a
5514   // LMUL group contains the specific subvector as we only know the minimum
5515   // register size. Therefore we must slide the vector group down the full
5516   // amount.
5517   if (SubVecVT.isFixedLengthVector()) {
5518     // With an index of 0 this is a cast-like subvector, which can be performed
5519     // with subregister operations.
5520     if (OrigIdx == 0)
5521       return Op;
5522     MVT ContainerVT = VecVT;
5523     if (VecVT.isFixedLengthVector()) {
5524       ContainerVT = getContainerForFixedLengthVector(VecVT);
5525       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5526     }
5527     SDValue Mask =
5528         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5529     // Set the vector length to only the number of elements we care about. This
5530     // avoids sliding down elements we're going to discard straight away.
5531     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5532     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5533     SDValue Slidedown =
5534         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5535                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5536     // Now we can use a cast-like subvector extract to get the result.
5537     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5538                             DAG.getConstant(0, DL, XLenVT));
5539     return DAG.getBitcast(Op.getValueType(), Slidedown);
5540   }
5541 
5542   unsigned SubRegIdx, RemIdx;
5543   std::tie(SubRegIdx, RemIdx) =
5544       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5545           VecVT, SubVecVT, OrigIdx, TRI);
5546 
5547   // If the Idx has been completely eliminated then this is a subvector extract
5548   // which naturally aligns to a vector register. These can easily be handled
5549   // using subregister manipulation.
5550   if (RemIdx == 0)
5551     return Op;
5552 
5553   // Else we must shift our vector register directly to extract the subvector.
5554   // Do this using VSLIDEDOWN.
5555 
5556   // If the vector type is an LMUL-group type, extract a subvector equal to the
5557   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
5558   // instruction.
5559   MVT InterSubVT = VecVT;
5560   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5561     InterSubVT = getLMUL1VT(VecVT);
5562     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5563                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5564   }
5565 
5566   // Slide this vector register down by the desired number of elements in order
5567   // to place the desired subvector starting at element 0.
5568   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5569   // For scalable vectors this must be further multiplied by vscale.
5570   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5571 
5572   SDValue Mask, VL;
5573   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5574   SDValue Slidedown =
5575       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5576                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5577 
5578   // Now the vector is in the right position, extract our final subvector. This
5579   // should resolve to a COPY.
5580   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5581                           DAG.getConstant(0, DL, XLenVT));
5582 
5583   // We might have bitcast from a mask type: cast back to the original type if
5584   // required.
5585   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5586 }
5587 
5588 // Lower step_vector to the vid instruction. Any non-identity step value must
5589 // be accounted for my manual expansion.
5590 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5591                                               SelectionDAG &DAG) const {
5592   SDLoc DL(Op);
5593   MVT VT = Op.getSimpleValueType();
5594   MVT XLenVT = Subtarget.getXLenVT();
5595   SDValue Mask, VL;
5596   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5597   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5598   uint64_t StepValImm = Op.getConstantOperandVal(0);
5599   if (StepValImm != 1) {
5600     if (isPowerOf2_64(StepValImm)) {
5601       SDValue StepVal =
5602           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
5603                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5604       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5605     } else {
5606       SDValue StepVal = lowerScalarSplat(
5607           SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
5608           VL, VT, DL, DAG, Subtarget);
5609       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5610     }
5611   }
5612   return StepVec;
5613 }
5614 
5615 // Implement vector_reverse using vrgather.vv with indices determined by
5616 // subtracting the id of each element from (VLMAX-1). This will convert
5617 // the indices like so:
5618 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5619 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5620 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5621                                                  SelectionDAG &DAG) const {
5622   SDLoc DL(Op);
5623   MVT VecVT = Op.getSimpleValueType();
5624   unsigned EltSize = VecVT.getScalarSizeInBits();
5625   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5626 
5627   unsigned MaxVLMAX = 0;
5628   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5629   if (VectorBitsMax != 0)
5630     MaxVLMAX =
5631         RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
5632 
5633   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5634   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5635 
5636   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5637   // to use vrgatherei16.vv.
5638   // TODO: It's also possible to use vrgatherei16.vv for other types to
5639   // decrease register width for the index calculation.
5640   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
5641     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
5642     // Reverse each half, then reassemble them in reverse order.
5643     // NOTE: It's also possible that after splitting that VLMAX no longer
5644     // requires vrgatherei16.vv.
5645     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5646       SDValue Lo, Hi;
5647       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5648       EVT LoVT, HiVT;
5649       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5650       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5651       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5652       // Reassemble the low and high pieces reversed.
5653       // FIXME: This is a CONCAT_VECTORS.
5654       SDValue Res =
5655           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5656                       DAG.getIntPtrConstant(0, DL));
5657       return DAG.getNode(
5658           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5659           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5660     }
5661 
5662     // Just promote the int type to i16 which will double the LMUL.
5663     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5664     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5665   }
5666 
5667   MVT XLenVT = Subtarget.getXLenVT();
5668   SDValue Mask, VL;
5669   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5670 
5671   // Calculate VLMAX-1 for the desired SEW.
5672   unsigned MinElts = VecVT.getVectorMinNumElements();
5673   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5674                               DAG.getConstant(MinElts, DL, XLenVT));
5675   SDValue VLMinus1 =
5676       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5677 
5678   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5679   bool IsRV32E64 =
5680       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5681   SDValue SplatVL;
5682   if (!IsRV32E64)
5683     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5684   else
5685     SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
5686                           VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
5687 
5688   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5689   SDValue Indices =
5690       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5691 
5692   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5693 }
5694 
5695 SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
5696                                                 SelectionDAG &DAG) const {
5697   SDLoc DL(Op);
5698   SDValue V1 = Op.getOperand(0);
5699   SDValue V2 = Op.getOperand(1);
5700   MVT XLenVT = Subtarget.getXLenVT();
5701   MVT VecVT = Op.getSimpleValueType();
5702 
5703   unsigned MinElts = VecVT.getVectorMinNumElements();
5704   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5705                               DAG.getConstant(MinElts, DL, XLenVT));
5706 
5707   int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
5708   SDValue DownOffset, UpOffset;
5709   if (ImmValue >= 0) {
5710     // The operand is a TargetConstant, we need to rebuild it as a regular
5711     // constant.
5712     DownOffset = DAG.getConstant(ImmValue, DL, XLenVT);
5713     UpOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DownOffset);
5714   } else {
5715     // The operand is a TargetConstant, we need to rebuild it as a regular
5716     // constant rather than negating the original operand.
5717     UpOffset = DAG.getConstant(-ImmValue, DL, XLenVT);
5718     DownOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, UpOffset);
5719   }
5720 
5721   SDValue TrueMask = getAllOnesMask(VecVT, VLMax, DL, DAG);
5722 
5723   SDValue SlideDown =
5724       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT, DAG.getUNDEF(VecVT), V1,
5725                   DownOffset, TrueMask, UpOffset);
5726   return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VecVT, SlideDown, V2, UpOffset,
5727                      TrueMask,
5728                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
5729 }
5730 
5731 SDValue
5732 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5733                                                      SelectionDAG &DAG) const {
5734   SDLoc DL(Op);
5735   auto *Load = cast<LoadSDNode>(Op);
5736 
5737   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5738                                         Load->getMemoryVT(),
5739                                         *Load->getMemOperand()) &&
5740          "Expecting a correctly-aligned load");
5741 
5742   MVT VT = Op.getSimpleValueType();
5743   MVT XLenVT = Subtarget.getXLenVT();
5744   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5745 
5746   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5747 
5748   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5749   SDValue IntID = DAG.getTargetConstant(
5750       IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
5751   SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
5752   if (!IsMaskOp)
5753     Ops.push_back(DAG.getUNDEF(ContainerVT));
5754   Ops.push_back(Load->getBasePtr());
5755   Ops.push_back(VL);
5756   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5757   SDValue NewLoad =
5758       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
5759                               Load->getMemoryVT(), Load->getMemOperand());
5760 
5761   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5762   return DAG.getMergeValues({Result, NewLoad.getValue(1)}, DL);
5763 }
5764 
5765 SDValue
5766 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5767                                                       SelectionDAG &DAG) const {
5768   SDLoc DL(Op);
5769   auto *Store = cast<StoreSDNode>(Op);
5770 
5771   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5772                                         Store->getMemoryVT(),
5773                                         *Store->getMemOperand()) &&
5774          "Expecting a correctly-aligned store");
5775 
5776   SDValue StoreVal = Store->getValue();
5777   MVT VT = StoreVal.getSimpleValueType();
5778   MVT XLenVT = Subtarget.getXLenVT();
5779 
5780   // If the size less than a byte, we need to pad with zeros to make a byte.
5781   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5782     VT = MVT::v8i1;
5783     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5784                            DAG.getConstant(0, DL, VT), StoreVal,
5785                            DAG.getIntPtrConstant(0, DL));
5786   }
5787 
5788   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5789 
5790   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5791 
5792   SDValue NewValue =
5793       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5794 
5795   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5796   SDValue IntID = DAG.getTargetConstant(
5797       IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
5798   return DAG.getMemIntrinsicNode(
5799       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
5800       {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
5801       Store->getMemoryVT(), Store->getMemOperand());
5802 }
5803 
5804 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5805                                              SelectionDAG &DAG) const {
5806   SDLoc DL(Op);
5807   MVT VT = Op.getSimpleValueType();
5808 
5809   const auto *MemSD = cast<MemSDNode>(Op);
5810   EVT MemVT = MemSD->getMemoryVT();
5811   MachineMemOperand *MMO = MemSD->getMemOperand();
5812   SDValue Chain = MemSD->getChain();
5813   SDValue BasePtr = MemSD->getBasePtr();
5814 
5815   SDValue Mask, PassThru, VL;
5816   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5817     Mask = VPLoad->getMask();
5818     PassThru = DAG.getUNDEF(VT);
5819     VL = VPLoad->getVectorLength();
5820   } else {
5821     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5822     Mask = MLoad->getMask();
5823     PassThru = MLoad->getPassThru();
5824   }
5825 
5826   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5827 
5828   MVT XLenVT = Subtarget.getXLenVT();
5829 
5830   MVT ContainerVT = VT;
5831   if (VT.isFixedLengthVector()) {
5832     ContainerVT = getContainerForFixedLengthVector(VT);
5833     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5834     if (!IsUnmasked) {
5835       MVT MaskVT = getMaskTypeFor(ContainerVT);
5836       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5837     }
5838   }
5839 
5840   if (!VL)
5841     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5842 
5843   unsigned IntID =
5844       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5845   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5846   if (IsUnmasked)
5847     Ops.push_back(DAG.getUNDEF(ContainerVT));
5848   else
5849     Ops.push_back(PassThru);
5850   Ops.push_back(BasePtr);
5851   if (!IsUnmasked)
5852     Ops.push_back(Mask);
5853   Ops.push_back(VL);
5854   if (!IsUnmasked)
5855     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5856 
5857   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5858 
5859   SDValue Result =
5860       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5861   Chain = Result.getValue(1);
5862 
5863   if (VT.isFixedLengthVector())
5864     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5865 
5866   return DAG.getMergeValues({Result, Chain}, DL);
5867 }
5868 
5869 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5870                                               SelectionDAG &DAG) const {
5871   SDLoc DL(Op);
5872 
5873   const auto *MemSD = cast<MemSDNode>(Op);
5874   EVT MemVT = MemSD->getMemoryVT();
5875   MachineMemOperand *MMO = MemSD->getMemOperand();
5876   SDValue Chain = MemSD->getChain();
5877   SDValue BasePtr = MemSD->getBasePtr();
5878   SDValue Val, Mask, VL;
5879 
5880   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5881     Val = VPStore->getValue();
5882     Mask = VPStore->getMask();
5883     VL = VPStore->getVectorLength();
5884   } else {
5885     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5886     Val = MStore->getValue();
5887     Mask = MStore->getMask();
5888   }
5889 
5890   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5891 
5892   MVT VT = Val.getSimpleValueType();
5893   MVT XLenVT = Subtarget.getXLenVT();
5894 
5895   MVT ContainerVT = VT;
5896   if (VT.isFixedLengthVector()) {
5897     ContainerVT = getContainerForFixedLengthVector(VT);
5898 
5899     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5900     if (!IsUnmasked) {
5901       MVT MaskVT = getMaskTypeFor(ContainerVT);
5902       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5903     }
5904   }
5905 
5906   if (!VL)
5907     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5908 
5909   unsigned IntID =
5910       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5911   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5912   Ops.push_back(Val);
5913   Ops.push_back(BasePtr);
5914   if (!IsUnmasked)
5915     Ops.push_back(Mask);
5916   Ops.push_back(VL);
5917 
5918   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5919                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5920 }
5921 
5922 SDValue
5923 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5924                                                       SelectionDAG &DAG) const {
5925   MVT InVT = Op.getOperand(0).getSimpleValueType();
5926   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5927 
5928   MVT VT = Op.getSimpleValueType();
5929 
5930   SDValue Op1 =
5931       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5932   SDValue Op2 =
5933       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5934 
5935   SDLoc DL(Op);
5936   SDValue VL =
5937       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5938 
5939   MVT MaskVT = getMaskTypeFor(ContainerVT);
5940   SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
5941 
5942   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5943                             Op.getOperand(2), Mask, VL);
5944 
5945   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5946 }
5947 
5948 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5949     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5950   MVT VT = Op.getSimpleValueType();
5951 
5952   if (VT.getVectorElementType() == MVT::i1)
5953     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5954 
5955   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5956 }
5957 
5958 SDValue
5959 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5960                                                       SelectionDAG &DAG) const {
5961   unsigned Opc;
5962   switch (Op.getOpcode()) {
5963   default: llvm_unreachable("Unexpected opcode!");
5964   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5965   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5966   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5967   }
5968 
5969   return lowerToScalableOp(Op, DAG, Opc);
5970 }
5971 
5972 // Lower vector ABS to smax(X, sub(0, X)).
5973 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5974   SDLoc DL(Op);
5975   MVT VT = Op.getSimpleValueType();
5976   SDValue X = Op.getOperand(0);
5977 
5978   assert(VT.isFixedLengthVector() && "Unexpected type");
5979 
5980   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5981   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5982 
5983   SDValue Mask, VL;
5984   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5985 
5986   SDValue SplatZero = DAG.getNode(
5987       RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
5988       DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5989   SDValue NegX =
5990       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5991   SDValue Max =
5992       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5993 
5994   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5995 }
5996 
5997 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5998     SDValue Op, SelectionDAG &DAG) const {
5999   SDLoc DL(Op);
6000   MVT VT = Op.getSimpleValueType();
6001   SDValue Mag = Op.getOperand(0);
6002   SDValue Sign = Op.getOperand(1);
6003   assert(Mag.getValueType() == Sign.getValueType() &&
6004          "Can only handle COPYSIGN with matching types.");
6005 
6006   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6007   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
6008   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
6009 
6010   SDValue Mask, VL;
6011   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6012 
6013   SDValue CopySign =
6014       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
6015 
6016   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
6017 }
6018 
6019 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
6020     SDValue Op, SelectionDAG &DAG) const {
6021   MVT VT = Op.getSimpleValueType();
6022   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6023 
6024   MVT I1ContainerVT =
6025       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6026 
6027   SDValue CC =
6028       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
6029   SDValue Op1 =
6030       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
6031   SDValue Op2 =
6032       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
6033 
6034   SDLoc DL(Op);
6035   SDValue Mask, VL;
6036   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6037 
6038   SDValue Select =
6039       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
6040 
6041   return convertFromScalableVector(VT, Select, DAG, Subtarget);
6042 }
6043 
6044 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
6045                                                unsigned NewOpc,
6046                                                bool HasMask) const {
6047   MVT VT = Op.getSimpleValueType();
6048   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6049 
6050   // Create list of operands by converting existing ones to scalable types.
6051   SmallVector<SDValue, 6> Ops;
6052   for (const SDValue &V : Op->op_values()) {
6053     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6054 
6055     // Pass through non-vector operands.
6056     if (!V.getValueType().isVector()) {
6057       Ops.push_back(V);
6058       continue;
6059     }
6060 
6061     // "cast" fixed length vector to a scalable vector.
6062     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
6063            "Only fixed length vectors are supported!");
6064     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6065   }
6066 
6067   SDLoc DL(Op);
6068   SDValue Mask, VL;
6069   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6070   if (HasMask)
6071     Ops.push_back(Mask);
6072   Ops.push_back(VL);
6073 
6074   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
6075   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
6076 }
6077 
6078 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
6079 // * Operands of each node are assumed to be in the same order.
6080 // * The EVL operand is promoted from i32 to i64 on RV64.
6081 // * Fixed-length vectors are converted to their scalable-vector container
6082 //   types.
6083 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
6084                                        unsigned RISCVISDOpc) const {
6085   SDLoc DL(Op);
6086   MVT VT = Op.getSimpleValueType();
6087   SmallVector<SDValue, 4> Ops;
6088 
6089   for (const auto &OpIdx : enumerate(Op->ops())) {
6090     SDValue V = OpIdx.value();
6091     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6092     // Pass through operands which aren't fixed-length vectors.
6093     if (!V.getValueType().isFixedLengthVector()) {
6094       Ops.push_back(V);
6095       continue;
6096     }
6097     // "cast" fixed length vector to a scalable vector.
6098     MVT OpVT = V.getSimpleValueType();
6099     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
6100     assert(useRVVForFixedLengthVectorVT(OpVT) &&
6101            "Only fixed length vectors are supported!");
6102     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6103   }
6104 
6105   if (!VT.isFixedLengthVector())
6106     return DAG.getNode(RISCVISDOpc, DL, VT, Ops, Op->getFlags());
6107 
6108   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6109 
6110   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops, Op->getFlags());
6111 
6112   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
6113 }
6114 
6115 SDValue RISCVTargetLowering::lowerVPExtMaskOp(SDValue Op,
6116                                               SelectionDAG &DAG) const {
6117   SDLoc DL(Op);
6118   MVT VT = Op.getSimpleValueType();
6119 
6120   SDValue Src = Op.getOperand(0);
6121   // NOTE: Mask is dropped.
6122   SDValue VL = Op.getOperand(2);
6123 
6124   MVT ContainerVT = VT;
6125   if (VT.isFixedLengthVector()) {
6126     ContainerVT = getContainerForFixedLengthVector(VT);
6127     MVT SrcVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6128     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6129   }
6130 
6131   MVT XLenVT = Subtarget.getXLenVT();
6132   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6133   SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6134                                   DAG.getUNDEF(ContainerVT), Zero, VL);
6135 
6136   SDValue SplatValue = DAG.getConstant(
6137       Op.getOpcode() == ISD::VP_ZERO_EXTEND ? 1 : -1, DL, XLenVT);
6138   SDValue Splat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6139                               DAG.getUNDEF(ContainerVT), SplatValue, VL);
6140 
6141   SDValue Result = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Src,
6142                                Splat, ZeroSplat, VL);
6143   if (!VT.isFixedLengthVector())
6144     return Result;
6145   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6146 }
6147 
6148 SDValue RISCVTargetLowering::lowerVPSetCCMaskOp(SDValue Op,
6149                                                 SelectionDAG &DAG) const {
6150   SDLoc DL(Op);
6151   MVT VT = Op.getSimpleValueType();
6152 
6153   SDValue Op1 = Op.getOperand(0);
6154   SDValue Op2 = Op.getOperand(1);
6155   ISD::CondCode Condition = cast<CondCodeSDNode>(Op.getOperand(2))->get();
6156   // NOTE: Mask is dropped.
6157   SDValue VL = Op.getOperand(4);
6158 
6159   MVT ContainerVT = VT;
6160   if (VT.isFixedLengthVector()) {
6161     ContainerVT = getContainerForFixedLengthVector(VT);
6162     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6163     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6164   }
6165 
6166   SDValue Result;
6167   SDValue AllOneMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
6168 
6169   switch (Condition) {
6170   default:
6171     break;
6172   // X != Y  --> (X^Y)
6173   case ISD::SETNE:
6174     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
6175     break;
6176   // X == Y  --> ~(X^Y)
6177   case ISD::SETEQ: {
6178     SDValue Temp =
6179         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
6180     Result =
6181         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, AllOneMask, VL);
6182     break;
6183   }
6184   // X >s Y   -->  X == 0 & Y == 1  -->  ~X & Y
6185   // X <u Y   -->  X == 0 & Y == 1  -->  ~X & Y
6186   case ISD::SETGT:
6187   case ISD::SETULT: {
6188     SDValue Temp =
6189         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
6190     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Temp, Op2, VL);
6191     break;
6192   }
6193   // X <s Y   --> X == 1 & Y == 0  -->  ~Y & X
6194   // X >u Y   --> X == 1 & Y == 0  -->  ~Y & X
6195   case ISD::SETLT:
6196   case ISD::SETUGT: {
6197     SDValue Temp =
6198         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
6199     Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Op1, Temp, VL);
6200     break;
6201   }
6202   // X >=s Y  --> X == 0 | Y == 1  -->  ~X | Y
6203   // X <=u Y  --> X == 0 | Y == 1  -->  ~X | Y
6204   case ISD::SETGE:
6205   case ISD::SETULE: {
6206     SDValue Temp =
6207         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
6208     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op2, VL);
6209     break;
6210   }
6211   // X <=s Y  --> X == 1 | Y == 0  -->  ~Y | X
6212   // X >=u Y  --> X == 1 | Y == 0  -->  ~Y | X
6213   case ISD::SETLE:
6214   case ISD::SETUGE: {
6215     SDValue Temp =
6216         DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
6217     Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op1, VL);
6218     break;
6219   }
6220   }
6221 
6222   if (!VT.isFixedLengthVector())
6223     return Result;
6224   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6225 }
6226 
6227 // Lower Floating-Point/Integer Type-Convert VP SDNodes
6228 SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
6229                                                 unsigned RISCVISDOpc) const {
6230   SDLoc DL(Op);
6231 
6232   SDValue Src = Op.getOperand(0);
6233   SDValue Mask = Op.getOperand(1);
6234   SDValue VL = Op.getOperand(2);
6235 
6236   MVT DstVT = Op.getSimpleValueType();
6237   MVT SrcVT = Src.getSimpleValueType();
6238   if (DstVT.isFixedLengthVector()) {
6239     DstVT = getContainerForFixedLengthVector(DstVT);
6240     SrcVT = getContainerForFixedLengthVector(SrcVT);
6241     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6242     MVT MaskVT = getMaskTypeFor(DstVT);
6243     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6244   }
6245 
6246   unsigned RISCVISDExtOpc = (RISCVISDOpc == RISCVISD::SINT_TO_FP_VL ||
6247                              RISCVISDOpc == RISCVISD::FP_TO_SINT_VL)
6248                                 ? RISCVISD::VSEXT_VL
6249                                 : RISCVISD::VZEXT_VL;
6250 
6251   unsigned DstEltSize = DstVT.getScalarSizeInBits();
6252   unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
6253 
6254   SDValue Result;
6255   if (DstEltSize >= SrcEltSize) { // Single-width and widening conversion.
6256     if (SrcVT.isInteger()) {
6257       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
6258 
6259       // Do we need to do any pre-widening before converting?
6260       if (SrcEltSize == 1) {
6261         MVT IntVT = DstVT.changeVectorElementTypeToInteger();
6262         MVT XLenVT = Subtarget.getXLenVT();
6263         SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6264         SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6265                                         DAG.getUNDEF(IntVT), Zero, VL);
6266         SDValue One = DAG.getConstant(
6267             RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, XLenVT);
6268         SDValue OneSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6269                                        DAG.getUNDEF(IntVT), One, VL);
6270         Src = DAG.getNode(RISCVISD::VSELECT_VL, DL, IntVT, Src, OneSplat,
6271                           ZeroSplat, VL);
6272       } else if (DstEltSize > (2 * SrcEltSize)) {
6273         // Widen before converting.
6274         MVT IntVT = MVT::getVectorVT(MVT::getIntegerVT(DstEltSize / 2),
6275                                      DstVT.getVectorElementCount());
6276         Src = DAG.getNode(RISCVISDExtOpc, DL, IntVT, Src, Mask, VL);
6277       }
6278 
6279       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6280     } else {
6281       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
6282              "Wrong input/output vector types");
6283 
6284       // Convert f16 to f32 then convert f32 to i64.
6285       if (DstEltSize > (2 * SrcEltSize)) {
6286         assert(SrcVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
6287         MVT InterimFVT =
6288             MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
6289         Src =
6290             DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterimFVT, Src, Mask, VL);
6291       }
6292 
6293       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6294     }
6295   } else { // Narrowing + Conversion
6296     if (SrcVT.isInteger()) {
6297       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
6298       // First do a narrowing convert to an FP type half the size, then round
6299       // the FP type to a small FP type if needed.
6300 
6301       MVT InterimFVT = DstVT;
6302       if (SrcEltSize > (2 * DstEltSize)) {
6303         assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!");
6304         assert(DstVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
6305         InterimFVT = MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
6306       }
6307 
6308       Result = DAG.getNode(RISCVISDOpc, DL, InterimFVT, Src, Mask, VL);
6309 
6310       if (InterimFVT != DstVT) {
6311         Src = Result;
6312         Result = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, DstVT, Src, Mask, VL);
6313       }
6314     } else {
6315       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
6316              "Wrong input/output vector types");
6317       // First do a narrowing conversion to an integer half the size, then
6318       // truncate if needed.
6319 
6320       if (DstEltSize == 1) {
6321         // First convert to the same size integer, then convert to mask using
6322         // setcc.
6323         assert(SrcEltSize >= 16 && "Unexpected FP type!");
6324         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize),
6325                                           DstVT.getVectorElementCount());
6326         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
6327 
6328         // Compare the integer result to 0. The integer should be 0 or 1/-1,
6329         // otherwise the conversion was undefined.
6330         MVT XLenVT = Subtarget.getXLenVT();
6331         SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
6332         SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterimIVT,
6333                                 DAG.getUNDEF(InterimIVT), SplatZero);
6334         Result = DAG.getNode(RISCVISD::SETCC_VL, DL, DstVT, Result, SplatZero,
6335                              DAG.getCondCode(ISD::SETNE), Mask, VL);
6336       } else {
6337         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
6338                                           DstVT.getVectorElementCount());
6339 
6340         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
6341 
6342         while (InterimIVT != DstVT) {
6343           SrcEltSize /= 2;
6344           Src = Result;
6345           InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
6346                                         DstVT.getVectorElementCount());
6347           Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, InterimIVT,
6348                                Src, Mask, VL);
6349         }
6350       }
6351     }
6352   }
6353 
6354   MVT VT = Op.getSimpleValueType();
6355   if (!VT.isFixedLengthVector())
6356     return Result;
6357   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6358 }
6359 
6360 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
6361                                             unsigned MaskOpc,
6362                                             unsigned VecOpc) const {
6363   MVT VT = Op.getSimpleValueType();
6364   if (VT.getVectorElementType() != MVT::i1)
6365     return lowerVPOp(Op, DAG, VecOpc);
6366 
6367   // It is safe to drop mask parameter as masked-off elements are undef.
6368   SDValue Op1 = Op->getOperand(0);
6369   SDValue Op2 = Op->getOperand(1);
6370   SDValue VL = Op->getOperand(3);
6371 
6372   MVT ContainerVT = VT;
6373   const bool IsFixed = VT.isFixedLengthVector();
6374   if (IsFixed) {
6375     ContainerVT = getContainerForFixedLengthVector(VT);
6376     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6377     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6378   }
6379 
6380   SDLoc DL(Op);
6381   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
6382   if (!IsFixed)
6383     return Val;
6384   return convertFromScalableVector(VT, Val, DAG, Subtarget);
6385 }
6386 
6387 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
6388 // matched to a RVV indexed load. The RVV indexed load instructions only
6389 // support the "unsigned unscaled" addressing mode; indices are implicitly
6390 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6391 // signed or scaled indexing is extended to the XLEN value type and scaled
6392 // accordingly.
6393 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
6394                                                SelectionDAG &DAG) const {
6395   SDLoc DL(Op);
6396   MVT VT = Op.getSimpleValueType();
6397 
6398   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6399   EVT MemVT = MemSD->getMemoryVT();
6400   MachineMemOperand *MMO = MemSD->getMemOperand();
6401   SDValue Chain = MemSD->getChain();
6402   SDValue BasePtr = MemSD->getBasePtr();
6403 
6404   ISD::LoadExtType LoadExtType;
6405   SDValue Index, Mask, PassThru, VL;
6406 
6407   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
6408     Index = VPGN->getIndex();
6409     Mask = VPGN->getMask();
6410     PassThru = DAG.getUNDEF(VT);
6411     VL = VPGN->getVectorLength();
6412     // VP doesn't support extending loads.
6413     LoadExtType = ISD::NON_EXTLOAD;
6414   } else {
6415     // Else it must be a MGATHER.
6416     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
6417     Index = MGN->getIndex();
6418     Mask = MGN->getMask();
6419     PassThru = MGN->getPassThru();
6420     LoadExtType = MGN->getExtensionType();
6421   }
6422 
6423   MVT IndexVT = Index.getSimpleValueType();
6424   MVT XLenVT = Subtarget.getXLenVT();
6425 
6426   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6427          "Unexpected VTs!");
6428   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6429   // Targets have to explicitly opt-in for extending vector loads.
6430   assert(LoadExtType == ISD::NON_EXTLOAD &&
6431          "Unexpected extending MGATHER/VP_GATHER");
6432   (void)LoadExtType;
6433 
6434   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6435   // the selection of the masked intrinsics doesn't do this for us.
6436   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6437 
6438   MVT ContainerVT = VT;
6439   if (VT.isFixedLengthVector()) {
6440     ContainerVT = getContainerForFixedLengthVector(VT);
6441     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6442                                ContainerVT.getVectorElementCount());
6443 
6444     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6445 
6446     if (!IsUnmasked) {
6447       MVT MaskVT = getMaskTypeFor(ContainerVT);
6448       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6449       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
6450     }
6451   }
6452 
6453   if (!VL)
6454     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6455 
6456   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6457     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6458     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6459                                    VL);
6460     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6461                         TrueMask, VL);
6462   }
6463 
6464   unsigned IntID =
6465       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
6466   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6467   if (IsUnmasked)
6468     Ops.push_back(DAG.getUNDEF(ContainerVT));
6469   else
6470     Ops.push_back(PassThru);
6471   Ops.push_back(BasePtr);
6472   Ops.push_back(Index);
6473   if (!IsUnmasked)
6474     Ops.push_back(Mask);
6475   Ops.push_back(VL);
6476   if (!IsUnmasked)
6477     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
6478 
6479   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
6480   SDValue Result =
6481       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
6482   Chain = Result.getValue(1);
6483 
6484   if (VT.isFixedLengthVector())
6485     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
6486 
6487   return DAG.getMergeValues({Result, Chain}, DL);
6488 }
6489 
6490 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
6491 // matched to a RVV indexed store. The RVV indexed store instructions only
6492 // support the "unsigned unscaled" addressing mode; indices are implicitly
6493 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6494 // signed or scaled indexing is extended to the XLEN value type and scaled
6495 // accordingly.
6496 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
6497                                                 SelectionDAG &DAG) const {
6498   SDLoc DL(Op);
6499   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6500   EVT MemVT = MemSD->getMemoryVT();
6501   MachineMemOperand *MMO = MemSD->getMemOperand();
6502   SDValue Chain = MemSD->getChain();
6503   SDValue BasePtr = MemSD->getBasePtr();
6504 
6505   bool IsTruncatingStore = false;
6506   SDValue Index, Mask, Val, VL;
6507 
6508   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
6509     Index = VPSN->getIndex();
6510     Mask = VPSN->getMask();
6511     Val = VPSN->getValue();
6512     VL = VPSN->getVectorLength();
6513     // VP doesn't support truncating stores.
6514     IsTruncatingStore = false;
6515   } else {
6516     // Else it must be a MSCATTER.
6517     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
6518     Index = MSN->getIndex();
6519     Mask = MSN->getMask();
6520     Val = MSN->getValue();
6521     IsTruncatingStore = MSN->isTruncatingStore();
6522   }
6523 
6524   MVT VT = Val.getSimpleValueType();
6525   MVT IndexVT = Index.getSimpleValueType();
6526   MVT XLenVT = Subtarget.getXLenVT();
6527 
6528   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6529          "Unexpected VTs!");
6530   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6531   // Targets have to explicitly opt-in for extending vector loads and
6532   // truncating vector stores.
6533   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
6534   (void)IsTruncatingStore;
6535 
6536   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6537   // the selection of the masked intrinsics doesn't do this for us.
6538   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6539 
6540   MVT ContainerVT = VT;
6541   if (VT.isFixedLengthVector()) {
6542     ContainerVT = getContainerForFixedLengthVector(VT);
6543     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6544                                ContainerVT.getVectorElementCount());
6545 
6546     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6547     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
6548 
6549     if (!IsUnmasked) {
6550       MVT MaskVT = getMaskTypeFor(ContainerVT);
6551       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6552     }
6553   }
6554 
6555   if (!VL)
6556     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6557 
6558   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6559     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6560     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6561                                    VL);
6562     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6563                         TrueMask, VL);
6564   }
6565 
6566   unsigned IntID =
6567       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
6568   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6569   Ops.push_back(Val);
6570   Ops.push_back(BasePtr);
6571   Ops.push_back(Index);
6572   if (!IsUnmasked)
6573     Ops.push_back(Mask);
6574   Ops.push_back(VL);
6575 
6576   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
6577                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
6578 }
6579 
6580 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
6581                                                SelectionDAG &DAG) const {
6582   const MVT XLenVT = Subtarget.getXLenVT();
6583   SDLoc DL(Op);
6584   SDValue Chain = Op->getOperand(0);
6585   SDValue SysRegNo = DAG.getTargetConstant(
6586       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6587   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
6588   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
6589 
6590   // Encoding used for rounding mode in RISCV differs from that used in
6591   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
6592   // table, which consists of a sequence of 4-bit fields, each representing
6593   // corresponding FLT_ROUNDS mode.
6594   static const int Table =
6595       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
6596       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
6597       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
6598       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
6599       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
6600 
6601   SDValue Shift =
6602       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
6603   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6604                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6605   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6606                                DAG.getConstant(7, DL, XLenVT));
6607 
6608   return DAG.getMergeValues({Masked, Chain}, DL);
6609 }
6610 
6611 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
6612                                                SelectionDAG &DAG) const {
6613   const MVT XLenVT = Subtarget.getXLenVT();
6614   SDLoc DL(Op);
6615   SDValue Chain = Op->getOperand(0);
6616   SDValue RMValue = Op->getOperand(1);
6617   SDValue SysRegNo = DAG.getTargetConstant(
6618       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6619 
6620   // Encoding used for rounding mode in RISCV differs from that used in
6621   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
6622   // a table, which consists of a sequence of 4-bit fields, each representing
6623   // corresponding RISCV mode.
6624   static const unsigned Table =
6625       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
6626       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
6627       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
6628       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
6629       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
6630 
6631   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
6632                               DAG.getConstant(2, DL, XLenVT));
6633   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6634                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6635   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6636                         DAG.getConstant(0x7, DL, XLenVT));
6637   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
6638                      RMValue);
6639 }
6640 
6641 SDValue RISCVTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
6642                                                SelectionDAG &DAG) const {
6643   MachineFunction &MF = DAG.getMachineFunction();
6644 
6645   bool isRISCV64 = Subtarget.is64Bit();
6646   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6647 
6648   int FI = MF.getFrameInfo().CreateFixedObject(isRISCV64 ? 8 : 4, 0, false);
6649   return DAG.getFrameIndex(FI, PtrVT);
6650 }
6651 
6652 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
6653   switch (IntNo) {
6654   default:
6655     llvm_unreachable("Unexpected Intrinsic");
6656   case Intrinsic::riscv_bcompress:
6657     return RISCVISD::BCOMPRESSW;
6658   case Intrinsic::riscv_bdecompress:
6659     return RISCVISD::BDECOMPRESSW;
6660   case Intrinsic::riscv_bfp:
6661     return RISCVISD::BFPW;
6662   case Intrinsic::riscv_fsl:
6663     return RISCVISD::FSLW;
6664   case Intrinsic::riscv_fsr:
6665     return RISCVISD::FSRW;
6666   }
6667 }
6668 
6669 // Converts the given intrinsic to a i64 operation with any extension.
6670 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
6671                                          unsigned IntNo) {
6672   SDLoc DL(N);
6673   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
6674   // Deal with the Instruction Operands
6675   SmallVector<SDValue, 3> NewOps;
6676   for (SDValue Op : drop_begin(N->ops()))
6677     // Promote the operand to i64 type
6678     NewOps.push_back(DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op));
6679   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOps);
6680   // ReplaceNodeResults requires we maintain the same type for the return value.
6681   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6682 }
6683 
6684 // Returns the opcode of the target-specific SDNode that implements the 32-bit
6685 // form of the given Opcode.
6686 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
6687   switch (Opcode) {
6688   default:
6689     llvm_unreachable("Unexpected opcode");
6690   case ISD::SHL:
6691     return RISCVISD::SLLW;
6692   case ISD::SRA:
6693     return RISCVISD::SRAW;
6694   case ISD::SRL:
6695     return RISCVISD::SRLW;
6696   case ISD::SDIV:
6697     return RISCVISD::DIVW;
6698   case ISD::UDIV:
6699     return RISCVISD::DIVUW;
6700   case ISD::UREM:
6701     return RISCVISD::REMUW;
6702   case ISD::ROTL:
6703     return RISCVISD::ROLW;
6704   case ISD::ROTR:
6705     return RISCVISD::RORW;
6706   }
6707 }
6708 
6709 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
6710 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
6711 // otherwise be promoted to i64, making it difficult to select the
6712 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
6713 // type i8/i16/i32 is lost.
6714 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
6715                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
6716   SDLoc DL(N);
6717   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6718   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
6719   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
6720   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6721   // ReplaceNodeResults requires we maintain the same type for the return value.
6722   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6723 }
6724 
6725 // Converts the given 32-bit operation to a i64 operation with signed extension
6726 // semantic to reduce the signed extension instructions.
6727 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
6728   SDLoc DL(N);
6729   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6730   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6731   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
6732   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6733                                DAG.getValueType(MVT::i32));
6734   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
6735 }
6736 
6737 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
6738                                              SmallVectorImpl<SDValue> &Results,
6739                                              SelectionDAG &DAG) const {
6740   SDLoc DL(N);
6741   switch (N->getOpcode()) {
6742   default:
6743     llvm_unreachable("Don't know how to custom type legalize this operation!");
6744   case ISD::STRICT_FP_TO_SINT:
6745   case ISD::STRICT_FP_TO_UINT:
6746   case ISD::FP_TO_SINT:
6747   case ISD::FP_TO_UINT: {
6748     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6749            "Unexpected custom legalisation");
6750     bool IsStrict = N->isStrictFPOpcode();
6751     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
6752                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
6753     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
6754     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
6755         TargetLowering::TypeSoftenFloat) {
6756       if (!isTypeLegal(Op0.getValueType()))
6757         return;
6758       if (IsStrict) {
6759         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
6760                                 : RISCVISD::STRICT_FCVT_WU_RV64;
6761         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
6762         SDValue Res = DAG.getNode(
6763             Opc, DL, VTs, N->getOperand(0), Op0,
6764             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6765         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6766         Results.push_back(Res.getValue(1));
6767         return;
6768       }
6769       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
6770       SDValue Res =
6771           DAG.getNode(Opc, DL, MVT::i64, Op0,
6772                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6773       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6774       return;
6775     }
6776     // If the FP type needs to be softened, emit a library call using the 'si'
6777     // version. If we left it to default legalization we'd end up with 'di'. If
6778     // the FP type doesn't need to be softened just let generic type
6779     // legalization promote the result type.
6780     RTLIB::Libcall LC;
6781     if (IsSigned)
6782       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
6783     else
6784       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
6785     MakeLibCallOptions CallOptions;
6786     EVT OpVT = Op0.getValueType();
6787     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
6788     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
6789     SDValue Result;
6790     std::tie(Result, Chain) =
6791         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
6792     Results.push_back(Result);
6793     if (IsStrict)
6794       Results.push_back(Chain);
6795     break;
6796   }
6797   case ISD::READCYCLECOUNTER: {
6798     assert(!Subtarget.is64Bit() &&
6799            "READCYCLECOUNTER only has custom type legalization on riscv32");
6800 
6801     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
6802     SDValue RCW =
6803         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
6804 
6805     Results.push_back(
6806         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6807     Results.push_back(RCW.getValue(2));
6808     break;
6809   }
6810   case ISD::MUL: {
6811     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6812     unsigned XLen = Subtarget.getXLen();
6813     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
6814     if (Size > XLen) {
6815       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6816       SDValue LHS = N->getOperand(0);
6817       SDValue RHS = N->getOperand(1);
6818       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6819 
6820       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6821       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6822       // We need exactly one side to be unsigned.
6823       if (LHSIsU == RHSIsU)
6824         return;
6825 
6826       auto MakeMULPair = [&](SDValue S, SDValue U) {
6827         MVT XLenVT = Subtarget.getXLenVT();
6828         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6829         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6830         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6831         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6832         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6833       };
6834 
6835       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6836       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6837 
6838       // The other operand should be signed, but still prefer MULH when
6839       // possible.
6840       if (RHSIsU && LHSIsS && !RHSIsS)
6841         Results.push_back(MakeMULPair(LHS, RHS));
6842       else if (LHSIsU && RHSIsS && !LHSIsS)
6843         Results.push_back(MakeMULPair(RHS, LHS));
6844 
6845       return;
6846     }
6847     LLVM_FALLTHROUGH;
6848   }
6849   case ISD::ADD:
6850   case ISD::SUB:
6851     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6852            "Unexpected custom legalisation");
6853     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6854     break;
6855   case ISD::SHL:
6856   case ISD::SRA:
6857   case ISD::SRL:
6858     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6859            "Unexpected custom legalisation");
6860     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6861       // If we can use a BSET instruction, allow default promotion to apply.
6862       if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() &&
6863           isOneConstant(N->getOperand(0)))
6864         break;
6865       Results.push_back(customLegalizeToWOp(N, DAG));
6866       break;
6867     }
6868 
6869     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6870     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6871     // shift amount.
6872     if (N->getOpcode() == ISD::SHL) {
6873       SDLoc DL(N);
6874       SDValue NewOp0 =
6875           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6876       SDValue NewOp1 =
6877           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6878       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6879       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6880                                    DAG.getValueType(MVT::i32));
6881       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6882     }
6883 
6884     break;
6885   case ISD::ROTL:
6886   case ISD::ROTR:
6887     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6888            "Unexpected custom legalisation");
6889     Results.push_back(customLegalizeToWOp(N, DAG));
6890     break;
6891   case ISD::CTTZ:
6892   case ISD::CTTZ_ZERO_UNDEF:
6893   case ISD::CTLZ:
6894   case ISD::CTLZ_ZERO_UNDEF: {
6895     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6896            "Unexpected custom legalisation");
6897 
6898     SDValue NewOp0 =
6899         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6900     bool IsCTZ =
6901         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6902     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6903     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6904     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6905     return;
6906   }
6907   case ISD::SDIV:
6908   case ISD::UDIV:
6909   case ISD::UREM: {
6910     MVT VT = N->getSimpleValueType(0);
6911     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6912            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6913            "Unexpected custom legalisation");
6914     // Don't promote division/remainder by constant since we should expand those
6915     // to multiply by magic constant.
6916     // FIXME: What if the expansion is disabled for minsize.
6917     if (N->getOperand(1).getOpcode() == ISD::Constant)
6918       return;
6919 
6920     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6921     // the upper 32 bits. For other types we need to sign or zero extend
6922     // based on the opcode.
6923     unsigned ExtOpc = ISD::ANY_EXTEND;
6924     if (VT != MVT::i32)
6925       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6926                                            : ISD::ZERO_EXTEND;
6927 
6928     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6929     break;
6930   }
6931   case ISD::UADDO:
6932   case ISD::USUBO: {
6933     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6934            "Unexpected custom legalisation");
6935     bool IsAdd = N->getOpcode() == ISD::UADDO;
6936     // Create an ADDW or SUBW.
6937     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6938     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6939     SDValue Res =
6940         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6941     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6942                       DAG.getValueType(MVT::i32));
6943 
6944     SDValue Overflow;
6945     if (IsAdd && isOneConstant(RHS)) {
6946       // Special case uaddo X, 1 overflowed if the addition result is 0.
6947       // The general case (X + C) < C is not necessarily beneficial. Although we
6948       // reduce the live range of X, we may introduce the materialization of
6949       // constant C, especially when the setcc result is used by branch. We have
6950       // no compare with constant and branch instructions.
6951       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
6952                               DAG.getConstant(0, DL, MVT::i64), ISD::SETEQ);
6953     } else {
6954       // Sign extend the LHS and perform an unsigned compare with the ADDW
6955       // result. Since the inputs are sign extended from i32, this is equivalent
6956       // to comparing the lower 32 bits.
6957       LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6958       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
6959                               IsAdd ? ISD::SETULT : ISD::SETUGT);
6960     }
6961 
6962     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6963     Results.push_back(Overflow);
6964     return;
6965   }
6966   case ISD::UADDSAT:
6967   case ISD::USUBSAT: {
6968     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6969            "Unexpected custom legalisation");
6970     if (Subtarget.hasStdExtZbb()) {
6971       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6972       // sign extend allows overflow of the lower 32 bits to be detected on
6973       // the promoted size.
6974       SDValue LHS =
6975           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6976       SDValue RHS =
6977           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6978       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6979       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6980       return;
6981     }
6982 
6983     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6984     // promotion for UADDO/USUBO.
6985     Results.push_back(expandAddSubSat(N, DAG));
6986     return;
6987   }
6988   case ISD::ABS: {
6989     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6990            "Unexpected custom legalisation");
6991           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6992 
6993     // Expand abs to Y = (sraiw X, 31); subw(xor(X, Y), Y)
6994 
6995     SDValue Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6996 
6997     // Freeze the source so we can increase it's use count.
6998     Src = DAG.getFreeze(Src);
6999 
7000     // Copy sign bit to all bits using the sraiw pattern.
7001     SDValue SignFill = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Src,
7002                                    DAG.getValueType(MVT::i32));
7003     SignFill = DAG.getNode(ISD::SRA, DL, MVT::i64, SignFill,
7004                            DAG.getConstant(31, DL, MVT::i64));
7005 
7006     SDValue NewRes = DAG.getNode(ISD::XOR, DL, MVT::i64, Src, SignFill);
7007     NewRes = DAG.getNode(ISD::SUB, DL, MVT::i64, NewRes, SignFill);
7008 
7009     // NOTE: The result is only required to be anyextended, but sext is
7010     // consistent with type legalization of sub.
7011     NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewRes,
7012                          DAG.getValueType(MVT::i32));
7013     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
7014     return;
7015   }
7016   case ISD::BITCAST: {
7017     EVT VT = N->getValueType(0);
7018     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
7019     SDValue Op0 = N->getOperand(0);
7020     EVT Op0VT = Op0.getValueType();
7021     MVT XLenVT = Subtarget.getXLenVT();
7022     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
7023       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
7024       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
7025     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
7026                Subtarget.hasStdExtF()) {
7027       SDValue FPConv =
7028           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
7029       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
7030     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
7031                isTypeLegal(Op0VT)) {
7032       // Custom-legalize bitcasts from fixed-length vector types to illegal
7033       // scalar types in order to improve codegen. Bitcast the vector to a
7034       // one-element vector type whose element type is the same as the result
7035       // type, and extract the first element.
7036       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
7037       if (isTypeLegal(BVT)) {
7038         SDValue BVec = DAG.getBitcast(BVT, Op0);
7039         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
7040                                       DAG.getConstant(0, DL, XLenVT)));
7041       }
7042     }
7043     break;
7044   }
7045   case RISCVISD::GREV:
7046   case RISCVISD::GORC:
7047   case RISCVISD::SHFL: {
7048     MVT VT = N->getSimpleValueType(0);
7049     MVT XLenVT = Subtarget.getXLenVT();
7050     assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
7051            "Unexpected custom legalisation");
7052     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
7053     assert((Subtarget.hasStdExtZbp() ||
7054             (Subtarget.hasStdExtZbkb() && N->getOpcode() == RISCVISD::GREV &&
7055              N->getConstantOperandVal(1) == 7)) &&
7056            "Unexpected extension");
7057     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
7058     SDValue NewOp1 =
7059         DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, N->getOperand(1));
7060     SDValue NewRes = DAG.getNode(N->getOpcode(), DL, XLenVT, NewOp0, NewOp1);
7061     // ReplaceNodeResults requires we maintain the same type for the return
7062     // value.
7063     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes));
7064     break;
7065   }
7066   case ISD::BSWAP:
7067   case ISD::BITREVERSE: {
7068     MVT VT = N->getSimpleValueType(0);
7069     MVT XLenVT = Subtarget.getXLenVT();
7070     assert((VT == MVT::i8 || VT == MVT::i16 ||
7071             (VT == MVT::i32 && Subtarget.is64Bit())) &&
7072            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
7073     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
7074     unsigned Imm = VT.getSizeInBits() - 1;
7075     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
7076     if (N->getOpcode() == ISD::BSWAP)
7077       Imm &= ~0x7U;
7078     SDValue GREVI = DAG.getNode(RISCVISD::GREV, DL, XLenVT, NewOp0,
7079                                 DAG.getConstant(Imm, DL, XLenVT));
7080     // ReplaceNodeResults requires we maintain the same type for the return
7081     // value.
7082     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
7083     break;
7084   }
7085   case ISD::FSHL:
7086   case ISD::FSHR: {
7087     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7088            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
7089     SDValue NewOp0 =
7090         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
7091     SDValue NewOp1 =
7092         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7093     SDValue NewShAmt =
7094         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7095     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
7096     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
7097     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
7098                            DAG.getConstant(0x1f, DL, MVT::i64));
7099     // fshl and fshr concatenate their operands in the same order. fsrw and fslw
7100     // instruction use different orders. fshl will return its first operand for
7101     // shift of zero, fshr will return its second operand. fsl and fsr both
7102     // return rs1 so the ISD nodes need to have different operand orders.
7103     // Shift amount is in rs2.
7104     unsigned Opc = RISCVISD::FSLW;
7105     if (N->getOpcode() == ISD::FSHR) {
7106       std::swap(NewOp0, NewOp1);
7107       Opc = RISCVISD::FSRW;
7108     }
7109     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
7110     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
7111     break;
7112   }
7113   case ISD::EXTRACT_VECTOR_ELT: {
7114     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
7115     // type is illegal (currently only vXi64 RV32).
7116     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
7117     // transferred to the destination register. We issue two of these from the
7118     // upper- and lower- halves of the SEW-bit vector element, slid down to the
7119     // first element.
7120     SDValue Vec = N->getOperand(0);
7121     SDValue Idx = N->getOperand(1);
7122 
7123     // The vector type hasn't been legalized yet so we can't issue target
7124     // specific nodes if it needs legalization.
7125     // FIXME: We would manually legalize if it's important.
7126     if (!isTypeLegal(Vec.getValueType()))
7127       return;
7128 
7129     MVT VecVT = Vec.getSimpleValueType();
7130 
7131     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
7132            VecVT.getVectorElementType() == MVT::i64 &&
7133            "Unexpected EXTRACT_VECTOR_ELT legalization");
7134 
7135     // If this is a fixed vector, we need to convert it to a scalable vector.
7136     MVT ContainerVT = VecVT;
7137     if (VecVT.isFixedLengthVector()) {
7138       ContainerVT = getContainerForFixedLengthVector(VecVT);
7139       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
7140     }
7141 
7142     MVT XLenVT = Subtarget.getXLenVT();
7143 
7144     // Use a VL of 1 to avoid processing more elements than we need.
7145     SDValue VL = DAG.getConstant(1, DL, XLenVT);
7146     SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
7147 
7148     // Unless the index is known to be 0, we must slide the vector down to get
7149     // the desired element into index 0.
7150     if (!isNullConstant(Idx)) {
7151       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
7152                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
7153     }
7154 
7155     // Extract the lower XLEN bits of the correct vector element.
7156     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7157 
7158     // To extract the upper XLEN bits of the vector element, shift the first
7159     // element right by 32 bits and re-extract the lower XLEN bits.
7160     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
7161                                      DAG.getUNDEF(ContainerVT),
7162                                      DAG.getConstant(32, DL, XLenVT), VL);
7163     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
7164                                  ThirtyTwoV, Mask, VL);
7165 
7166     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7167 
7168     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7169     break;
7170   }
7171   case ISD::INTRINSIC_WO_CHAIN: {
7172     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
7173     switch (IntNo) {
7174     default:
7175       llvm_unreachable(
7176           "Don't know how to custom type legalize this intrinsic!");
7177     case Intrinsic::riscv_grev:
7178     case Intrinsic::riscv_gorc: {
7179       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7180              "Unexpected custom legalisation");
7181       SDValue NewOp1 =
7182           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7183       SDValue NewOp2 =
7184           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7185       unsigned Opc =
7186           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
7187       // If the control is a constant, promote the node by clearing any extra
7188       // bits bits in the control. isel will form greviw/gorciw if the result is
7189       // sign extended.
7190       if (isa<ConstantSDNode>(NewOp2)) {
7191         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
7192                              DAG.getConstant(0x1f, DL, MVT::i64));
7193         Opc = IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
7194       }
7195       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
7196       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7197       break;
7198     }
7199     case Intrinsic::riscv_bcompress:
7200     case Intrinsic::riscv_bdecompress:
7201     case Intrinsic::riscv_bfp:
7202     case Intrinsic::riscv_fsl:
7203     case Intrinsic::riscv_fsr: {
7204       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7205              "Unexpected custom legalisation");
7206       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
7207       break;
7208     }
7209     case Intrinsic::riscv_orc_b: {
7210       // Lower to the GORCI encoding for orc.b with the operand extended.
7211       SDValue NewOp =
7212           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7213       SDValue Res = DAG.getNode(RISCVISD::GORC, DL, MVT::i64, NewOp,
7214                                 DAG.getConstant(7, DL, MVT::i64));
7215       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7216       return;
7217     }
7218     case Intrinsic::riscv_shfl:
7219     case Intrinsic::riscv_unshfl: {
7220       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7221              "Unexpected custom legalisation");
7222       SDValue NewOp1 =
7223           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7224       SDValue NewOp2 =
7225           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7226       unsigned Opc =
7227           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
7228       // There is no (UN)SHFLIW. If the control word is a constant, we can use
7229       // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
7230       // will be shuffled the same way as the lower 32 bit half, but the two
7231       // halves won't cross.
7232       if (isa<ConstantSDNode>(NewOp2)) {
7233         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
7234                              DAG.getConstant(0xf, DL, MVT::i64));
7235         Opc =
7236             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
7237       }
7238       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
7239       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7240       break;
7241     }
7242     case Intrinsic::riscv_vmv_x_s: {
7243       EVT VT = N->getValueType(0);
7244       MVT XLenVT = Subtarget.getXLenVT();
7245       if (VT.bitsLT(XLenVT)) {
7246         // Simple case just extract using vmv.x.s and truncate.
7247         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
7248                                       Subtarget.getXLenVT(), N->getOperand(1));
7249         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
7250         return;
7251       }
7252 
7253       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
7254              "Unexpected custom legalization");
7255 
7256       // We need to do the move in two steps.
7257       SDValue Vec = N->getOperand(1);
7258       MVT VecVT = Vec.getSimpleValueType();
7259 
7260       // First extract the lower XLEN bits of the element.
7261       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7262 
7263       // To extract the upper XLEN bits of the vector element, shift the first
7264       // element right by 32 bits and re-extract the lower XLEN bits.
7265       SDValue VL = DAG.getConstant(1, DL, XLenVT);
7266       SDValue Mask = getAllOnesMask(VecVT, VL, DL, DAG);
7267 
7268       SDValue ThirtyTwoV =
7269           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
7270                       DAG.getConstant(32, DL, XLenVT), VL);
7271       SDValue LShr32 =
7272           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
7273       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7274 
7275       Results.push_back(
7276           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7277       break;
7278     }
7279     }
7280     break;
7281   }
7282   case ISD::VECREDUCE_ADD:
7283   case ISD::VECREDUCE_AND:
7284   case ISD::VECREDUCE_OR:
7285   case ISD::VECREDUCE_XOR:
7286   case ISD::VECREDUCE_SMAX:
7287   case ISD::VECREDUCE_UMAX:
7288   case ISD::VECREDUCE_SMIN:
7289   case ISD::VECREDUCE_UMIN:
7290     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
7291       Results.push_back(V);
7292     break;
7293   case ISD::VP_REDUCE_ADD:
7294   case ISD::VP_REDUCE_AND:
7295   case ISD::VP_REDUCE_OR:
7296   case ISD::VP_REDUCE_XOR:
7297   case ISD::VP_REDUCE_SMAX:
7298   case ISD::VP_REDUCE_UMAX:
7299   case ISD::VP_REDUCE_SMIN:
7300   case ISD::VP_REDUCE_UMIN:
7301     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
7302       Results.push_back(V);
7303     break;
7304   case ISD::FLT_ROUNDS_: {
7305     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
7306     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
7307     Results.push_back(Res.getValue(0));
7308     Results.push_back(Res.getValue(1));
7309     break;
7310   }
7311   }
7312 }
7313 
7314 // A structure to hold one of the bit-manipulation patterns below. Together, a
7315 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
7316 //   (or (and (shl x, 1), 0xAAAAAAAA),
7317 //       (and (srl x, 1), 0x55555555))
7318 struct RISCVBitmanipPat {
7319   SDValue Op;
7320   unsigned ShAmt;
7321   bool IsSHL;
7322 
7323   bool formsPairWith(const RISCVBitmanipPat &Other) const {
7324     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
7325   }
7326 };
7327 
7328 // Matches patterns of the form
7329 //   (and (shl x, C2), (C1 << C2))
7330 //   (and (srl x, C2), C1)
7331 //   (shl (and x, C1), C2)
7332 //   (srl (and x, (C1 << C2)), C2)
7333 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
7334 // The expected masks for each shift amount are specified in BitmanipMasks where
7335 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
7336 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
7337 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
7338 // XLen is 64.
7339 static Optional<RISCVBitmanipPat>
7340 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
7341   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
7342          "Unexpected number of masks");
7343   Optional<uint64_t> Mask;
7344   // Optionally consume a mask around the shift operation.
7345   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
7346     Mask = Op.getConstantOperandVal(1);
7347     Op = Op.getOperand(0);
7348   }
7349   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
7350     return None;
7351   bool IsSHL = Op.getOpcode() == ISD::SHL;
7352 
7353   if (!isa<ConstantSDNode>(Op.getOperand(1)))
7354     return None;
7355   uint64_t ShAmt = Op.getConstantOperandVal(1);
7356 
7357   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7358   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
7359     return None;
7360   // If we don't have enough masks for 64 bit, then we must be trying to
7361   // match SHFL so we're only allowed to shift 1/4 of the width.
7362   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
7363     return None;
7364 
7365   SDValue Src = Op.getOperand(0);
7366 
7367   // The expected mask is shifted left when the AND is found around SHL
7368   // patterns.
7369   //   ((x >> 1) & 0x55555555)
7370   //   ((x << 1) & 0xAAAAAAAA)
7371   bool SHLExpMask = IsSHL;
7372 
7373   if (!Mask) {
7374     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
7375     // the mask is all ones: consume that now.
7376     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
7377       Mask = Src.getConstantOperandVal(1);
7378       Src = Src.getOperand(0);
7379       // The expected mask is now in fact shifted left for SRL, so reverse the
7380       // decision.
7381       //   ((x & 0xAAAAAAAA) >> 1)
7382       //   ((x & 0x55555555) << 1)
7383       SHLExpMask = !SHLExpMask;
7384     } else {
7385       // Use a default shifted mask of all-ones if there's no AND, truncated
7386       // down to the expected width. This simplifies the logic later on.
7387       Mask = maskTrailingOnes<uint64_t>(Width);
7388       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
7389     }
7390   }
7391 
7392   unsigned MaskIdx = Log2_32(ShAmt);
7393   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7394 
7395   if (SHLExpMask)
7396     ExpMask <<= ShAmt;
7397 
7398   if (Mask != ExpMask)
7399     return None;
7400 
7401   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
7402 }
7403 
7404 // Matches any of the following bit-manipulation patterns:
7405 //   (and (shl x, 1), (0x55555555 << 1))
7406 //   (and (srl x, 1), 0x55555555)
7407 //   (shl (and x, 0x55555555), 1)
7408 //   (srl (and x, (0x55555555 << 1)), 1)
7409 // where the shift amount and mask may vary thus:
7410 //   [1]  = 0x55555555 / 0xAAAAAAAA
7411 //   [2]  = 0x33333333 / 0xCCCCCCCC
7412 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
7413 //   [8]  = 0x00FF00FF / 0xFF00FF00
7414 //   [16] = 0x0000FFFF / 0xFFFFFFFF
7415 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
7416 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
7417   // These are the unshifted masks which we use to match bit-manipulation
7418   // patterns. They may be shifted left in certain circumstances.
7419   static const uint64_t BitmanipMasks[] = {
7420       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
7421       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
7422 
7423   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7424 }
7425 
7426 // Try to fold (<bop> x, (reduction.<bop> vec, start))
7427 static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG) {
7428   auto BinOpToRVVReduce = [](unsigned Opc) {
7429     switch (Opc) {
7430     default:
7431       llvm_unreachable("Unhandled binary to transfrom reduction");
7432     case ISD::ADD:
7433       return RISCVISD::VECREDUCE_ADD_VL;
7434     case ISD::UMAX:
7435       return RISCVISD::VECREDUCE_UMAX_VL;
7436     case ISD::SMAX:
7437       return RISCVISD::VECREDUCE_SMAX_VL;
7438     case ISD::UMIN:
7439       return RISCVISD::VECREDUCE_UMIN_VL;
7440     case ISD::SMIN:
7441       return RISCVISD::VECREDUCE_SMIN_VL;
7442     case ISD::AND:
7443       return RISCVISD::VECREDUCE_AND_VL;
7444     case ISD::OR:
7445       return RISCVISD::VECREDUCE_OR_VL;
7446     case ISD::XOR:
7447       return RISCVISD::VECREDUCE_XOR_VL;
7448     case ISD::FADD:
7449       return RISCVISD::VECREDUCE_FADD_VL;
7450     case ISD::FMAXNUM:
7451       return RISCVISD::VECREDUCE_FMAX_VL;
7452     case ISD::FMINNUM:
7453       return RISCVISD::VECREDUCE_FMIN_VL;
7454     }
7455   };
7456 
7457   auto IsReduction = [&BinOpToRVVReduce](SDValue V, unsigned Opc) {
7458     return V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7459            isNullConstant(V.getOperand(1)) &&
7460            V.getOperand(0).getOpcode() == BinOpToRVVReduce(Opc);
7461   };
7462 
7463   unsigned Opc = N->getOpcode();
7464   unsigned ReduceIdx;
7465   if (IsReduction(N->getOperand(0), Opc))
7466     ReduceIdx = 0;
7467   else if (IsReduction(N->getOperand(1), Opc))
7468     ReduceIdx = 1;
7469   else
7470     return SDValue();
7471 
7472   // Skip if FADD disallows reassociation but the combiner needs.
7473   if (Opc == ISD::FADD && !N->getFlags().hasAllowReassociation())
7474     return SDValue();
7475 
7476   SDValue Extract = N->getOperand(ReduceIdx);
7477   SDValue Reduce = Extract.getOperand(0);
7478   if (!Reduce.hasOneUse())
7479     return SDValue();
7480 
7481   SDValue ScalarV = Reduce.getOperand(2);
7482 
7483   // Make sure that ScalarV is a splat with VL=1.
7484   if (ScalarV.getOpcode() != RISCVISD::VFMV_S_F_VL &&
7485       ScalarV.getOpcode() != RISCVISD::VMV_S_X_VL &&
7486       ScalarV.getOpcode() != RISCVISD::VMV_V_X_VL)
7487     return SDValue();
7488 
7489   if (!isOneConstant(ScalarV.getOperand(2)))
7490     return SDValue();
7491 
7492   // TODO: Deal with value other than neutral element.
7493   auto IsRVVNeutralElement = [Opc, &DAG](SDNode *N, SDValue V) {
7494     if (Opc == ISD::FADD && N->getFlags().hasNoSignedZeros() &&
7495         isNullFPConstant(V))
7496       return true;
7497     return DAG.getNeutralElement(Opc, SDLoc(V), V.getSimpleValueType(),
7498                                  N->getFlags()) == V;
7499   };
7500 
7501   // Check the scalar of ScalarV is neutral element
7502   if (!IsRVVNeutralElement(N, ScalarV.getOperand(1)))
7503     return SDValue();
7504 
7505   if (!ScalarV.hasOneUse())
7506     return SDValue();
7507 
7508   EVT SplatVT = ScalarV.getValueType();
7509   SDValue NewStart = N->getOperand(1 - ReduceIdx);
7510   unsigned SplatOpc = RISCVISD::VFMV_S_F_VL;
7511   if (SplatVT.isInteger()) {
7512     auto *C = dyn_cast<ConstantSDNode>(NewStart.getNode());
7513     if (!C || C->isZero() || !isInt<5>(C->getSExtValue()))
7514       SplatOpc = RISCVISD::VMV_S_X_VL;
7515     else
7516       SplatOpc = RISCVISD::VMV_V_X_VL;
7517   }
7518 
7519   SDValue NewScalarV =
7520       DAG.getNode(SplatOpc, SDLoc(N), SplatVT, ScalarV.getOperand(0), NewStart,
7521                   ScalarV.getOperand(2));
7522   SDValue NewReduce =
7523       DAG.getNode(Reduce.getOpcode(), SDLoc(Reduce), Reduce.getValueType(),
7524                   Reduce.getOperand(0), Reduce.getOperand(1), NewScalarV,
7525                   Reduce.getOperand(3), Reduce.getOperand(4));
7526   return DAG.getNode(Extract.getOpcode(), SDLoc(Extract),
7527                      Extract.getValueType(), NewReduce, Extract.getOperand(1));
7528 }
7529 
7530 // Match the following pattern as a GREVI(W) operation
7531 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
7532 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
7533                                const RISCVSubtarget &Subtarget) {
7534   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7535   EVT VT = Op.getValueType();
7536 
7537   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7538     auto LHS = matchGREVIPat(Op.getOperand(0));
7539     auto RHS = matchGREVIPat(Op.getOperand(1));
7540     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
7541       SDLoc DL(Op);
7542       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
7543                          DAG.getConstant(LHS->ShAmt, DL, VT));
7544     }
7545   }
7546   return SDValue();
7547 }
7548 
7549 // Matches any the following pattern as a GORCI(W) operation
7550 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
7551 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
7552 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
7553 // Note that with the variant of 3.,
7554 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
7555 // the inner pattern will first be matched as GREVI and then the outer
7556 // pattern will be matched to GORC via the first rule above.
7557 // 4.  (or (rotl/rotr x, bitwidth/2), x)
7558 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
7559                                const RISCVSubtarget &Subtarget) {
7560   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7561   EVT VT = Op.getValueType();
7562 
7563   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7564     SDLoc DL(Op);
7565     SDValue Op0 = Op.getOperand(0);
7566     SDValue Op1 = Op.getOperand(1);
7567 
7568     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
7569       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
7570           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
7571           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
7572         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
7573       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
7574       if ((Reverse.getOpcode() == ISD::ROTL ||
7575            Reverse.getOpcode() == ISD::ROTR) &&
7576           Reverse.getOperand(0) == X &&
7577           isa<ConstantSDNode>(Reverse.getOperand(1))) {
7578         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
7579         if (RotAmt == (VT.getSizeInBits() / 2))
7580           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
7581                              DAG.getConstant(RotAmt, DL, VT));
7582       }
7583       return SDValue();
7584     };
7585 
7586     // Check for either commutable permutation of (or (GREVI x, shamt), x)
7587     if (SDValue V = MatchOROfReverse(Op0, Op1))
7588       return V;
7589     if (SDValue V = MatchOROfReverse(Op1, Op0))
7590       return V;
7591 
7592     // OR is commutable so canonicalize its OR operand to the left
7593     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
7594       std::swap(Op0, Op1);
7595     if (Op0.getOpcode() != ISD::OR)
7596       return SDValue();
7597     SDValue OrOp0 = Op0.getOperand(0);
7598     SDValue OrOp1 = Op0.getOperand(1);
7599     auto LHS = matchGREVIPat(OrOp0);
7600     // OR is commutable so swap the operands and try again: x might have been
7601     // on the left
7602     if (!LHS) {
7603       std::swap(OrOp0, OrOp1);
7604       LHS = matchGREVIPat(OrOp0);
7605     }
7606     auto RHS = matchGREVIPat(Op1);
7607     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
7608       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
7609                          DAG.getConstant(LHS->ShAmt, DL, VT));
7610     }
7611   }
7612   return SDValue();
7613 }
7614 
7615 // Matches any of the following bit-manipulation patterns:
7616 //   (and (shl x, 1), (0x22222222 << 1))
7617 //   (and (srl x, 1), 0x22222222)
7618 //   (shl (and x, 0x22222222), 1)
7619 //   (srl (and x, (0x22222222 << 1)), 1)
7620 // where the shift amount and mask may vary thus:
7621 //   [1]  = 0x22222222 / 0x44444444
7622 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
7623 //   [4]  = 0x00F000F0 / 0x0F000F00
7624 //   [8]  = 0x0000FF00 / 0x00FF0000
7625 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
7626 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
7627   // These are the unshifted masks which we use to match bit-manipulation
7628   // patterns. They may be shifted left in certain circumstances.
7629   static const uint64_t BitmanipMasks[] = {
7630       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
7631       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
7632 
7633   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7634 }
7635 
7636 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
7637 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
7638                                const RISCVSubtarget &Subtarget) {
7639   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7640   EVT VT = Op.getValueType();
7641 
7642   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
7643     return SDValue();
7644 
7645   SDValue Op0 = Op.getOperand(0);
7646   SDValue Op1 = Op.getOperand(1);
7647 
7648   // Or is commutable so canonicalize the second OR to the LHS.
7649   if (Op0.getOpcode() != ISD::OR)
7650     std::swap(Op0, Op1);
7651   if (Op0.getOpcode() != ISD::OR)
7652     return SDValue();
7653 
7654   // We found an inner OR, so our operands are the operands of the inner OR
7655   // and the other operand of the outer OR.
7656   SDValue A = Op0.getOperand(0);
7657   SDValue B = Op0.getOperand(1);
7658   SDValue C = Op1;
7659 
7660   auto Match1 = matchSHFLPat(A);
7661   auto Match2 = matchSHFLPat(B);
7662 
7663   // If neither matched, we failed.
7664   if (!Match1 && !Match2)
7665     return SDValue();
7666 
7667   // We had at least one match. if one failed, try the remaining C operand.
7668   if (!Match1) {
7669     std::swap(A, C);
7670     Match1 = matchSHFLPat(A);
7671     if (!Match1)
7672       return SDValue();
7673   } else if (!Match2) {
7674     std::swap(B, C);
7675     Match2 = matchSHFLPat(B);
7676     if (!Match2)
7677       return SDValue();
7678   }
7679   assert(Match1 && Match2);
7680 
7681   // Make sure our matches pair up.
7682   if (!Match1->formsPairWith(*Match2))
7683     return SDValue();
7684 
7685   // All the remains is to make sure C is an AND with the same input, that masks
7686   // out the bits that are being shuffled.
7687   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
7688       C.getOperand(0) != Match1->Op)
7689     return SDValue();
7690 
7691   uint64_t Mask = C.getConstantOperandVal(1);
7692 
7693   static const uint64_t BitmanipMasks[] = {
7694       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
7695       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
7696   };
7697 
7698   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7699   unsigned MaskIdx = Log2_32(Match1->ShAmt);
7700   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7701 
7702   if (Mask != ExpMask)
7703     return SDValue();
7704 
7705   SDLoc DL(Op);
7706   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7707                      DAG.getConstant(Match1->ShAmt, DL, VT));
7708 }
7709 
7710 // Optimize (add (shl x, c0), (shl y, c1)) ->
7711 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
7712 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7713                                   const RISCVSubtarget &Subtarget) {
7714   // Perform this optimization only in the zba extension.
7715   if (!Subtarget.hasStdExtZba())
7716     return SDValue();
7717 
7718   // Skip for vector types and larger types.
7719   EVT VT = N->getValueType(0);
7720   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7721     return SDValue();
7722 
7723   // The two operand nodes must be SHL and have no other use.
7724   SDValue N0 = N->getOperand(0);
7725   SDValue N1 = N->getOperand(1);
7726   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7727       !N0->hasOneUse() || !N1->hasOneUse())
7728     return SDValue();
7729 
7730   // Check c0 and c1.
7731   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7732   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7733   if (!N0C || !N1C)
7734     return SDValue();
7735   int64_t C0 = N0C->getSExtValue();
7736   int64_t C1 = N1C->getSExtValue();
7737   if (C0 <= 0 || C1 <= 0)
7738     return SDValue();
7739 
7740   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7741   int64_t Bits = std::min(C0, C1);
7742   int64_t Diff = std::abs(C0 - C1);
7743   if (Diff != 1 && Diff != 2 && Diff != 3)
7744     return SDValue();
7745 
7746   // Build nodes.
7747   SDLoc DL(N);
7748   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7749   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7750   SDValue NA0 =
7751       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7752   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7753   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7754 }
7755 
7756 // Combine
7757 // ROTR ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7758 // ROTL ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7759 // ROTR ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7760 // ROTL ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7761 // RORW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7762 // ROLW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7763 // The grev patterns represents BSWAP.
7764 // FIXME: This can be generalized to any GREV. We just need to toggle the MSB
7765 // off the grev.
7766 static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG,
7767                                           const RISCVSubtarget &Subtarget) {
7768   bool IsWInstruction =
7769       N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW;
7770   assert((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL ||
7771           IsWInstruction) &&
7772          "Unexpected opcode!");
7773   SDValue Src = N->getOperand(0);
7774   EVT VT = N->getValueType(0);
7775   SDLoc DL(N);
7776 
7777   if (!Subtarget.hasStdExtZbp() || Src.getOpcode() != RISCVISD::GREV)
7778     return SDValue();
7779 
7780   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7781       !isa<ConstantSDNode>(Src.getOperand(1)))
7782     return SDValue();
7783 
7784   unsigned BitWidth = IsWInstruction ? 32 : VT.getSizeInBits();
7785   assert(isPowerOf2_32(BitWidth) && "Expected a power of 2");
7786 
7787   // Needs to be a rotate by half the bitwidth for ROTR/ROTL or by 16 for
7788   // RORW/ROLW. And the grev should be the encoding for bswap for this width.
7789   unsigned ShAmt1 = N->getConstantOperandVal(1);
7790   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7791   if (BitWidth < 32 || ShAmt1 != (BitWidth / 2) || ShAmt2 != (BitWidth - 8))
7792     return SDValue();
7793 
7794   Src = Src.getOperand(0);
7795 
7796   // Toggle bit the MSB of the shift.
7797   unsigned CombinedShAmt = ShAmt1 ^ ShAmt2;
7798   if (CombinedShAmt == 0)
7799     return Src;
7800 
7801   SDValue Res = DAG.getNode(
7802       RISCVISD::GREV, DL, VT, Src,
7803       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7804   if (!IsWInstruction)
7805     return Res;
7806 
7807   // Sign extend the result to match the behavior of the rotate. This will be
7808   // selected to GREVIW in isel.
7809   return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Res,
7810                      DAG.getValueType(MVT::i32));
7811 }
7812 
7813 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
7814 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
7815 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
7816 // not undo itself, but they are redundant.
7817 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7818   bool IsGORC = N->getOpcode() == RISCVISD::GORC;
7819   assert((IsGORC || N->getOpcode() == RISCVISD::GREV) && "Unexpected opcode");
7820   SDValue Src = N->getOperand(0);
7821 
7822   if (Src.getOpcode() != N->getOpcode())
7823     return SDValue();
7824 
7825   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7826       !isa<ConstantSDNode>(Src.getOperand(1)))
7827     return SDValue();
7828 
7829   unsigned ShAmt1 = N->getConstantOperandVal(1);
7830   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7831   Src = Src.getOperand(0);
7832 
7833   unsigned CombinedShAmt;
7834   if (IsGORC)
7835     CombinedShAmt = ShAmt1 | ShAmt2;
7836   else
7837     CombinedShAmt = ShAmt1 ^ ShAmt2;
7838 
7839   if (CombinedShAmt == 0)
7840     return Src;
7841 
7842   SDLoc DL(N);
7843   return DAG.getNode(
7844       N->getOpcode(), DL, N->getValueType(0), Src,
7845       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7846 }
7847 
7848 // Combine a constant select operand into its use:
7849 //
7850 // (and (select cond, -1, c), x)
7851 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7852 // (or  (select cond, 0, c), x)
7853 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7854 // (xor (select cond, 0, c), x)
7855 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7856 // (add (select cond, 0, c), x)
7857 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7858 // (sub x, (select cond, 0, c))
7859 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7860 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7861                                    SelectionDAG &DAG, bool AllOnes) {
7862   EVT VT = N->getValueType(0);
7863 
7864   // Skip vectors.
7865   if (VT.isVector())
7866     return SDValue();
7867 
7868   if ((Slct.getOpcode() != ISD::SELECT &&
7869        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7870       !Slct.hasOneUse())
7871     return SDValue();
7872 
7873   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7874     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7875   };
7876 
7877   bool SwapSelectOps;
7878   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7879   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7880   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7881   SDValue NonConstantVal;
7882   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7883     SwapSelectOps = false;
7884     NonConstantVal = FalseVal;
7885   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7886     SwapSelectOps = true;
7887     NonConstantVal = TrueVal;
7888   } else
7889     return SDValue();
7890 
7891   // Slct is now know to be the desired identity constant when CC is true.
7892   TrueVal = OtherOp;
7893   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7894   // Unless SwapSelectOps says the condition should be false.
7895   if (SwapSelectOps)
7896     std::swap(TrueVal, FalseVal);
7897 
7898   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7899     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7900                        {Slct.getOperand(0), Slct.getOperand(1),
7901                         Slct.getOperand(2), TrueVal, FalseVal});
7902 
7903   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7904                      {Slct.getOperand(0), TrueVal, FalseVal});
7905 }
7906 
7907 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7908 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7909                                               bool AllOnes) {
7910   SDValue N0 = N->getOperand(0);
7911   SDValue N1 = N->getOperand(1);
7912   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7913     return Result;
7914   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7915     return Result;
7916   return SDValue();
7917 }
7918 
7919 // Transform (add (mul x, c0), c1) ->
7920 //           (add (mul (add x, c1/c0), c0), c1%c0).
7921 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7922 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7923 // to an infinite loop in DAGCombine if transformed.
7924 // Or transform (add (mul x, c0), c1) ->
7925 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7926 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7927 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7928 // lead to an infinite loop in DAGCombine if transformed.
7929 // Or transform (add (mul x, c0), c1) ->
7930 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7931 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7932 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7933 // lead to an infinite loop in DAGCombine if transformed.
7934 // Or transform (add (mul x, c0), c1) ->
7935 //              (mul (add x, c1/c0), c0).
7936 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
7937 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7938                                      const RISCVSubtarget &Subtarget) {
7939   // Skip for vector types and larger types.
7940   EVT VT = N->getValueType(0);
7941   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7942     return SDValue();
7943   // The first operand node must be a MUL and has no other use.
7944   SDValue N0 = N->getOperand(0);
7945   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7946     return SDValue();
7947   // Check if c0 and c1 match above conditions.
7948   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7949   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7950   if (!N0C || !N1C)
7951     return SDValue();
7952   // If N0C has multiple uses it's possible one of the cases in
7953   // DAGCombiner::isMulAddWithConstProfitable will be true, which would result
7954   // in an infinite loop.
7955   if (!N0C->hasOneUse())
7956     return SDValue();
7957   int64_t C0 = N0C->getSExtValue();
7958   int64_t C1 = N1C->getSExtValue();
7959   int64_t CA, CB;
7960   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
7961     return SDValue();
7962   // Search for proper CA (non-zero) and CB that both are simm12.
7963   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
7964       !isInt<12>(C0 * (C1 / C0))) {
7965     CA = C1 / C0;
7966     CB = C1 % C0;
7967   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
7968              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
7969     CA = C1 / C0 + 1;
7970     CB = C1 % C0 - C0;
7971   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
7972              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
7973     CA = C1 / C0 - 1;
7974     CB = C1 % C0 + C0;
7975   } else
7976     return SDValue();
7977   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
7978   SDLoc DL(N);
7979   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
7980                              DAG.getConstant(CA, DL, VT));
7981   SDValue New1 =
7982       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
7983   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
7984 }
7985 
7986 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
7987                                  const RISCVSubtarget &Subtarget) {
7988   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
7989     return V;
7990   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
7991     return V;
7992   if (SDValue V = combineBinOpToReduce(N, DAG))
7993     return V;
7994   // fold (add (select lhs, rhs, cc, 0, y), x) ->
7995   //      (select lhs, rhs, cc, x, (add x, y))
7996   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7997 }
7998 
7999 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
8000   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
8001   //      (select lhs, rhs, cc, x, (sub x, y))
8002   SDValue N0 = N->getOperand(0);
8003   SDValue N1 = N->getOperand(1);
8004   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
8005 }
8006 
8007 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
8008                                  const RISCVSubtarget &Subtarget) {
8009   SDValue N0 = N->getOperand(0);
8010   // Pre-promote (i32 (and (srl X, Y), 1)) on RV64 with Zbs without zero
8011   // extending X. This is safe since we only need the LSB after the shift and
8012   // shift amounts larger than 31 would produce poison. If we wait until
8013   // type legalization, we'll create RISCVISD::SRLW and we can't recover it
8014   // to use a BEXT instruction.
8015   if (Subtarget.is64Bit() && Subtarget.hasStdExtZbs() &&
8016       N->getValueType(0) == MVT::i32 && isOneConstant(N->getOperand(1)) &&
8017       N0.getOpcode() == ISD::SRL && !isa<ConstantSDNode>(N0.getOperand(1)) &&
8018       N0.hasOneUse()) {
8019     SDLoc DL(N);
8020     SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N0.getOperand(0));
8021     SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N0.getOperand(1));
8022     SDValue Srl = DAG.getNode(ISD::SRL, DL, MVT::i64, Op0, Op1);
8023     SDValue And = DAG.getNode(ISD::AND, DL, MVT::i64, Srl,
8024                               DAG.getConstant(1, DL, MVT::i64));
8025     return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, And);
8026   }
8027 
8028   if (SDValue V = combineBinOpToReduce(N, DAG))
8029     return V;
8030 
8031   // fold (and (select lhs, rhs, cc, -1, y), x) ->
8032   //      (select lhs, rhs, cc, x, (and x, y))
8033   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
8034 }
8035 
8036 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
8037                                 const RISCVSubtarget &Subtarget) {
8038   if (Subtarget.hasStdExtZbp()) {
8039     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
8040       return GREV;
8041     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
8042       return GORC;
8043     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
8044       return SHFL;
8045   }
8046 
8047   if (SDValue V = combineBinOpToReduce(N, DAG))
8048     return V;
8049   // fold (or (select cond, 0, y), x) ->
8050   //      (select cond, x, (or x, y))
8051   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
8052 }
8053 
8054 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
8055   SDValue N0 = N->getOperand(0);
8056   SDValue N1 = N->getOperand(1);
8057 
8058   // fold (xor (sllw 1, x), -1) -> (rolw ~1, x)
8059   // NOTE: Assumes ROL being legal means ROLW is legal.
8060   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8061   if (N0.getOpcode() == RISCVISD::SLLW &&
8062       isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0)) &&
8063       TLI.isOperationLegal(ISD::ROTL, MVT::i64)) {
8064     SDLoc DL(N);
8065     return DAG.getNode(RISCVISD::ROLW, DL, MVT::i64,
8066                        DAG.getConstant(~1, DL, MVT::i64), N0.getOperand(1));
8067   }
8068 
8069   if (SDValue V = combineBinOpToReduce(N, DAG))
8070     return V;
8071   // fold (xor (select cond, 0, y), x) ->
8072   //      (select cond, x, (xor x, y))
8073   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
8074 }
8075 
8076 static SDValue
8077 performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
8078                                 const RISCVSubtarget &Subtarget) {
8079   SDValue Src = N->getOperand(0);
8080   EVT VT = N->getValueType(0);
8081 
8082   // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X)
8083   if (Src.getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8084       cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
8085     return DAG.getNode(RISCVISD::FMV_X_SIGNEXTH, SDLoc(N), VT,
8086                        Src.getOperand(0));
8087 
8088   // Fold (i64 (sext_inreg (abs X), i32)) ->
8089   // (i64 (smax (sext_inreg (neg X), i32), X)) if X has more than 32 sign bits.
8090   // The (sext_inreg (neg X), i32) will be selected to negw by isel. This
8091   // pattern occurs after type legalization of (i32 (abs X)) on RV64 if the user
8092   // of the (i32 (abs X)) is a sext or setcc or something else that causes type
8093   // legalization to add a sext_inreg after the abs. The (i32 (abs X)) will have
8094   // been type legalized to (i64 (abs (sext_inreg X, i32))), but the sext_inreg
8095   // may get combined into an earlier operation so we need to use
8096   // ComputeNumSignBits.
8097   // NOTE: (i64 (sext_inreg (abs X), i32)) can also be created for
8098   // (i64 (ashr (shl (abs X), 32), 32)) without any type legalization so
8099   // we can't assume that X has 33 sign bits. We must check.
8100   if (Subtarget.hasStdExtZbb() && Subtarget.is64Bit() &&
8101       Src.getOpcode() == ISD::ABS && Src.hasOneUse() && VT == MVT::i64 &&
8102       cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32 &&
8103       DAG.ComputeNumSignBits(Src.getOperand(0)) > 32) {
8104     SDLoc DL(N);
8105     SDValue Freeze = DAG.getFreeze(Src.getOperand(0));
8106     SDValue Neg =
8107         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, MVT::i64), Freeze);
8108     Neg = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Neg,
8109                       DAG.getValueType(MVT::i32));
8110     return DAG.getNode(ISD::SMAX, DL, MVT::i64, Freeze, Neg);
8111   }
8112 
8113   return SDValue();
8114 }
8115 
8116 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to
8117 // vwadd(u).vv/vx or vwsub(u).vv/vx.
8118 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG,
8119                                              bool Commute = false) {
8120   assert((N->getOpcode() == RISCVISD::ADD_VL ||
8121           N->getOpcode() == RISCVISD::SUB_VL) &&
8122          "Unexpected opcode");
8123   bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL;
8124   SDValue Op0 = N->getOperand(0);
8125   SDValue Op1 = N->getOperand(1);
8126   if (Commute)
8127     std::swap(Op0, Op1);
8128 
8129   MVT VT = N->getSimpleValueType(0);
8130 
8131   // Determine the narrow size for a widening add/sub.
8132   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
8133   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
8134                                   VT.getVectorElementCount());
8135 
8136   SDValue Mask = N->getOperand(2);
8137   SDValue VL = N->getOperand(3);
8138 
8139   SDLoc DL(N);
8140 
8141   // If the RHS is a sext or zext, we can form a widening op.
8142   if ((Op1.getOpcode() == RISCVISD::VZEXT_VL ||
8143        Op1.getOpcode() == RISCVISD::VSEXT_VL) &&
8144       Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) {
8145     unsigned ExtOpc = Op1.getOpcode();
8146     Op1 = Op1.getOperand(0);
8147     // Re-introduce narrower extends if needed.
8148     if (Op1.getValueType() != NarrowVT)
8149       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8150 
8151     unsigned WOpc;
8152     if (ExtOpc == RISCVISD::VSEXT_VL)
8153       WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL;
8154     else
8155       WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL;
8156 
8157     return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL);
8158   }
8159 
8160   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
8161   // sext/zext?
8162 
8163   return SDValue();
8164 }
8165 
8166 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or
8167 // vwsub(u).vv/vx.
8168 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) {
8169   SDValue Op0 = N->getOperand(0);
8170   SDValue Op1 = N->getOperand(1);
8171   SDValue Mask = N->getOperand(2);
8172   SDValue VL = N->getOperand(3);
8173 
8174   MVT VT = N->getSimpleValueType(0);
8175   MVT NarrowVT = Op1.getSimpleValueType();
8176   unsigned NarrowSize = NarrowVT.getScalarSizeInBits();
8177 
8178   unsigned VOpc;
8179   switch (N->getOpcode()) {
8180   default: llvm_unreachable("Unexpected opcode");
8181   case RISCVISD::VWADD_W_VL:  VOpc = RISCVISD::VWADD_VL;  break;
8182   case RISCVISD::VWSUB_W_VL:  VOpc = RISCVISD::VWSUB_VL;  break;
8183   case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break;
8184   case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break;
8185   }
8186 
8187   bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8188                   N->getOpcode() == RISCVISD::VWSUB_W_VL;
8189 
8190   SDLoc DL(N);
8191 
8192   // If the LHS is a sext or zext, we can narrow this op to the same size as
8193   // the RHS.
8194   if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) ||
8195        (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) &&
8196       Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) {
8197     unsigned ExtOpc = Op0.getOpcode();
8198     Op0 = Op0.getOperand(0);
8199     // Re-introduce narrower extends if needed.
8200     if (Op0.getValueType() != NarrowVT)
8201       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8202     return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL);
8203   }
8204 
8205   bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8206                N->getOpcode() == RISCVISD::VWADDU_W_VL;
8207 
8208   // Look for splats on the left hand side of a vwadd(u).wv. We might be able
8209   // to commute and use a vwadd(u).vx instead.
8210   if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL &&
8211       Op0.getOperand(0).isUndef() && Op0.getOperand(2) == VL) {
8212     Op0 = Op0.getOperand(1);
8213 
8214     // See if have enough sign bits or zero bits in the scalar to use a
8215     // widening add/sub by splatting to smaller element size.
8216     unsigned EltBits = VT.getScalarSizeInBits();
8217     unsigned ScalarBits = Op0.getValueSizeInBits();
8218     // Make sure we're getting all element bits from the scalar register.
8219     // FIXME: Support implicit sign extension of vmv.v.x?
8220     if (ScalarBits < EltBits)
8221       return SDValue();
8222 
8223     if (IsSigned) {
8224       if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize))
8225         return SDValue();
8226     } else {
8227       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8228       if (!DAG.MaskedValueIsZero(Op0, Mask))
8229         return SDValue();
8230     }
8231 
8232     Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8233                       DAG.getUNDEF(NarrowVT), Op0, VL);
8234     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
8235   }
8236 
8237   return SDValue();
8238 }
8239 
8240 // Try to form VWMUL, VWMULU or VWMULSU.
8241 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
8242 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
8243                                        bool Commute) {
8244   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
8245   SDValue Op0 = N->getOperand(0);
8246   SDValue Op1 = N->getOperand(1);
8247   if (Commute)
8248     std::swap(Op0, Op1);
8249 
8250   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
8251   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
8252   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
8253   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
8254     return SDValue();
8255 
8256   SDValue Mask = N->getOperand(2);
8257   SDValue VL = N->getOperand(3);
8258 
8259   // Make sure the mask and VL match.
8260   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
8261     return SDValue();
8262 
8263   MVT VT = N->getSimpleValueType(0);
8264 
8265   // Determine the narrow size for a widening multiply.
8266   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
8267   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
8268                                   VT.getVectorElementCount());
8269 
8270   SDLoc DL(N);
8271 
8272   // See if the other operand is the same opcode.
8273   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
8274     if (!Op1.hasOneUse())
8275       return SDValue();
8276 
8277     // Make sure the mask and VL match.
8278     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
8279       return SDValue();
8280 
8281     Op1 = Op1.getOperand(0);
8282   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
8283     // The operand is a splat of a scalar.
8284 
8285     // The pasthru must be undef for tail agnostic
8286     if (!Op1.getOperand(0).isUndef())
8287       return SDValue();
8288     // The VL must be the same.
8289     if (Op1.getOperand(2) != VL)
8290       return SDValue();
8291 
8292     // Get the scalar value.
8293     Op1 = Op1.getOperand(1);
8294 
8295     // See if have enough sign bits or zero bits in the scalar to use a
8296     // widening multiply by splatting to smaller element size.
8297     unsigned EltBits = VT.getScalarSizeInBits();
8298     unsigned ScalarBits = Op1.getValueSizeInBits();
8299     // Make sure we're getting all element bits from the scalar register.
8300     // FIXME: Support implicit sign extension of vmv.v.x?
8301     if (ScalarBits < EltBits)
8302       return SDValue();
8303 
8304     // If the LHS is a sign extend, try to use vwmul.
8305     if (IsSignExt && DAG.ComputeNumSignBits(Op1) > (ScalarBits - NarrowSize)) {
8306       // Can use vwmul.
8307     } else {
8308       // Otherwise try to use vwmulu or vwmulsu.
8309       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8310       if (DAG.MaskedValueIsZero(Op1, Mask))
8311         IsVWMULSU = IsSignExt;
8312       else
8313         return SDValue();
8314     }
8315 
8316     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8317                       DAG.getUNDEF(NarrowVT), Op1, VL);
8318   } else
8319     return SDValue();
8320 
8321   Op0 = Op0.getOperand(0);
8322 
8323   // Re-introduce narrower extends if needed.
8324   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
8325   if (Op0.getValueType() != NarrowVT)
8326     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8327   // vwmulsu requires second operand to be zero extended.
8328   ExtOpc = IsVWMULSU ? RISCVISD::VZEXT_VL : ExtOpc;
8329   if (Op1.getValueType() != NarrowVT)
8330     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8331 
8332   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
8333   if (!IsVWMULSU)
8334     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
8335   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
8336 }
8337 
8338 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
8339   switch (Op.getOpcode()) {
8340   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
8341   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
8342   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
8343   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
8344   case ISD::FROUND:     return RISCVFPRndMode::RMM;
8345   }
8346 
8347   return RISCVFPRndMode::Invalid;
8348 }
8349 
8350 // Fold
8351 //   (fp_to_int (froundeven X)) -> fcvt X, rne
8352 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
8353 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
8354 //   (fp_to_int (fceil X))      -> fcvt X, rup
8355 //   (fp_to_int (fround X))     -> fcvt X, rmm
8356 static SDValue performFP_TO_INTCombine(SDNode *N,
8357                                        TargetLowering::DAGCombinerInfo &DCI,
8358                                        const RISCVSubtarget &Subtarget) {
8359   SelectionDAG &DAG = DCI.DAG;
8360   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8361   MVT XLenVT = Subtarget.getXLenVT();
8362 
8363   // Only handle XLen or i32 types. Other types narrower than XLen will
8364   // eventually be legalized to XLenVT.
8365   EVT VT = N->getValueType(0);
8366   if (VT != MVT::i32 && VT != XLenVT)
8367     return SDValue();
8368 
8369   SDValue Src = N->getOperand(0);
8370 
8371   // Ensure the FP type is also legal.
8372   if (!TLI.isTypeLegal(Src.getValueType()))
8373     return SDValue();
8374 
8375   // Don't do this for f16 with Zfhmin and not Zfh.
8376   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8377     return SDValue();
8378 
8379   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8380   if (FRM == RISCVFPRndMode::Invalid)
8381     return SDValue();
8382 
8383   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
8384 
8385   unsigned Opc;
8386   if (VT == XLenVT)
8387     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8388   else
8389     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8390 
8391   SDLoc DL(N);
8392   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
8393                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8394   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
8395 }
8396 
8397 // Fold
8398 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
8399 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
8400 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
8401 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
8402 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
8403 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
8404                                        TargetLowering::DAGCombinerInfo &DCI,
8405                                        const RISCVSubtarget &Subtarget) {
8406   SelectionDAG &DAG = DCI.DAG;
8407   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8408   MVT XLenVT = Subtarget.getXLenVT();
8409 
8410   // Only handle XLen types. Other types narrower than XLen will eventually be
8411   // legalized to XLenVT.
8412   EVT DstVT = N->getValueType(0);
8413   if (DstVT != XLenVT)
8414     return SDValue();
8415 
8416   SDValue Src = N->getOperand(0);
8417 
8418   // Ensure the FP type is also legal.
8419   if (!TLI.isTypeLegal(Src.getValueType()))
8420     return SDValue();
8421 
8422   // Don't do this for f16 with Zfhmin and not Zfh.
8423   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8424     return SDValue();
8425 
8426   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
8427 
8428   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8429   if (FRM == RISCVFPRndMode::Invalid)
8430     return SDValue();
8431 
8432   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
8433 
8434   unsigned Opc;
8435   if (SatVT == DstVT)
8436     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8437   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
8438     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8439   else
8440     return SDValue();
8441   // FIXME: Support other SatVTs by clamping before or after the conversion.
8442 
8443   Src = Src.getOperand(0);
8444 
8445   SDLoc DL(N);
8446   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
8447                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8448 
8449   // RISCV FP-to-int conversions saturate to the destination register size, but
8450   // don't produce 0 for nan.
8451   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
8452   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
8453 }
8454 
8455 // Combine (bitreverse (bswap X)) to the BREV8 GREVI encoding if the type is
8456 // smaller than XLenVT.
8457 static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
8458                                         const RISCVSubtarget &Subtarget) {
8459   assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
8460 
8461   SDValue Src = N->getOperand(0);
8462   if (Src.getOpcode() != ISD::BSWAP)
8463     return SDValue();
8464 
8465   EVT VT = N->getValueType(0);
8466   if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() ||
8467       !isPowerOf2_32(VT.getSizeInBits()))
8468     return SDValue();
8469 
8470   SDLoc DL(N);
8471   return DAG.getNode(RISCVISD::GREV, DL, VT, Src.getOperand(0),
8472                      DAG.getConstant(7, DL, VT));
8473 }
8474 
8475 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
8476                                                DAGCombinerInfo &DCI) const {
8477   SelectionDAG &DAG = DCI.DAG;
8478 
8479   // Helper to call SimplifyDemandedBits on an operand of N where only some low
8480   // bits are demanded. N will be added to the Worklist if it was not deleted.
8481   // Caller should return SDValue(N, 0) if this returns true.
8482   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
8483     SDValue Op = N->getOperand(OpNo);
8484     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
8485     if (!SimplifyDemandedBits(Op, Mask, DCI))
8486       return false;
8487 
8488     if (N->getOpcode() != ISD::DELETED_NODE)
8489       DCI.AddToWorklist(N);
8490     return true;
8491   };
8492 
8493   switch (N->getOpcode()) {
8494   default:
8495     break;
8496   case RISCVISD::SplitF64: {
8497     SDValue Op0 = N->getOperand(0);
8498     // If the input to SplitF64 is just BuildPairF64 then the operation is
8499     // redundant. Instead, use BuildPairF64's operands directly.
8500     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
8501       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
8502 
8503     if (Op0->isUndef()) {
8504       SDValue Lo = DAG.getUNDEF(MVT::i32);
8505       SDValue Hi = DAG.getUNDEF(MVT::i32);
8506       return DCI.CombineTo(N, Lo, Hi);
8507     }
8508 
8509     SDLoc DL(N);
8510 
8511     // It's cheaper to materialise two 32-bit integers than to load a double
8512     // from the constant pool and transfer it to integer registers through the
8513     // stack.
8514     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
8515       APInt V = C->getValueAPF().bitcastToAPInt();
8516       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
8517       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
8518       return DCI.CombineTo(N, Lo, Hi);
8519     }
8520 
8521     // This is a target-specific version of a DAGCombine performed in
8522     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8523     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8524     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8525     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8526         !Op0.getNode()->hasOneUse())
8527       break;
8528     SDValue NewSplitF64 =
8529         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
8530                     Op0.getOperand(0));
8531     SDValue Lo = NewSplitF64.getValue(0);
8532     SDValue Hi = NewSplitF64.getValue(1);
8533     APInt SignBit = APInt::getSignMask(32);
8534     if (Op0.getOpcode() == ISD::FNEG) {
8535       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
8536                                   DAG.getConstant(SignBit, DL, MVT::i32));
8537       return DCI.CombineTo(N, Lo, NewHi);
8538     }
8539     assert(Op0.getOpcode() == ISD::FABS);
8540     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
8541                                 DAG.getConstant(~SignBit, DL, MVT::i32));
8542     return DCI.CombineTo(N, Lo, NewHi);
8543   }
8544   case RISCVISD::SLLW:
8545   case RISCVISD::SRAW:
8546   case RISCVISD::SRLW: {
8547     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8548     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8549         SimplifyDemandedLowBitsHelper(1, 5))
8550       return SDValue(N, 0);
8551 
8552     break;
8553   }
8554   case ISD::ROTR:
8555   case ISD::ROTL:
8556   case RISCVISD::RORW:
8557   case RISCVISD::ROLW: {
8558     if (N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW) {
8559       // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8560       if (SimplifyDemandedLowBitsHelper(0, 32) ||
8561           SimplifyDemandedLowBitsHelper(1, 5))
8562         return SDValue(N, 0);
8563     }
8564 
8565     return combineROTR_ROTL_RORW_ROLW(N, DAG, Subtarget);
8566   }
8567   case RISCVISD::CLZW:
8568   case RISCVISD::CTZW: {
8569     // Only the lower 32 bits of the first operand are read
8570     if (SimplifyDemandedLowBitsHelper(0, 32))
8571       return SDValue(N, 0);
8572     break;
8573   }
8574   case RISCVISD::GREV:
8575   case RISCVISD::GORC: {
8576     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
8577     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8578     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8579     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
8580       return SDValue(N, 0);
8581 
8582     return combineGREVI_GORCI(N, DAG);
8583   }
8584   case RISCVISD::GREVW:
8585   case RISCVISD::GORCW: {
8586     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8587     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8588         SimplifyDemandedLowBitsHelper(1, 5))
8589       return SDValue(N, 0);
8590 
8591     break;
8592   }
8593   case RISCVISD::SHFL:
8594   case RISCVISD::UNSHFL: {
8595     // Only the lower log2(Bitwidth)-1 bits of the the shift amount are read.
8596     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8597     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8598     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
8599       return SDValue(N, 0);
8600 
8601     break;
8602   }
8603   case RISCVISD::SHFLW:
8604   case RISCVISD::UNSHFLW: {
8605     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
8606     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8607         SimplifyDemandedLowBitsHelper(1, 4))
8608       return SDValue(N, 0);
8609 
8610     break;
8611   }
8612   case RISCVISD::BCOMPRESSW:
8613   case RISCVISD::BDECOMPRESSW: {
8614     // Only the lower 32 bits of LHS and RHS are read.
8615     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8616         SimplifyDemandedLowBitsHelper(1, 32))
8617       return SDValue(N, 0);
8618 
8619     break;
8620   }
8621   case RISCVISD::FSR:
8622   case RISCVISD::FSL:
8623   case RISCVISD::FSRW:
8624   case RISCVISD::FSLW: {
8625     bool IsWInstruction =
8626         N->getOpcode() == RISCVISD::FSRW || N->getOpcode() == RISCVISD::FSLW;
8627     unsigned BitWidth =
8628         IsWInstruction ? 32 : N->getSimpleValueType(0).getSizeInBits();
8629     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8630     // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read.
8631     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) + 1))
8632       return SDValue(N, 0);
8633 
8634     break;
8635   }
8636   case RISCVISD::FMV_X_ANYEXTH:
8637   case RISCVISD::FMV_X_ANYEXTW_RV64: {
8638     SDLoc DL(N);
8639     SDValue Op0 = N->getOperand(0);
8640     MVT VT = N->getSimpleValueType(0);
8641     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
8642     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
8643     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
8644     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
8645          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
8646         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8647          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
8648       assert(Op0.getOperand(0).getValueType() == VT &&
8649              "Unexpected value type!");
8650       return Op0.getOperand(0);
8651     }
8652 
8653     // This is a target-specific version of a DAGCombine performed in
8654     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8655     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8656     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8657     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8658         !Op0.getNode()->hasOneUse())
8659       break;
8660     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
8661     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
8662     APInt SignBit = APInt::getSignMask(FPBits).sext(VT.getSizeInBits());
8663     if (Op0.getOpcode() == ISD::FNEG)
8664       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
8665                          DAG.getConstant(SignBit, DL, VT));
8666 
8667     assert(Op0.getOpcode() == ISD::FABS);
8668     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
8669                        DAG.getConstant(~SignBit, DL, VT));
8670   }
8671   case ISD::ADD:
8672     return performADDCombine(N, DAG, Subtarget);
8673   case ISD::SUB:
8674     return performSUBCombine(N, DAG);
8675   case ISD::AND:
8676     return performANDCombine(N, DAG, Subtarget);
8677   case ISD::OR:
8678     return performORCombine(N, DAG, Subtarget);
8679   case ISD::XOR:
8680     return performXORCombine(N, DAG);
8681   case ISD::FADD:
8682   case ISD::UMAX:
8683   case ISD::UMIN:
8684   case ISD::SMAX:
8685   case ISD::SMIN:
8686   case ISD::FMAXNUM:
8687   case ISD::FMINNUM:
8688     return combineBinOpToReduce(N, DAG);
8689   case ISD::SIGN_EXTEND_INREG:
8690     return performSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
8691   case ISD::ZERO_EXTEND:
8692     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
8693     // type legalization. This is safe because fp_to_uint produces poison if
8694     // it overflows.
8695     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
8696       SDValue Src = N->getOperand(0);
8697       if (Src.getOpcode() == ISD::FP_TO_UINT &&
8698           isTypeLegal(Src.getOperand(0).getValueType()))
8699         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
8700                            Src.getOperand(0));
8701       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
8702           isTypeLegal(Src.getOperand(1).getValueType())) {
8703         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
8704         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
8705                                   Src.getOperand(0), Src.getOperand(1));
8706         DCI.CombineTo(N, Res);
8707         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
8708         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
8709         return SDValue(N, 0); // Return N so it doesn't get rechecked.
8710       }
8711     }
8712     return SDValue();
8713   case RISCVISD::SELECT_CC: {
8714     // Transform
8715     SDValue LHS = N->getOperand(0);
8716     SDValue RHS = N->getOperand(1);
8717     SDValue TrueV = N->getOperand(3);
8718     SDValue FalseV = N->getOperand(4);
8719 
8720     // If the True and False values are the same, we don't need a select_cc.
8721     if (TrueV == FalseV)
8722       return TrueV;
8723 
8724     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
8725     if (!ISD::isIntEqualitySetCC(CCVal))
8726       break;
8727 
8728     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
8729     //      (select_cc X, Y, lt, trueV, falseV)
8730     // Sometimes the setcc is introduced after select_cc has been formed.
8731     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8732         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8733       // If we're looking for eq 0 instead of ne 0, we need to invert the
8734       // condition.
8735       bool Invert = CCVal == ISD::SETEQ;
8736       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8737       if (Invert)
8738         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8739 
8740       SDLoc DL(N);
8741       RHS = LHS.getOperand(1);
8742       LHS = LHS.getOperand(0);
8743       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8744 
8745       SDValue TargetCC = DAG.getCondCode(CCVal);
8746       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8747                          {LHS, RHS, TargetCC, TrueV, FalseV});
8748     }
8749 
8750     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
8751     //      (select_cc X, Y, eq/ne, trueV, falseV)
8752     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8753       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
8754                          {LHS.getOperand(0), LHS.getOperand(1),
8755                           N->getOperand(2), TrueV, FalseV});
8756     // (select_cc X, 1, setne, trueV, falseV) ->
8757     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
8758     // This can occur when legalizing some floating point comparisons.
8759     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8760     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8761       SDLoc DL(N);
8762       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8763       SDValue TargetCC = DAG.getCondCode(CCVal);
8764       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8765       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8766                          {LHS, RHS, TargetCC, TrueV, FalseV});
8767     }
8768 
8769     break;
8770   }
8771   case RISCVISD::BR_CC: {
8772     SDValue LHS = N->getOperand(1);
8773     SDValue RHS = N->getOperand(2);
8774     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
8775     if (!ISD::isIntEqualitySetCC(CCVal))
8776       break;
8777 
8778     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
8779     //      (br_cc X, Y, lt, dest)
8780     // Sometimes the setcc is introduced after br_cc has been formed.
8781     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8782         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8783       // If we're looking for eq 0 instead of ne 0, we need to invert the
8784       // condition.
8785       bool Invert = CCVal == ISD::SETEQ;
8786       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8787       if (Invert)
8788         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8789 
8790       SDLoc DL(N);
8791       RHS = LHS.getOperand(1);
8792       LHS = LHS.getOperand(0);
8793       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8794 
8795       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8796                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
8797                          N->getOperand(4));
8798     }
8799 
8800     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
8801     //      (br_cc X, Y, eq/ne, trueV, falseV)
8802     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8803       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
8804                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
8805                          N->getOperand(3), N->getOperand(4));
8806 
8807     // (br_cc X, 1, setne, br_cc) ->
8808     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
8809     // This can occur when legalizing some floating point comparisons.
8810     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8811     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8812       SDLoc DL(N);
8813       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8814       SDValue TargetCC = DAG.getCondCode(CCVal);
8815       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8816       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8817                          N->getOperand(0), LHS, RHS, TargetCC,
8818                          N->getOperand(4));
8819     }
8820     break;
8821   }
8822   case ISD::BITREVERSE:
8823     return performBITREVERSECombine(N, DAG, Subtarget);
8824   case ISD::FP_TO_SINT:
8825   case ISD::FP_TO_UINT:
8826     return performFP_TO_INTCombine(N, DCI, Subtarget);
8827   case ISD::FP_TO_SINT_SAT:
8828   case ISD::FP_TO_UINT_SAT:
8829     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
8830   case ISD::FCOPYSIGN: {
8831     EVT VT = N->getValueType(0);
8832     if (!VT.isVector())
8833       break;
8834     // There is a form of VFSGNJ which injects the negated sign of its second
8835     // operand. Try and bubble any FNEG up after the extend/round to produce
8836     // this optimized pattern. Avoid modifying cases where FP_ROUND and
8837     // TRUNC=1.
8838     SDValue In2 = N->getOperand(1);
8839     // Avoid cases where the extend/round has multiple uses, as duplicating
8840     // those is typically more expensive than removing a fneg.
8841     if (!In2.hasOneUse())
8842       break;
8843     if (In2.getOpcode() != ISD::FP_EXTEND &&
8844         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
8845       break;
8846     In2 = In2.getOperand(0);
8847     if (In2.getOpcode() != ISD::FNEG)
8848       break;
8849     SDLoc DL(N);
8850     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
8851     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
8852                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
8853   }
8854   case ISD::MGATHER:
8855   case ISD::MSCATTER:
8856   case ISD::VP_GATHER:
8857   case ISD::VP_SCATTER: {
8858     if (!DCI.isBeforeLegalize())
8859       break;
8860     SDValue Index, ScaleOp;
8861     bool IsIndexScaled = false;
8862     bool IsIndexSigned = false;
8863     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
8864       Index = VPGSN->getIndex();
8865       ScaleOp = VPGSN->getScale();
8866       IsIndexScaled = VPGSN->isIndexScaled();
8867       IsIndexSigned = VPGSN->isIndexSigned();
8868     } else {
8869       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
8870       Index = MGSN->getIndex();
8871       ScaleOp = MGSN->getScale();
8872       IsIndexScaled = MGSN->isIndexScaled();
8873       IsIndexSigned = MGSN->isIndexSigned();
8874     }
8875     EVT IndexVT = Index.getValueType();
8876     MVT XLenVT = Subtarget.getXLenVT();
8877     // RISCV indexed loads only support the "unsigned unscaled" addressing
8878     // mode, so anything else must be manually legalized.
8879     bool NeedsIdxLegalization =
8880         IsIndexScaled ||
8881         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
8882     if (!NeedsIdxLegalization)
8883       break;
8884 
8885     SDLoc DL(N);
8886 
8887     // Any index legalization should first promote to XLenVT, so we don't lose
8888     // bits when scaling. This may create an illegal index type so we let
8889     // LLVM's legalization take care of the splitting.
8890     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
8891     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
8892       IndexVT = IndexVT.changeVectorElementType(XLenVT);
8893       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
8894                           DL, IndexVT, Index);
8895     }
8896 
8897     if (IsIndexScaled) {
8898       // Manually scale the indices.
8899       // TODO: Sanitize the scale operand here?
8900       // TODO: For VP nodes, should we use VP_SHL here?
8901       unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
8902       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
8903       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
8904       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
8905       ScaleOp = DAG.getTargetConstant(1, DL, ScaleOp.getValueType());
8906     }
8907 
8908     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_SCALED;
8909     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
8910       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
8911                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
8912                               ScaleOp, VPGN->getMask(),
8913                               VPGN->getVectorLength()},
8914                              VPGN->getMemOperand(), NewIndexTy);
8915     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
8916       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
8917                               {VPSN->getChain(), VPSN->getValue(),
8918                                VPSN->getBasePtr(), Index, ScaleOp,
8919                                VPSN->getMask(), VPSN->getVectorLength()},
8920                               VPSN->getMemOperand(), NewIndexTy);
8921     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
8922       return DAG.getMaskedGather(
8923           N->getVTList(), MGN->getMemoryVT(), DL,
8924           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
8925            MGN->getBasePtr(), Index, ScaleOp},
8926           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
8927     const auto *MSN = cast<MaskedScatterSDNode>(N);
8928     return DAG.getMaskedScatter(
8929         N->getVTList(), MSN->getMemoryVT(), DL,
8930         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
8931          Index, ScaleOp},
8932         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
8933   }
8934   case RISCVISD::SRA_VL:
8935   case RISCVISD::SRL_VL:
8936   case RISCVISD::SHL_VL: {
8937     SDValue ShAmt = N->getOperand(1);
8938     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8939       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8940       SDLoc DL(N);
8941       SDValue VL = N->getOperand(3);
8942       EVT VT = N->getValueType(0);
8943       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8944                           ShAmt.getOperand(1), VL);
8945       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
8946                          N->getOperand(2), N->getOperand(3));
8947     }
8948     break;
8949   }
8950   case ISD::SRA:
8951   case ISD::SRL:
8952   case ISD::SHL: {
8953     SDValue ShAmt = N->getOperand(1);
8954     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8955       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8956       SDLoc DL(N);
8957       EVT VT = N->getValueType(0);
8958       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8959                           ShAmt.getOperand(1),
8960                           DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
8961       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
8962     }
8963     break;
8964   }
8965   case RISCVISD::ADD_VL:
8966     if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false))
8967       return V;
8968     return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true);
8969   case RISCVISD::SUB_VL:
8970     return combineADDSUB_VLToVWADDSUB_VL(N, DAG);
8971   case RISCVISD::VWADD_W_VL:
8972   case RISCVISD::VWADDU_W_VL:
8973   case RISCVISD::VWSUB_W_VL:
8974   case RISCVISD::VWSUBU_W_VL:
8975     return combineVWADD_W_VL_VWSUB_W_VL(N, DAG);
8976   case RISCVISD::MUL_VL:
8977     if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
8978       return V;
8979     // Mul is commutative.
8980     return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
8981   case ISD::STORE: {
8982     auto *Store = cast<StoreSDNode>(N);
8983     SDValue Val = Store->getValue();
8984     // Combine store of vmv.x.s to vse with VL of 1.
8985     // FIXME: Support FP.
8986     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
8987       SDValue Src = Val.getOperand(0);
8988       EVT VecVT = Src.getValueType();
8989       EVT MemVT = Store->getMemoryVT();
8990       // The memory VT and the element type must match.
8991       if (VecVT.getVectorElementType() == MemVT) {
8992         SDLoc DL(N);
8993         MVT MaskVT = getMaskTypeFor(VecVT);
8994         return DAG.getStoreVP(
8995             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
8996             DAG.getConstant(1, DL, MaskVT),
8997             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
8998             Store->getMemOperand(), Store->getAddressingMode(),
8999             Store->isTruncatingStore(), /*IsCompress*/ false);
9000       }
9001     }
9002 
9003     break;
9004   }
9005   case ISD::SPLAT_VECTOR: {
9006     EVT VT = N->getValueType(0);
9007     // Only perform this combine on legal MVT types.
9008     if (!isTypeLegal(VT))
9009       break;
9010     if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
9011                                          DAG, Subtarget))
9012       return Gather;
9013     break;
9014   }
9015   case RISCVISD::VMV_V_X_VL: {
9016     // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
9017     // scalar input.
9018     unsigned ScalarSize = N->getOperand(1).getValueSizeInBits();
9019     unsigned EltWidth = N->getValueType(0).getScalarSizeInBits();
9020     if (ScalarSize > EltWidth && N->getOperand(0).isUndef())
9021       if (SimplifyDemandedLowBitsHelper(1, EltWidth))
9022         return SDValue(N, 0);
9023 
9024     break;
9025   }
9026   case ISD::INTRINSIC_WO_CHAIN: {
9027     unsigned IntNo = N->getConstantOperandVal(0);
9028     switch (IntNo) {
9029       // By default we do not combine any intrinsic.
9030     default:
9031       return SDValue();
9032     case Intrinsic::riscv_vcpop:
9033     case Intrinsic::riscv_vcpop_mask:
9034     case Intrinsic::riscv_vfirst:
9035     case Intrinsic::riscv_vfirst_mask: {
9036       SDValue VL = N->getOperand(2);
9037       if (IntNo == Intrinsic::riscv_vcpop_mask ||
9038           IntNo == Intrinsic::riscv_vfirst_mask)
9039         VL = N->getOperand(3);
9040       if (!isNullConstant(VL))
9041         return SDValue();
9042       // If VL is 0, vcpop -> li 0, vfirst -> li -1.
9043       SDLoc DL(N);
9044       EVT VT = N->getValueType(0);
9045       if (IntNo == Intrinsic::riscv_vfirst ||
9046           IntNo == Intrinsic::riscv_vfirst_mask)
9047         return DAG.getConstant(-1, DL, VT);
9048       return DAG.getConstant(0, DL, VT);
9049     }
9050     }
9051   }
9052   }
9053 
9054   return SDValue();
9055 }
9056 
9057 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
9058     const SDNode *N, CombineLevel Level) const {
9059   // The following folds are only desirable if `(OP _, c1 << c2)` can be
9060   // materialised in fewer instructions than `(OP _, c1)`:
9061   //
9062   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
9063   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
9064   SDValue N0 = N->getOperand(0);
9065   EVT Ty = N0.getValueType();
9066   if (Ty.isScalarInteger() &&
9067       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
9068     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
9069     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
9070     if (C1 && C2) {
9071       const APInt &C1Int = C1->getAPIntValue();
9072       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
9073 
9074       // We can materialise `c1 << c2` into an add immediate, so it's "free",
9075       // and the combine should happen, to potentially allow further combines
9076       // later.
9077       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
9078           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
9079         return true;
9080 
9081       // We can materialise `c1` in an add immediate, so it's "free", and the
9082       // combine should be prevented.
9083       if (C1Int.getMinSignedBits() <= 64 &&
9084           isLegalAddImmediate(C1Int.getSExtValue()))
9085         return false;
9086 
9087       // Neither constant will fit into an immediate, so find materialisation
9088       // costs.
9089       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
9090                                               Subtarget.getFeatureBits(),
9091                                               /*CompressionCost*/true);
9092       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
9093           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
9094           /*CompressionCost*/true);
9095 
9096       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
9097       // combine should be prevented.
9098       if (C1Cost < ShiftedC1Cost)
9099         return false;
9100     }
9101   }
9102   return true;
9103 }
9104 
9105 bool RISCVTargetLowering::targetShrinkDemandedConstant(
9106     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
9107     TargetLoweringOpt &TLO) const {
9108   // Delay this optimization as late as possible.
9109   if (!TLO.LegalOps)
9110     return false;
9111 
9112   EVT VT = Op.getValueType();
9113   if (VT.isVector())
9114     return false;
9115 
9116   // Only handle AND for now.
9117   if (Op.getOpcode() != ISD::AND)
9118     return false;
9119 
9120   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
9121   if (!C)
9122     return false;
9123 
9124   const APInt &Mask = C->getAPIntValue();
9125 
9126   // Clear all non-demanded bits initially.
9127   APInt ShrunkMask = Mask & DemandedBits;
9128 
9129   // Try to make a smaller immediate by setting undemanded bits.
9130 
9131   APInt ExpandedMask = Mask | ~DemandedBits;
9132 
9133   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
9134     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
9135   };
9136   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
9137     if (NewMask == Mask)
9138       return true;
9139     SDLoc DL(Op);
9140     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
9141     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
9142     return TLO.CombineTo(Op, NewOp);
9143   };
9144 
9145   // If the shrunk mask fits in sign extended 12 bits, let the target
9146   // independent code apply it.
9147   if (ShrunkMask.isSignedIntN(12))
9148     return false;
9149 
9150   // Preserve (and X, 0xffff) when zext.h is supported.
9151   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
9152     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
9153     if (IsLegalMask(NewMask))
9154       return UseMask(NewMask);
9155   }
9156 
9157   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
9158   if (VT == MVT::i64) {
9159     APInt NewMask = APInt(64, 0xffffffff);
9160     if (IsLegalMask(NewMask))
9161       return UseMask(NewMask);
9162   }
9163 
9164   // For the remaining optimizations, we need to be able to make a negative
9165   // number through a combination of mask and undemanded bits.
9166   if (!ExpandedMask.isNegative())
9167     return false;
9168 
9169   // What is the fewest number of bits we need to represent the negative number.
9170   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
9171 
9172   // Try to make a 12 bit negative immediate. If that fails try to make a 32
9173   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
9174   APInt NewMask = ShrunkMask;
9175   if (MinSignedBits <= 12)
9176     NewMask.setBitsFrom(11);
9177   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
9178     NewMask.setBitsFrom(31);
9179   else
9180     return false;
9181 
9182   // Check that our new mask is a subset of the demanded mask.
9183   assert(IsLegalMask(NewMask));
9184   return UseMask(NewMask);
9185 }
9186 
9187 static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC) {
9188   static const uint64_t GREVMasks[] = {
9189       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
9190       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
9191 
9192   for (unsigned Stage = 0; Stage != 6; ++Stage) {
9193     unsigned Shift = 1 << Stage;
9194     if (ShAmt & Shift) {
9195       uint64_t Mask = GREVMasks[Stage];
9196       uint64_t Res = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
9197       if (IsGORC)
9198         Res |= x;
9199       x = Res;
9200     }
9201   }
9202 
9203   return x;
9204 }
9205 
9206 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
9207                                                         KnownBits &Known,
9208                                                         const APInt &DemandedElts,
9209                                                         const SelectionDAG &DAG,
9210                                                         unsigned Depth) const {
9211   unsigned BitWidth = Known.getBitWidth();
9212   unsigned Opc = Op.getOpcode();
9213   assert((Opc >= ISD::BUILTIN_OP_END ||
9214           Opc == ISD::INTRINSIC_WO_CHAIN ||
9215           Opc == ISD::INTRINSIC_W_CHAIN ||
9216           Opc == ISD::INTRINSIC_VOID) &&
9217          "Should use MaskedValueIsZero if you don't know whether Op"
9218          " is a target node!");
9219 
9220   Known.resetAll();
9221   switch (Opc) {
9222   default: break;
9223   case RISCVISD::SELECT_CC: {
9224     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
9225     // If we don't know any bits, early out.
9226     if (Known.isUnknown())
9227       break;
9228     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
9229 
9230     // Only known if known in both the LHS and RHS.
9231     Known = KnownBits::commonBits(Known, Known2);
9232     break;
9233   }
9234   case RISCVISD::REMUW: {
9235     KnownBits Known2;
9236     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
9237     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
9238     // We only care about the lower 32 bits.
9239     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
9240     // Restore the original width by sign extending.
9241     Known = Known.sext(BitWidth);
9242     break;
9243   }
9244   case RISCVISD::DIVUW: {
9245     KnownBits Known2;
9246     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
9247     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
9248     // We only care about the lower 32 bits.
9249     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
9250     // Restore the original width by sign extending.
9251     Known = Known.sext(BitWidth);
9252     break;
9253   }
9254   case RISCVISD::CTZW: {
9255     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9256     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
9257     unsigned LowBits = Log2_32(PossibleTZ) + 1;
9258     Known.Zero.setBitsFrom(LowBits);
9259     break;
9260   }
9261   case RISCVISD::CLZW: {
9262     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9263     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
9264     unsigned LowBits = Log2_32(PossibleLZ) + 1;
9265     Known.Zero.setBitsFrom(LowBits);
9266     break;
9267   }
9268   case RISCVISD::GREV:
9269   case RISCVISD::GORC: {
9270     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
9271       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
9272       unsigned ShAmt = C->getZExtValue() & (Known.getBitWidth() - 1);
9273       bool IsGORC = Op.getOpcode() == RISCVISD::GORC;
9274       // To compute zeros, we need to invert the value and invert it back after.
9275       Known.Zero =
9276           ~computeGREVOrGORC(~Known.Zero.getZExtValue(), ShAmt, IsGORC);
9277       Known.One = computeGREVOrGORC(Known.One.getZExtValue(), ShAmt, IsGORC);
9278     }
9279     break;
9280   }
9281   case RISCVISD::READ_VLENB: {
9282     // If we know the minimum VLen from Zvl extensions, we can use that to
9283     // determine the trailing zeros of VLENB.
9284     // FIXME: Limit to 128 bit vectors until we have more testing.
9285     unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8;
9286     if (MinVLenB > 0)
9287       Known.Zero.setLowBits(Log2_32(MinVLenB));
9288     // We assume VLENB is no more than 65536 / 8 bytes.
9289     Known.Zero.setBitsFrom(14);
9290     break;
9291   }
9292   case ISD::INTRINSIC_W_CHAIN:
9293   case ISD::INTRINSIC_WO_CHAIN: {
9294     unsigned IntNo =
9295         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
9296     switch (IntNo) {
9297     default:
9298       // We can't do anything for most intrinsics.
9299       break;
9300     case Intrinsic::riscv_vsetvli:
9301     case Intrinsic::riscv_vsetvlimax:
9302     case Intrinsic::riscv_vsetvli_opt:
9303     case Intrinsic::riscv_vsetvlimax_opt:
9304       // Assume that VL output is positive and would fit in an int32_t.
9305       // TODO: VLEN might be capped at 16 bits in a future V spec update.
9306       if (BitWidth >= 32)
9307         Known.Zero.setBitsFrom(31);
9308       break;
9309     }
9310     break;
9311   }
9312   }
9313 }
9314 
9315 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
9316     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
9317     unsigned Depth) const {
9318   switch (Op.getOpcode()) {
9319   default:
9320     break;
9321   case RISCVISD::SELECT_CC: {
9322     unsigned Tmp =
9323         DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
9324     if (Tmp == 1) return 1;  // Early out.
9325     unsigned Tmp2 =
9326         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
9327     return std::min(Tmp, Tmp2);
9328   }
9329   case RISCVISD::SLLW:
9330   case RISCVISD::SRAW:
9331   case RISCVISD::SRLW:
9332   case RISCVISD::DIVW:
9333   case RISCVISD::DIVUW:
9334   case RISCVISD::REMUW:
9335   case RISCVISD::ROLW:
9336   case RISCVISD::RORW:
9337   case RISCVISD::GREVW:
9338   case RISCVISD::GORCW:
9339   case RISCVISD::FSLW:
9340   case RISCVISD::FSRW:
9341   case RISCVISD::SHFLW:
9342   case RISCVISD::UNSHFLW:
9343   case RISCVISD::BCOMPRESSW:
9344   case RISCVISD::BDECOMPRESSW:
9345   case RISCVISD::BFPW:
9346   case RISCVISD::FCVT_W_RV64:
9347   case RISCVISD::FCVT_WU_RV64:
9348   case RISCVISD::STRICT_FCVT_W_RV64:
9349   case RISCVISD::STRICT_FCVT_WU_RV64:
9350     // TODO: As the result is sign-extended, this is conservatively correct. A
9351     // more precise answer could be calculated for SRAW depending on known
9352     // bits in the shift amount.
9353     return 33;
9354   case RISCVISD::SHFL:
9355   case RISCVISD::UNSHFL: {
9356     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
9357     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
9358     // will stay within the upper 32 bits. If there were more than 32 sign bits
9359     // before there will be at least 33 sign bits after.
9360     if (Op.getValueType() == MVT::i64 &&
9361         isa<ConstantSDNode>(Op.getOperand(1)) &&
9362         (Op.getConstantOperandVal(1) & 0x10) == 0) {
9363       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
9364       if (Tmp > 32)
9365         return 33;
9366     }
9367     break;
9368   }
9369   case RISCVISD::VMV_X_S: {
9370     // The number of sign bits of the scalar result is computed by obtaining the
9371     // element type of the input vector operand, subtracting its width from the
9372     // XLEN, and then adding one (sign bit within the element type). If the
9373     // element type is wider than XLen, the least-significant XLEN bits are
9374     // taken.
9375     unsigned XLen = Subtarget.getXLen();
9376     unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
9377     if (EltBits <= XLen)
9378       return XLen - EltBits + 1;
9379     break;
9380   }
9381   }
9382 
9383   return 1;
9384 }
9385 
9386 const Constant *
9387 RISCVTargetLowering::getTargetConstantFromLoad(LoadSDNode *Ld) const {
9388   assert(Ld && "Unexpected null LoadSDNode");
9389   if (!ISD::isNormalLoad(Ld))
9390     return nullptr;
9391 
9392   SDValue Ptr = Ld->getBasePtr();
9393 
9394   // Only constant pools with no offset are supported.
9395   auto GetSupportedConstantPool = [](SDValue Ptr) -> ConstantPoolSDNode * {
9396     auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
9397     if (!CNode || CNode->isMachineConstantPoolEntry() ||
9398         CNode->getOffset() != 0)
9399       return nullptr;
9400 
9401     return CNode;
9402   };
9403 
9404   // Simple case, LLA.
9405   if (Ptr.getOpcode() == RISCVISD::LLA) {
9406     auto *CNode = GetSupportedConstantPool(Ptr);
9407     if (!CNode || CNode->getTargetFlags() != 0)
9408       return nullptr;
9409 
9410     return CNode->getConstVal();
9411   }
9412 
9413   // Look for a HI and ADD_LO pair.
9414   if (Ptr.getOpcode() != RISCVISD::ADD_LO ||
9415       Ptr.getOperand(0).getOpcode() != RISCVISD::HI)
9416     return nullptr;
9417 
9418   auto *CNodeLo = GetSupportedConstantPool(Ptr.getOperand(1));
9419   auto *CNodeHi = GetSupportedConstantPool(Ptr.getOperand(0).getOperand(0));
9420 
9421   if (!CNodeLo || CNodeLo->getTargetFlags() != RISCVII::MO_LO ||
9422       !CNodeHi || CNodeHi->getTargetFlags() != RISCVII::MO_HI)
9423     return nullptr;
9424 
9425   if (CNodeLo->getConstVal() != CNodeHi->getConstVal())
9426     return nullptr;
9427 
9428   return CNodeLo->getConstVal();
9429 }
9430 
9431 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
9432                                                   MachineBasicBlock *BB) {
9433   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
9434 
9435   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
9436   // Should the count have wrapped while it was being read, we need to try
9437   // again.
9438   // ...
9439   // read:
9440   // rdcycleh x3 # load high word of cycle
9441   // rdcycle  x2 # load low word of cycle
9442   // rdcycleh x4 # load high word of cycle
9443   // bne x3, x4, read # check if high word reads match, otherwise try again
9444   // ...
9445 
9446   MachineFunction &MF = *BB->getParent();
9447   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9448   MachineFunction::iterator It = ++BB->getIterator();
9449 
9450   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9451   MF.insert(It, LoopMBB);
9452 
9453   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9454   MF.insert(It, DoneMBB);
9455 
9456   // Transfer the remainder of BB and its successor edges to DoneMBB.
9457   DoneMBB->splice(DoneMBB->begin(), BB,
9458                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9459   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
9460 
9461   BB->addSuccessor(LoopMBB);
9462 
9463   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9464   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9465   Register LoReg = MI.getOperand(0).getReg();
9466   Register HiReg = MI.getOperand(1).getReg();
9467   DebugLoc DL = MI.getDebugLoc();
9468 
9469   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
9470   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
9471       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9472       .addReg(RISCV::X0);
9473   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
9474       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
9475       .addReg(RISCV::X0);
9476   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
9477       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9478       .addReg(RISCV::X0);
9479 
9480   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
9481       .addReg(HiReg)
9482       .addReg(ReadAgainReg)
9483       .addMBB(LoopMBB);
9484 
9485   LoopMBB->addSuccessor(LoopMBB);
9486   LoopMBB->addSuccessor(DoneMBB);
9487 
9488   MI.eraseFromParent();
9489 
9490   return DoneMBB;
9491 }
9492 
9493 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
9494                                              MachineBasicBlock *BB) {
9495   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
9496 
9497   MachineFunction &MF = *BB->getParent();
9498   DebugLoc DL = MI.getDebugLoc();
9499   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9500   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9501   Register LoReg = MI.getOperand(0).getReg();
9502   Register HiReg = MI.getOperand(1).getReg();
9503   Register SrcReg = MI.getOperand(2).getReg();
9504   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
9505   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9506 
9507   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
9508                           RI);
9509   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9510   MachineMemOperand *MMOLo =
9511       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
9512   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9513       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
9514   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
9515       .addFrameIndex(FI)
9516       .addImm(0)
9517       .addMemOperand(MMOLo);
9518   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
9519       .addFrameIndex(FI)
9520       .addImm(4)
9521       .addMemOperand(MMOHi);
9522   MI.eraseFromParent(); // The pseudo instruction is gone now.
9523   return BB;
9524 }
9525 
9526 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
9527                                                  MachineBasicBlock *BB) {
9528   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
9529          "Unexpected instruction");
9530 
9531   MachineFunction &MF = *BB->getParent();
9532   DebugLoc DL = MI.getDebugLoc();
9533   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9534   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9535   Register DstReg = MI.getOperand(0).getReg();
9536   Register LoReg = MI.getOperand(1).getReg();
9537   Register HiReg = MI.getOperand(2).getReg();
9538   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
9539   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9540 
9541   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9542   MachineMemOperand *MMOLo =
9543       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
9544   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9545       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
9546   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9547       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
9548       .addFrameIndex(FI)
9549       .addImm(0)
9550       .addMemOperand(MMOLo);
9551   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9552       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
9553       .addFrameIndex(FI)
9554       .addImm(4)
9555       .addMemOperand(MMOHi);
9556   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
9557   MI.eraseFromParent(); // The pseudo instruction is gone now.
9558   return BB;
9559 }
9560 
9561 static bool isSelectPseudo(MachineInstr &MI) {
9562   switch (MI.getOpcode()) {
9563   default:
9564     return false;
9565   case RISCV::Select_GPR_Using_CC_GPR:
9566   case RISCV::Select_FPR16_Using_CC_GPR:
9567   case RISCV::Select_FPR32_Using_CC_GPR:
9568   case RISCV::Select_FPR64_Using_CC_GPR:
9569     return true;
9570   }
9571 }
9572 
9573 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
9574                                         unsigned RelOpcode, unsigned EqOpcode,
9575                                         const RISCVSubtarget &Subtarget) {
9576   DebugLoc DL = MI.getDebugLoc();
9577   Register DstReg = MI.getOperand(0).getReg();
9578   Register Src1Reg = MI.getOperand(1).getReg();
9579   Register Src2Reg = MI.getOperand(2).getReg();
9580   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
9581   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
9582   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
9583 
9584   // Save the current FFLAGS.
9585   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
9586 
9587   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
9588                  .addReg(Src1Reg)
9589                  .addReg(Src2Reg);
9590   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9591     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
9592 
9593   // Restore the FFLAGS.
9594   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
9595       .addReg(SavedFFlags, RegState::Kill);
9596 
9597   // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
9598   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
9599                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
9600                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
9601   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9602     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
9603 
9604   // Erase the pseudoinstruction.
9605   MI.eraseFromParent();
9606   return BB;
9607 }
9608 
9609 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
9610                                            MachineBasicBlock *BB,
9611                                            const RISCVSubtarget &Subtarget) {
9612   // To "insert" Select_* instructions, we actually have to insert the triangle
9613   // control-flow pattern.  The incoming instructions know the destination vreg
9614   // to set, the condition code register to branch on, the true/false values to
9615   // select between, and the condcode to use to select the appropriate branch.
9616   //
9617   // We produce the following control flow:
9618   //     HeadMBB
9619   //     |  \
9620   //     |  IfFalseMBB
9621   //     | /
9622   //    TailMBB
9623   //
9624   // When we find a sequence of selects we attempt to optimize their emission
9625   // by sharing the control flow. Currently we only handle cases where we have
9626   // multiple selects with the exact same condition (same LHS, RHS and CC).
9627   // The selects may be interleaved with other instructions if the other
9628   // instructions meet some requirements we deem safe:
9629   // - They are debug instructions. Otherwise,
9630   // - They do not have side-effects, do not access memory and their inputs do
9631   //   not depend on the results of the select pseudo-instructions.
9632   // The TrueV/FalseV operands of the selects cannot depend on the result of
9633   // previous selects in the sequence.
9634   // These conditions could be further relaxed. See the X86 target for a
9635   // related approach and more information.
9636   Register LHS = MI.getOperand(1).getReg();
9637   Register RHS = MI.getOperand(2).getReg();
9638   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
9639 
9640   SmallVector<MachineInstr *, 4> SelectDebugValues;
9641   SmallSet<Register, 4> SelectDests;
9642   SelectDests.insert(MI.getOperand(0).getReg());
9643 
9644   MachineInstr *LastSelectPseudo = &MI;
9645 
9646   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
9647        SequenceMBBI != E; ++SequenceMBBI) {
9648     if (SequenceMBBI->isDebugInstr())
9649       continue;
9650     if (isSelectPseudo(*SequenceMBBI)) {
9651       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
9652           SequenceMBBI->getOperand(2).getReg() != RHS ||
9653           SequenceMBBI->getOperand(3).getImm() != CC ||
9654           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
9655           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
9656         break;
9657       LastSelectPseudo = &*SequenceMBBI;
9658       SequenceMBBI->collectDebugValues(SelectDebugValues);
9659       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
9660     } else {
9661       if (SequenceMBBI->hasUnmodeledSideEffects() ||
9662           SequenceMBBI->mayLoadOrStore())
9663         break;
9664       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
9665             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
9666           }))
9667         break;
9668     }
9669   }
9670 
9671   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
9672   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9673   DebugLoc DL = MI.getDebugLoc();
9674   MachineFunction::iterator I = ++BB->getIterator();
9675 
9676   MachineBasicBlock *HeadMBB = BB;
9677   MachineFunction *F = BB->getParent();
9678   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
9679   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
9680 
9681   F->insert(I, IfFalseMBB);
9682   F->insert(I, TailMBB);
9683 
9684   // Transfer debug instructions associated with the selects to TailMBB.
9685   for (MachineInstr *DebugInstr : SelectDebugValues) {
9686     TailMBB->push_back(DebugInstr->removeFromParent());
9687   }
9688 
9689   // Move all instructions after the sequence to TailMBB.
9690   TailMBB->splice(TailMBB->end(), HeadMBB,
9691                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
9692   // Update machine-CFG edges by transferring all successors of the current
9693   // block to the new block which will contain the Phi nodes for the selects.
9694   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
9695   // Set the successors for HeadMBB.
9696   HeadMBB->addSuccessor(IfFalseMBB);
9697   HeadMBB->addSuccessor(TailMBB);
9698 
9699   // Insert appropriate branch.
9700   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
9701     .addReg(LHS)
9702     .addReg(RHS)
9703     .addMBB(TailMBB);
9704 
9705   // IfFalseMBB just falls through to TailMBB.
9706   IfFalseMBB->addSuccessor(TailMBB);
9707 
9708   // Create PHIs for all of the select pseudo-instructions.
9709   auto SelectMBBI = MI.getIterator();
9710   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
9711   auto InsertionPoint = TailMBB->begin();
9712   while (SelectMBBI != SelectEnd) {
9713     auto Next = std::next(SelectMBBI);
9714     if (isSelectPseudo(*SelectMBBI)) {
9715       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
9716       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
9717               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
9718           .addReg(SelectMBBI->getOperand(4).getReg())
9719           .addMBB(HeadMBB)
9720           .addReg(SelectMBBI->getOperand(5).getReg())
9721           .addMBB(IfFalseMBB);
9722       SelectMBBI->eraseFromParent();
9723     }
9724     SelectMBBI = Next;
9725   }
9726 
9727   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
9728   return TailMBB;
9729 }
9730 
9731 MachineBasicBlock *
9732 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
9733                                                  MachineBasicBlock *BB) const {
9734   switch (MI.getOpcode()) {
9735   default:
9736     llvm_unreachable("Unexpected instr type to insert");
9737   case RISCV::ReadCycleWide:
9738     assert(!Subtarget.is64Bit() &&
9739            "ReadCycleWrite is only to be used on riscv32");
9740     return emitReadCycleWidePseudo(MI, BB);
9741   case RISCV::Select_GPR_Using_CC_GPR:
9742   case RISCV::Select_FPR16_Using_CC_GPR:
9743   case RISCV::Select_FPR32_Using_CC_GPR:
9744   case RISCV::Select_FPR64_Using_CC_GPR:
9745     return emitSelectPseudo(MI, BB, Subtarget);
9746   case RISCV::BuildPairF64Pseudo:
9747     return emitBuildPairF64Pseudo(MI, BB);
9748   case RISCV::SplitF64Pseudo:
9749     return emitSplitF64Pseudo(MI, BB);
9750   case RISCV::PseudoQuietFLE_H:
9751     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
9752   case RISCV::PseudoQuietFLT_H:
9753     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
9754   case RISCV::PseudoQuietFLE_S:
9755     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
9756   case RISCV::PseudoQuietFLT_S:
9757     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
9758   case RISCV::PseudoQuietFLE_D:
9759     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
9760   case RISCV::PseudoQuietFLT_D:
9761     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
9762   }
9763 }
9764 
9765 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9766                                                         SDNode *Node) const {
9767   // Add FRM dependency to any instructions with dynamic rounding mode.
9768   unsigned Opc = MI.getOpcode();
9769   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
9770   if (Idx < 0)
9771     return;
9772   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
9773     return;
9774   // If the instruction already reads FRM, don't add another read.
9775   if (MI.readsRegister(RISCV::FRM))
9776     return;
9777   MI.addOperand(
9778       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
9779 }
9780 
9781 // Calling Convention Implementation.
9782 // The expectations for frontend ABI lowering vary from target to target.
9783 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
9784 // details, but this is a longer term goal. For now, we simply try to keep the
9785 // role of the frontend as simple and well-defined as possible. The rules can
9786 // be summarised as:
9787 // * Never split up large scalar arguments. We handle them here.
9788 // * If a hardfloat calling convention is being used, and the struct may be
9789 // passed in a pair of registers (fp+fp, int+fp), and both registers are
9790 // available, then pass as two separate arguments. If either the GPRs or FPRs
9791 // are exhausted, then pass according to the rule below.
9792 // * If a struct could never be passed in registers or directly in a stack
9793 // slot (as it is larger than 2*XLEN and the floating point rules don't
9794 // apply), then pass it using a pointer with the byval attribute.
9795 // * If a struct is less than 2*XLEN, then coerce to either a two-element
9796 // word-sized array or a 2*XLEN scalar (depending on alignment).
9797 // * The frontend can determine whether a struct is returned by reference or
9798 // not based on its size and fields. If it will be returned by reference, the
9799 // frontend must modify the prototype so a pointer with the sret annotation is
9800 // passed as the first argument. This is not necessary for large scalar
9801 // returns.
9802 // * Struct return values and varargs should be coerced to structs containing
9803 // register-size fields in the same situations they would be for fixed
9804 // arguments.
9805 
9806 static const MCPhysReg ArgGPRs[] = {
9807   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
9808   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
9809 };
9810 static const MCPhysReg ArgFPR16s[] = {
9811   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
9812   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
9813 };
9814 static const MCPhysReg ArgFPR32s[] = {
9815   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
9816   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
9817 };
9818 static const MCPhysReg ArgFPR64s[] = {
9819   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
9820   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
9821 };
9822 // This is an interim calling convention and it may be changed in the future.
9823 static const MCPhysReg ArgVRs[] = {
9824     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
9825     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
9826     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
9827 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
9828                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
9829                                      RISCV::V20M2, RISCV::V22M2};
9830 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
9831                                      RISCV::V20M4};
9832 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
9833 
9834 // Pass a 2*XLEN argument that has been split into two XLEN values through
9835 // registers or the stack as necessary.
9836 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
9837                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
9838                                 MVT ValVT2, MVT LocVT2,
9839                                 ISD::ArgFlagsTy ArgFlags2) {
9840   unsigned XLenInBytes = XLen / 8;
9841   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9842     // At least one half can be passed via register.
9843     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
9844                                      VA1.getLocVT(), CCValAssign::Full));
9845   } else {
9846     // Both halves must be passed on the stack, with proper alignment.
9847     Align StackAlign =
9848         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
9849     State.addLoc(
9850         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
9851                             State.AllocateStack(XLenInBytes, StackAlign),
9852                             VA1.getLocVT(), CCValAssign::Full));
9853     State.addLoc(CCValAssign::getMem(
9854         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9855         LocVT2, CCValAssign::Full));
9856     return false;
9857   }
9858 
9859   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9860     // The second half can also be passed via register.
9861     State.addLoc(
9862         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
9863   } else {
9864     // The second half is passed via the stack, without additional alignment.
9865     State.addLoc(CCValAssign::getMem(
9866         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9867         LocVT2, CCValAssign::Full));
9868   }
9869 
9870   return false;
9871 }
9872 
9873 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
9874                                Optional<unsigned> FirstMaskArgument,
9875                                CCState &State, const RISCVTargetLowering &TLI) {
9876   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
9877   if (RC == &RISCV::VRRegClass) {
9878     // Assign the first mask argument to V0.
9879     // This is an interim calling convention and it may be changed in the
9880     // future.
9881     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
9882       return State.AllocateReg(RISCV::V0);
9883     return State.AllocateReg(ArgVRs);
9884   }
9885   if (RC == &RISCV::VRM2RegClass)
9886     return State.AllocateReg(ArgVRM2s);
9887   if (RC == &RISCV::VRM4RegClass)
9888     return State.AllocateReg(ArgVRM4s);
9889   if (RC == &RISCV::VRM8RegClass)
9890     return State.AllocateReg(ArgVRM8s);
9891   llvm_unreachable("Unhandled register class for ValueType");
9892 }
9893 
9894 // Implements the RISC-V calling convention. Returns true upon failure.
9895 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
9896                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
9897                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
9898                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
9899                      Optional<unsigned> FirstMaskArgument) {
9900   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
9901   assert(XLen == 32 || XLen == 64);
9902   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
9903 
9904   // Any return value split in to more than two values can't be returned
9905   // directly. Vectors are returned via the available vector registers.
9906   if (!LocVT.isVector() && IsRet && ValNo > 1)
9907     return true;
9908 
9909   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
9910   // variadic argument, or if no F16/F32 argument registers are available.
9911   bool UseGPRForF16_F32 = true;
9912   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
9913   // variadic argument, or if no F64 argument registers are available.
9914   bool UseGPRForF64 = true;
9915 
9916   switch (ABI) {
9917   default:
9918     llvm_unreachable("Unexpected ABI");
9919   case RISCVABI::ABI_ILP32:
9920   case RISCVABI::ABI_LP64:
9921     break;
9922   case RISCVABI::ABI_ILP32F:
9923   case RISCVABI::ABI_LP64F:
9924     UseGPRForF16_F32 = !IsFixed;
9925     break;
9926   case RISCVABI::ABI_ILP32D:
9927   case RISCVABI::ABI_LP64D:
9928     UseGPRForF16_F32 = !IsFixed;
9929     UseGPRForF64 = !IsFixed;
9930     break;
9931   }
9932 
9933   // FPR16, FPR32, and FPR64 alias each other.
9934   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
9935     UseGPRForF16_F32 = true;
9936     UseGPRForF64 = true;
9937   }
9938 
9939   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
9940   // similar local variables rather than directly checking against the target
9941   // ABI.
9942 
9943   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
9944     LocVT = XLenVT;
9945     LocInfo = CCValAssign::BCvt;
9946   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
9947     LocVT = MVT::i64;
9948     LocInfo = CCValAssign::BCvt;
9949   }
9950 
9951   // If this is a variadic argument, the RISC-V calling convention requires
9952   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
9953   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
9954   // be used regardless of whether the original argument was split during
9955   // legalisation or not. The argument will not be passed by registers if the
9956   // original type is larger than 2*XLEN, so the register alignment rule does
9957   // not apply.
9958   unsigned TwoXLenInBytes = (2 * XLen) / 8;
9959   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
9960       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
9961     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
9962     // Skip 'odd' register if necessary.
9963     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
9964       State.AllocateReg(ArgGPRs);
9965   }
9966 
9967   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
9968   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
9969       State.getPendingArgFlags();
9970 
9971   assert(PendingLocs.size() == PendingArgFlags.size() &&
9972          "PendingLocs and PendingArgFlags out of sync");
9973 
9974   // Handle passing f64 on RV32D with a soft float ABI or when floating point
9975   // registers are exhausted.
9976   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
9977     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
9978            "Can't lower f64 if it is split");
9979     // Depending on available argument GPRS, f64 may be passed in a pair of
9980     // GPRs, split between a GPR and the stack, or passed completely on the
9981     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
9982     // cases.
9983     Register Reg = State.AllocateReg(ArgGPRs);
9984     LocVT = MVT::i32;
9985     if (!Reg) {
9986       unsigned StackOffset = State.AllocateStack(8, Align(8));
9987       State.addLoc(
9988           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9989       return false;
9990     }
9991     if (!State.AllocateReg(ArgGPRs))
9992       State.AllocateStack(4, Align(4));
9993     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9994     return false;
9995   }
9996 
9997   // Fixed-length vectors are located in the corresponding scalable-vector
9998   // container types.
9999   if (ValVT.isFixedLengthVector())
10000     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
10001 
10002   // Split arguments might be passed indirectly, so keep track of the pending
10003   // values. Split vectors are passed via a mix of registers and indirectly, so
10004   // treat them as we would any other argument.
10005   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
10006     LocVT = XLenVT;
10007     LocInfo = CCValAssign::Indirect;
10008     PendingLocs.push_back(
10009         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
10010     PendingArgFlags.push_back(ArgFlags);
10011     if (!ArgFlags.isSplitEnd()) {
10012       return false;
10013     }
10014   }
10015 
10016   // If the split argument only had two elements, it should be passed directly
10017   // in registers or on the stack.
10018   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
10019       PendingLocs.size() <= 2) {
10020     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
10021     // Apply the normal calling convention rules to the first half of the
10022     // split argument.
10023     CCValAssign VA = PendingLocs[0];
10024     ISD::ArgFlagsTy AF = PendingArgFlags[0];
10025     PendingLocs.clear();
10026     PendingArgFlags.clear();
10027     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
10028                                ArgFlags);
10029   }
10030 
10031   // Allocate to a register if possible, or else a stack slot.
10032   Register Reg;
10033   unsigned StoreSizeBytes = XLen / 8;
10034   Align StackAlign = Align(XLen / 8);
10035 
10036   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
10037     Reg = State.AllocateReg(ArgFPR16s);
10038   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
10039     Reg = State.AllocateReg(ArgFPR32s);
10040   else if (ValVT == MVT::f64 && !UseGPRForF64)
10041     Reg = State.AllocateReg(ArgFPR64s);
10042   else if (ValVT.isVector()) {
10043     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
10044     if (!Reg) {
10045       // For return values, the vector must be passed fully via registers or
10046       // via the stack.
10047       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
10048       // but we're using all of them.
10049       if (IsRet)
10050         return true;
10051       // Try using a GPR to pass the address
10052       if ((Reg = State.AllocateReg(ArgGPRs))) {
10053         LocVT = XLenVT;
10054         LocInfo = CCValAssign::Indirect;
10055       } else if (ValVT.isScalableVector()) {
10056         LocVT = XLenVT;
10057         LocInfo = CCValAssign::Indirect;
10058       } else {
10059         // Pass fixed-length vectors on the stack.
10060         LocVT = ValVT;
10061         StoreSizeBytes = ValVT.getStoreSize();
10062         // Align vectors to their element sizes, being careful for vXi1
10063         // vectors.
10064         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
10065       }
10066     }
10067   } else {
10068     Reg = State.AllocateReg(ArgGPRs);
10069   }
10070 
10071   unsigned StackOffset =
10072       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
10073 
10074   // If we reach this point and PendingLocs is non-empty, we must be at the
10075   // end of a split argument that must be passed indirectly.
10076   if (!PendingLocs.empty()) {
10077     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
10078     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
10079 
10080     for (auto &It : PendingLocs) {
10081       if (Reg)
10082         It.convertToReg(Reg);
10083       else
10084         It.convertToMem(StackOffset);
10085       State.addLoc(It);
10086     }
10087     PendingLocs.clear();
10088     PendingArgFlags.clear();
10089     return false;
10090   }
10091 
10092   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
10093           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
10094          "Expected an XLenVT or vector types at this stage");
10095 
10096   if (Reg) {
10097     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10098     return false;
10099   }
10100 
10101   // When a floating-point value is passed on the stack, no bit-conversion is
10102   // needed.
10103   if (ValVT.isFloatingPoint()) {
10104     LocVT = ValVT;
10105     LocInfo = CCValAssign::Full;
10106   }
10107   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10108   return false;
10109 }
10110 
10111 template <typename ArgTy>
10112 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
10113   for (const auto &ArgIdx : enumerate(Args)) {
10114     MVT ArgVT = ArgIdx.value().VT;
10115     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
10116       return ArgIdx.index();
10117   }
10118   return None;
10119 }
10120 
10121 void RISCVTargetLowering::analyzeInputArgs(
10122     MachineFunction &MF, CCState &CCInfo,
10123     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
10124     RISCVCCAssignFn Fn) const {
10125   unsigned NumArgs = Ins.size();
10126   FunctionType *FType = MF.getFunction().getFunctionType();
10127 
10128   Optional<unsigned> FirstMaskArgument;
10129   if (Subtarget.hasVInstructions())
10130     FirstMaskArgument = preAssignMask(Ins);
10131 
10132   for (unsigned i = 0; i != NumArgs; ++i) {
10133     MVT ArgVT = Ins[i].VT;
10134     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
10135 
10136     Type *ArgTy = nullptr;
10137     if (IsRet)
10138       ArgTy = FType->getReturnType();
10139     else if (Ins[i].isOrigArg())
10140       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
10141 
10142     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10143     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
10144            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
10145            FirstMaskArgument)) {
10146       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
10147                         << EVT(ArgVT).getEVTString() << '\n');
10148       llvm_unreachable(nullptr);
10149     }
10150   }
10151 }
10152 
10153 void RISCVTargetLowering::analyzeOutputArgs(
10154     MachineFunction &MF, CCState &CCInfo,
10155     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
10156     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
10157   unsigned NumArgs = Outs.size();
10158 
10159   Optional<unsigned> FirstMaskArgument;
10160   if (Subtarget.hasVInstructions())
10161     FirstMaskArgument = preAssignMask(Outs);
10162 
10163   for (unsigned i = 0; i != NumArgs; i++) {
10164     MVT ArgVT = Outs[i].VT;
10165     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10166     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
10167 
10168     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10169     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
10170            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
10171            FirstMaskArgument)) {
10172       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
10173                         << EVT(ArgVT).getEVTString() << "\n");
10174       llvm_unreachable(nullptr);
10175     }
10176   }
10177 }
10178 
10179 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
10180 // values.
10181 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
10182                                    const CCValAssign &VA, const SDLoc &DL,
10183                                    const RISCVSubtarget &Subtarget) {
10184   switch (VA.getLocInfo()) {
10185   default:
10186     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10187   case CCValAssign::Full:
10188     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
10189       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
10190     break;
10191   case CCValAssign::BCvt:
10192     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
10193       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
10194     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
10195       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
10196     else
10197       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
10198     break;
10199   }
10200   return Val;
10201 }
10202 
10203 // The caller is responsible for loading the full value if the argument is
10204 // passed with CCValAssign::Indirect.
10205 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
10206                                 const CCValAssign &VA, const SDLoc &DL,
10207                                 const RISCVTargetLowering &TLI) {
10208   MachineFunction &MF = DAG.getMachineFunction();
10209   MachineRegisterInfo &RegInfo = MF.getRegInfo();
10210   EVT LocVT = VA.getLocVT();
10211   SDValue Val;
10212   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
10213   Register VReg = RegInfo.createVirtualRegister(RC);
10214   RegInfo.addLiveIn(VA.getLocReg(), VReg);
10215   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
10216 
10217   if (VA.getLocInfo() == CCValAssign::Indirect)
10218     return Val;
10219 
10220   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
10221 }
10222 
10223 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
10224                                    const CCValAssign &VA, const SDLoc &DL,
10225                                    const RISCVSubtarget &Subtarget) {
10226   EVT LocVT = VA.getLocVT();
10227 
10228   switch (VA.getLocInfo()) {
10229   default:
10230     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10231   case CCValAssign::Full:
10232     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
10233       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
10234     break;
10235   case CCValAssign::BCvt:
10236     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
10237       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
10238     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
10239       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
10240     else
10241       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
10242     break;
10243   }
10244   return Val;
10245 }
10246 
10247 // The caller is responsible for loading the full value if the argument is
10248 // passed with CCValAssign::Indirect.
10249 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
10250                                 const CCValAssign &VA, const SDLoc &DL) {
10251   MachineFunction &MF = DAG.getMachineFunction();
10252   MachineFrameInfo &MFI = MF.getFrameInfo();
10253   EVT LocVT = VA.getLocVT();
10254   EVT ValVT = VA.getValVT();
10255   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
10256   if (ValVT.isScalableVector()) {
10257     // When the value is a scalable vector, we save the pointer which points to
10258     // the scalable vector value in the stack. The ValVT will be the pointer
10259     // type, instead of the scalable vector type.
10260     ValVT = LocVT;
10261   }
10262   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
10263                                  /*IsImmutable=*/true);
10264   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
10265   SDValue Val;
10266 
10267   ISD::LoadExtType ExtType;
10268   switch (VA.getLocInfo()) {
10269   default:
10270     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10271   case CCValAssign::Full:
10272   case CCValAssign::Indirect:
10273   case CCValAssign::BCvt:
10274     ExtType = ISD::NON_EXTLOAD;
10275     break;
10276   }
10277   Val = DAG.getExtLoad(
10278       ExtType, DL, LocVT, Chain, FIN,
10279       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
10280   return Val;
10281 }
10282 
10283 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
10284                                        const CCValAssign &VA, const SDLoc &DL) {
10285   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
10286          "Unexpected VA");
10287   MachineFunction &MF = DAG.getMachineFunction();
10288   MachineFrameInfo &MFI = MF.getFrameInfo();
10289   MachineRegisterInfo &RegInfo = MF.getRegInfo();
10290 
10291   if (VA.isMemLoc()) {
10292     // f64 is passed on the stack.
10293     int FI =
10294         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
10295     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10296     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
10297                        MachinePointerInfo::getFixedStack(MF, FI));
10298   }
10299 
10300   assert(VA.isRegLoc() && "Expected register VA assignment");
10301 
10302   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10303   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
10304   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
10305   SDValue Hi;
10306   if (VA.getLocReg() == RISCV::X17) {
10307     // Second half of f64 is passed on the stack.
10308     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
10309     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10310     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
10311                      MachinePointerInfo::getFixedStack(MF, FI));
10312   } else {
10313     // Second half of f64 is passed in another GPR.
10314     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10315     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
10316     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
10317   }
10318   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
10319 }
10320 
10321 // FastCC has less than 1% performance improvement for some particular
10322 // benchmark. But theoretically, it may has benenfit for some cases.
10323 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
10324                             unsigned ValNo, MVT ValVT, MVT LocVT,
10325                             CCValAssign::LocInfo LocInfo,
10326                             ISD::ArgFlagsTy ArgFlags, CCState &State,
10327                             bool IsFixed, bool IsRet, Type *OrigTy,
10328                             const RISCVTargetLowering &TLI,
10329                             Optional<unsigned> FirstMaskArgument) {
10330 
10331   // X5 and X6 might be used for save-restore libcall.
10332   static const MCPhysReg GPRList[] = {
10333       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
10334       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
10335       RISCV::X29, RISCV::X30, RISCV::X31};
10336 
10337   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10338     if (unsigned Reg = State.AllocateReg(GPRList)) {
10339       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10340       return false;
10341     }
10342   }
10343 
10344   if (LocVT == MVT::f16) {
10345     static const MCPhysReg FPR16List[] = {
10346         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
10347         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
10348         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
10349         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
10350     if (unsigned Reg = State.AllocateReg(FPR16List)) {
10351       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10352       return false;
10353     }
10354   }
10355 
10356   if (LocVT == MVT::f32) {
10357     static const MCPhysReg FPR32List[] = {
10358         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
10359         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
10360         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
10361         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
10362     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10363       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10364       return false;
10365     }
10366   }
10367 
10368   if (LocVT == MVT::f64) {
10369     static const MCPhysReg FPR64List[] = {
10370         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
10371         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
10372         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
10373         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
10374     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10375       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10376       return false;
10377     }
10378   }
10379 
10380   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
10381     unsigned Offset4 = State.AllocateStack(4, Align(4));
10382     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
10383     return false;
10384   }
10385 
10386   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
10387     unsigned Offset5 = State.AllocateStack(8, Align(8));
10388     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
10389     return false;
10390   }
10391 
10392   if (LocVT.isVector()) {
10393     if (unsigned Reg =
10394             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
10395       // Fixed-length vectors are located in the corresponding scalable-vector
10396       // container types.
10397       if (ValVT.isFixedLengthVector())
10398         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
10399       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10400     } else {
10401       // Try and pass the address via a "fast" GPR.
10402       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
10403         LocInfo = CCValAssign::Indirect;
10404         LocVT = TLI.getSubtarget().getXLenVT();
10405         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
10406       } else if (ValVT.isFixedLengthVector()) {
10407         auto StackAlign =
10408             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
10409         unsigned StackOffset =
10410             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
10411         State.addLoc(
10412             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10413       } else {
10414         // Can't pass scalable vectors on the stack.
10415         return true;
10416       }
10417     }
10418 
10419     return false;
10420   }
10421 
10422   return true; // CC didn't match.
10423 }
10424 
10425 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
10426                          CCValAssign::LocInfo LocInfo,
10427                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
10428 
10429   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10430     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
10431     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
10432     static const MCPhysReg GPRList[] = {
10433         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
10434         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
10435     if (unsigned Reg = State.AllocateReg(GPRList)) {
10436       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10437       return false;
10438     }
10439   }
10440 
10441   if (LocVT == MVT::f32) {
10442     // Pass in STG registers: F1, ..., F6
10443     //                        fs0 ... fs5
10444     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
10445                                           RISCV::F18_F, RISCV::F19_F,
10446                                           RISCV::F20_F, RISCV::F21_F};
10447     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10448       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10449       return false;
10450     }
10451   }
10452 
10453   if (LocVT == MVT::f64) {
10454     // Pass in STG registers: D1, ..., D6
10455     //                        fs6 ... fs11
10456     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
10457                                           RISCV::F24_D, RISCV::F25_D,
10458                                           RISCV::F26_D, RISCV::F27_D};
10459     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10460       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10461       return false;
10462     }
10463   }
10464 
10465   report_fatal_error("No registers left in GHC calling convention");
10466   return true;
10467 }
10468 
10469 // Transform physical registers into virtual registers.
10470 SDValue RISCVTargetLowering::LowerFormalArguments(
10471     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
10472     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
10473     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
10474 
10475   MachineFunction &MF = DAG.getMachineFunction();
10476 
10477   switch (CallConv) {
10478   default:
10479     report_fatal_error("Unsupported calling convention");
10480   case CallingConv::C:
10481   case CallingConv::Fast:
10482     break;
10483   case CallingConv::GHC:
10484     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
10485         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
10486       report_fatal_error(
10487         "GHC calling convention requires the F and D instruction set extensions");
10488   }
10489 
10490   const Function &Func = MF.getFunction();
10491   if (Func.hasFnAttribute("interrupt")) {
10492     if (!Func.arg_empty())
10493       report_fatal_error(
10494         "Functions with the interrupt attribute cannot have arguments!");
10495 
10496     StringRef Kind =
10497       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10498 
10499     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
10500       report_fatal_error(
10501         "Function interrupt attribute argument not supported!");
10502   }
10503 
10504   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10505   MVT XLenVT = Subtarget.getXLenVT();
10506   unsigned XLenInBytes = Subtarget.getXLen() / 8;
10507   // Used with vargs to acumulate store chains.
10508   std::vector<SDValue> OutChains;
10509 
10510   // Assign locations to all of the incoming arguments.
10511   SmallVector<CCValAssign, 16> ArgLocs;
10512   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10513 
10514   if (CallConv == CallingConv::GHC)
10515     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
10516   else
10517     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
10518                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10519                                                    : CC_RISCV);
10520 
10521   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
10522     CCValAssign &VA = ArgLocs[i];
10523     SDValue ArgValue;
10524     // Passing f64 on RV32D with a soft float ABI must be handled as a special
10525     // case.
10526     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
10527       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
10528     else if (VA.isRegLoc())
10529       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
10530     else
10531       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
10532 
10533     if (VA.getLocInfo() == CCValAssign::Indirect) {
10534       // If the original argument was split and passed by reference (e.g. i128
10535       // on RV32), we need to load all parts of it here (using the same
10536       // address). Vectors may be partly split to registers and partly to the
10537       // stack, in which case the base address is partly offset and subsequent
10538       // stores are relative to that.
10539       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
10540                                    MachinePointerInfo()));
10541       unsigned ArgIndex = Ins[i].OrigArgIndex;
10542       unsigned ArgPartOffset = Ins[i].PartOffset;
10543       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10544       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
10545         CCValAssign &PartVA = ArgLocs[i + 1];
10546         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
10547         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10548         if (PartVA.getValVT().isScalableVector())
10549           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10550         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
10551         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
10552                                      MachinePointerInfo()));
10553         ++i;
10554       }
10555       continue;
10556     }
10557     InVals.push_back(ArgValue);
10558   }
10559 
10560   if (IsVarArg) {
10561     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
10562     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
10563     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
10564     MachineFrameInfo &MFI = MF.getFrameInfo();
10565     MachineRegisterInfo &RegInfo = MF.getRegInfo();
10566     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
10567 
10568     // Offset of the first variable argument from stack pointer, and size of
10569     // the vararg save area. For now, the varargs save area is either zero or
10570     // large enough to hold a0-a7.
10571     int VaArgOffset, VarArgsSaveSize;
10572 
10573     // If all registers are allocated, then all varargs must be passed on the
10574     // stack and we don't need to save any argregs.
10575     if (ArgRegs.size() == Idx) {
10576       VaArgOffset = CCInfo.getNextStackOffset();
10577       VarArgsSaveSize = 0;
10578     } else {
10579       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
10580       VaArgOffset = -VarArgsSaveSize;
10581     }
10582 
10583     // Record the frame index of the first variable argument
10584     // which is a value necessary to VASTART.
10585     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10586     RVFI->setVarArgsFrameIndex(FI);
10587 
10588     // If saving an odd number of registers then create an extra stack slot to
10589     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
10590     // offsets to even-numbered registered remain 2*XLEN-aligned.
10591     if (Idx % 2) {
10592       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
10593       VarArgsSaveSize += XLenInBytes;
10594     }
10595 
10596     // Copy the integer registers that may have been used for passing varargs
10597     // to the vararg save area.
10598     for (unsigned I = Idx; I < ArgRegs.size();
10599          ++I, VaArgOffset += XLenInBytes) {
10600       const Register Reg = RegInfo.createVirtualRegister(RC);
10601       RegInfo.addLiveIn(ArgRegs[I], Reg);
10602       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
10603       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10604       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10605       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
10606                                    MachinePointerInfo::getFixedStack(MF, FI));
10607       cast<StoreSDNode>(Store.getNode())
10608           ->getMemOperand()
10609           ->setValue((Value *)nullptr);
10610       OutChains.push_back(Store);
10611     }
10612     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
10613   }
10614 
10615   // All stores are grouped in one node to allow the matching between
10616   // the size of Ins and InVals. This only happens for vararg functions.
10617   if (!OutChains.empty()) {
10618     OutChains.push_back(Chain);
10619     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
10620   }
10621 
10622   return Chain;
10623 }
10624 
10625 /// isEligibleForTailCallOptimization - Check whether the call is eligible
10626 /// for tail call optimization.
10627 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
10628 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
10629     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
10630     const SmallVector<CCValAssign, 16> &ArgLocs) const {
10631 
10632   auto &Callee = CLI.Callee;
10633   auto CalleeCC = CLI.CallConv;
10634   auto &Outs = CLI.Outs;
10635   auto &Caller = MF.getFunction();
10636   auto CallerCC = Caller.getCallingConv();
10637 
10638   // Exception-handling functions need a special set of instructions to
10639   // indicate a return to the hardware. Tail-calling another function would
10640   // probably break this.
10641   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
10642   // should be expanded as new function attributes are introduced.
10643   if (Caller.hasFnAttribute("interrupt"))
10644     return false;
10645 
10646   // Do not tail call opt if the stack is used to pass parameters.
10647   if (CCInfo.getNextStackOffset() != 0)
10648     return false;
10649 
10650   // Do not tail call opt if any parameters need to be passed indirectly.
10651   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
10652   // passed indirectly. So the address of the value will be passed in a
10653   // register, or if not available, then the address is put on the stack. In
10654   // order to pass indirectly, space on the stack often needs to be allocated
10655   // in order to store the value. In this case the CCInfo.getNextStackOffset()
10656   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
10657   // are passed CCValAssign::Indirect.
10658   for (auto &VA : ArgLocs)
10659     if (VA.getLocInfo() == CCValAssign::Indirect)
10660       return false;
10661 
10662   // Do not tail call opt if either caller or callee uses struct return
10663   // semantics.
10664   auto IsCallerStructRet = Caller.hasStructRetAttr();
10665   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
10666   if (IsCallerStructRet || IsCalleeStructRet)
10667     return false;
10668 
10669   // Externally-defined functions with weak linkage should not be
10670   // tail-called. The behaviour of branch instructions in this situation (as
10671   // used for tail calls) is implementation-defined, so we cannot rely on the
10672   // linker replacing the tail call with a return.
10673   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
10674     const GlobalValue *GV = G->getGlobal();
10675     if (GV->hasExternalWeakLinkage())
10676       return false;
10677   }
10678 
10679   // The callee has to preserve all registers the caller needs to preserve.
10680   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
10681   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
10682   if (CalleeCC != CallerCC) {
10683     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
10684     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
10685       return false;
10686   }
10687 
10688   // Byval parameters hand the function a pointer directly into the stack area
10689   // we want to reuse during a tail call. Working around this *is* possible
10690   // but less efficient and uglier in LowerCall.
10691   for (auto &Arg : Outs)
10692     if (Arg.Flags.isByVal())
10693       return false;
10694 
10695   return true;
10696 }
10697 
10698 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
10699   return DAG.getDataLayout().getPrefTypeAlign(
10700       VT.getTypeForEVT(*DAG.getContext()));
10701 }
10702 
10703 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
10704 // and output parameter nodes.
10705 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
10706                                        SmallVectorImpl<SDValue> &InVals) const {
10707   SelectionDAG &DAG = CLI.DAG;
10708   SDLoc &DL = CLI.DL;
10709   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
10710   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
10711   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
10712   SDValue Chain = CLI.Chain;
10713   SDValue Callee = CLI.Callee;
10714   bool &IsTailCall = CLI.IsTailCall;
10715   CallingConv::ID CallConv = CLI.CallConv;
10716   bool IsVarArg = CLI.IsVarArg;
10717   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10718   MVT XLenVT = Subtarget.getXLenVT();
10719 
10720   MachineFunction &MF = DAG.getMachineFunction();
10721 
10722   // Analyze the operands of the call, assigning locations to each operand.
10723   SmallVector<CCValAssign, 16> ArgLocs;
10724   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10725 
10726   if (CallConv == CallingConv::GHC)
10727     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
10728   else
10729     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
10730                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10731                                                     : CC_RISCV);
10732 
10733   // Check if it's really possible to do a tail call.
10734   if (IsTailCall)
10735     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
10736 
10737   if (IsTailCall)
10738     ++NumTailCalls;
10739   else if (CLI.CB && CLI.CB->isMustTailCall())
10740     report_fatal_error("failed to perform tail call elimination on a call "
10741                        "site marked musttail");
10742 
10743   // Get a count of how many bytes are to be pushed on the stack.
10744   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
10745 
10746   // Create local copies for byval args
10747   SmallVector<SDValue, 8> ByValArgs;
10748   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10749     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10750     if (!Flags.isByVal())
10751       continue;
10752 
10753     SDValue Arg = OutVals[i];
10754     unsigned Size = Flags.getByValSize();
10755     Align Alignment = Flags.getNonZeroByValAlign();
10756 
10757     int FI =
10758         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
10759     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10760     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
10761 
10762     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
10763                           /*IsVolatile=*/false,
10764                           /*AlwaysInline=*/false, IsTailCall,
10765                           MachinePointerInfo(), MachinePointerInfo());
10766     ByValArgs.push_back(FIPtr);
10767   }
10768 
10769   if (!IsTailCall)
10770     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
10771 
10772   // Copy argument values to their designated locations.
10773   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
10774   SmallVector<SDValue, 8> MemOpChains;
10775   SDValue StackPtr;
10776   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
10777     CCValAssign &VA = ArgLocs[i];
10778     SDValue ArgValue = OutVals[i];
10779     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10780 
10781     // Handle passing f64 on RV32D with a soft float ABI as a special case.
10782     bool IsF64OnRV32DSoftABI =
10783         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
10784     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
10785       SDValue SplitF64 = DAG.getNode(
10786           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
10787       SDValue Lo = SplitF64.getValue(0);
10788       SDValue Hi = SplitF64.getValue(1);
10789 
10790       Register RegLo = VA.getLocReg();
10791       RegsToPass.push_back(std::make_pair(RegLo, Lo));
10792 
10793       if (RegLo == RISCV::X17) {
10794         // Second half of f64 is passed on the stack.
10795         // Work out the address of the stack slot.
10796         if (!StackPtr.getNode())
10797           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10798         // Emit the store.
10799         MemOpChains.push_back(
10800             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
10801       } else {
10802         // Second half of f64 is passed in another GPR.
10803         assert(RegLo < RISCV::X31 && "Invalid register pair");
10804         Register RegHigh = RegLo + 1;
10805         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
10806       }
10807       continue;
10808     }
10809 
10810     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
10811     // as any other MemLoc.
10812 
10813     // Promote the value if needed.
10814     // For now, only handle fully promoted and indirect arguments.
10815     if (VA.getLocInfo() == CCValAssign::Indirect) {
10816       // Store the argument in a stack slot and pass its address.
10817       Align StackAlign =
10818           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
10819                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
10820       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
10821       // If the original argument was split (e.g. i128), we need
10822       // to store the required parts of it here (and pass just one address).
10823       // Vectors may be partly split to registers and partly to the stack, in
10824       // which case the base address is partly offset and subsequent stores are
10825       // relative to that.
10826       unsigned ArgIndex = Outs[i].OrigArgIndex;
10827       unsigned ArgPartOffset = Outs[i].PartOffset;
10828       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10829       // Calculate the total size to store. We don't have access to what we're
10830       // actually storing other than performing the loop and collecting the
10831       // info.
10832       SmallVector<std::pair<SDValue, SDValue>> Parts;
10833       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
10834         SDValue PartValue = OutVals[i + 1];
10835         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
10836         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10837         EVT PartVT = PartValue.getValueType();
10838         if (PartVT.isScalableVector())
10839           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10840         StoredSize += PartVT.getStoreSize();
10841         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
10842         Parts.push_back(std::make_pair(PartValue, Offset));
10843         ++i;
10844       }
10845       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
10846       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
10847       MemOpChains.push_back(
10848           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
10849                        MachinePointerInfo::getFixedStack(MF, FI)));
10850       for (const auto &Part : Parts) {
10851         SDValue PartValue = Part.first;
10852         SDValue PartOffset = Part.second;
10853         SDValue Address =
10854             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
10855         MemOpChains.push_back(
10856             DAG.getStore(Chain, DL, PartValue, Address,
10857                          MachinePointerInfo::getFixedStack(MF, FI)));
10858       }
10859       ArgValue = SpillSlot;
10860     } else {
10861       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
10862     }
10863 
10864     // Use local copy if it is a byval arg.
10865     if (Flags.isByVal())
10866       ArgValue = ByValArgs[j++];
10867 
10868     if (VA.isRegLoc()) {
10869       // Queue up the argument copies and emit them at the end.
10870       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
10871     } else {
10872       assert(VA.isMemLoc() && "Argument not register or memory");
10873       assert(!IsTailCall && "Tail call not allowed if stack is used "
10874                             "for passing parameters");
10875 
10876       // Work out the address of the stack slot.
10877       if (!StackPtr.getNode())
10878         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10879       SDValue Address =
10880           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
10881                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
10882 
10883       // Emit the store.
10884       MemOpChains.push_back(
10885           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
10886     }
10887   }
10888 
10889   // Join the stores, which are independent of one another.
10890   if (!MemOpChains.empty())
10891     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
10892 
10893   SDValue Glue;
10894 
10895   // Build a sequence of copy-to-reg nodes, chained and glued together.
10896   for (auto &Reg : RegsToPass) {
10897     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
10898     Glue = Chain.getValue(1);
10899   }
10900 
10901   // Validate that none of the argument registers have been marked as
10902   // reserved, if so report an error. Do the same for the return address if this
10903   // is not a tailcall.
10904   validateCCReservedRegs(RegsToPass, MF);
10905   if (!IsTailCall &&
10906       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
10907     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10908         MF.getFunction(),
10909         "Return address register required, but has been reserved."});
10910 
10911   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
10912   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
10913   // split it and then direct call can be matched by PseudoCALL.
10914   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
10915     const GlobalValue *GV = S->getGlobal();
10916 
10917     unsigned OpFlags = RISCVII::MO_CALL;
10918     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
10919       OpFlags = RISCVII::MO_PLT;
10920 
10921     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
10922   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
10923     unsigned OpFlags = RISCVII::MO_CALL;
10924 
10925     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
10926                                                  nullptr))
10927       OpFlags = RISCVII::MO_PLT;
10928 
10929     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
10930   }
10931 
10932   // The first call operand is the chain and the second is the target address.
10933   SmallVector<SDValue, 8> Ops;
10934   Ops.push_back(Chain);
10935   Ops.push_back(Callee);
10936 
10937   // Add argument registers to the end of the list so that they are
10938   // known live into the call.
10939   for (auto &Reg : RegsToPass)
10940     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
10941 
10942   if (!IsTailCall) {
10943     // Add a register mask operand representing the call-preserved registers.
10944     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
10945     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
10946     assert(Mask && "Missing call preserved mask for calling convention");
10947     Ops.push_back(DAG.getRegisterMask(Mask));
10948   }
10949 
10950   // Glue the call to the argument copies, if any.
10951   if (Glue.getNode())
10952     Ops.push_back(Glue);
10953 
10954   // Emit the call.
10955   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10956 
10957   if (IsTailCall) {
10958     MF.getFrameInfo().setHasTailCall();
10959     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
10960   }
10961 
10962   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
10963   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
10964   Glue = Chain.getValue(1);
10965 
10966   // Mark the end of the call, which is glued to the call itself.
10967   Chain = DAG.getCALLSEQ_END(Chain,
10968                              DAG.getConstant(NumBytes, DL, PtrVT, true),
10969                              DAG.getConstant(0, DL, PtrVT, true),
10970                              Glue, DL);
10971   Glue = Chain.getValue(1);
10972 
10973   // Assign locations to each value returned by this call.
10974   SmallVector<CCValAssign, 16> RVLocs;
10975   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
10976   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
10977 
10978   // Copy all of the result registers out of their specified physreg.
10979   for (auto &VA : RVLocs) {
10980     // Copy the value out
10981     SDValue RetValue =
10982         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
10983     // Glue the RetValue to the end of the call sequence
10984     Chain = RetValue.getValue(1);
10985     Glue = RetValue.getValue(2);
10986 
10987     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10988       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
10989       SDValue RetValue2 =
10990           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
10991       Chain = RetValue2.getValue(1);
10992       Glue = RetValue2.getValue(2);
10993       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
10994                              RetValue2);
10995     }
10996 
10997     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
10998 
10999     InVals.push_back(RetValue);
11000   }
11001 
11002   return Chain;
11003 }
11004 
11005 bool RISCVTargetLowering::CanLowerReturn(
11006     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
11007     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
11008   SmallVector<CCValAssign, 16> RVLocs;
11009   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
11010 
11011   Optional<unsigned> FirstMaskArgument;
11012   if (Subtarget.hasVInstructions())
11013     FirstMaskArgument = preAssignMask(Outs);
11014 
11015   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
11016     MVT VT = Outs[i].VT;
11017     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
11018     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
11019     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
11020                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
11021                  *this, FirstMaskArgument))
11022       return false;
11023   }
11024   return true;
11025 }
11026 
11027 SDValue
11028 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
11029                                  bool IsVarArg,
11030                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
11031                                  const SmallVectorImpl<SDValue> &OutVals,
11032                                  const SDLoc &DL, SelectionDAG &DAG) const {
11033   const MachineFunction &MF = DAG.getMachineFunction();
11034   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
11035 
11036   // Stores the assignment of the return value to a location.
11037   SmallVector<CCValAssign, 16> RVLocs;
11038 
11039   // Info about the registers and stack slot.
11040   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
11041                  *DAG.getContext());
11042 
11043   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
11044                     nullptr, CC_RISCV);
11045 
11046   if (CallConv == CallingConv::GHC && !RVLocs.empty())
11047     report_fatal_error("GHC functions return void only");
11048 
11049   SDValue Glue;
11050   SmallVector<SDValue, 4> RetOps(1, Chain);
11051 
11052   // Copy the result values into the output registers.
11053   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
11054     SDValue Val = OutVals[i];
11055     CCValAssign &VA = RVLocs[i];
11056     assert(VA.isRegLoc() && "Can only return in registers!");
11057 
11058     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
11059       // Handle returning f64 on RV32D with a soft float ABI.
11060       assert(VA.isRegLoc() && "Expected return via registers");
11061       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
11062                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
11063       SDValue Lo = SplitF64.getValue(0);
11064       SDValue Hi = SplitF64.getValue(1);
11065       Register RegLo = VA.getLocReg();
11066       assert(RegLo < RISCV::X31 && "Invalid register pair");
11067       Register RegHi = RegLo + 1;
11068 
11069       if (STI.isRegisterReservedByUser(RegLo) ||
11070           STI.isRegisterReservedByUser(RegHi))
11071         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
11072             MF.getFunction(),
11073             "Return value register required, but has been reserved."});
11074 
11075       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
11076       Glue = Chain.getValue(1);
11077       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
11078       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
11079       Glue = Chain.getValue(1);
11080       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
11081     } else {
11082       // Handle a 'normal' return.
11083       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
11084       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
11085 
11086       if (STI.isRegisterReservedByUser(VA.getLocReg()))
11087         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
11088             MF.getFunction(),
11089             "Return value register required, but has been reserved."});
11090 
11091       // Guarantee that all emitted copies are stuck together.
11092       Glue = Chain.getValue(1);
11093       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
11094     }
11095   }
11096 
11097   RetOps[0] = Chain; // Update chain.
11098 
11099   // Add the glue node if we have it.
11100   if (Glue.getNode()) {
11101     RetOps.push_back(Glue);
11102   }
11103 
11104   unsigned RetOpc = RISCVISD::RET_FLAG;
11105   // Interrupt service routines use different return instructions.
11106   const Function &Func = DAG.getMachineFunction().getFunction();
11107   if (Func.hasFnAttribute("interrupt")) {
11108     if (!Func.getReturnType()->isVoidTy())
11109       report_fatal_error(
11110           "Functions with the interrupt attribute must have void return type!");
11111 
11112     MachineFunction &MF = DAG.getMachineFunction();
11113     StringRef Kind =
11114       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
11115 
11116     if (Kind == "user")
11117       RetOpc = RISCVISD::URET_FLAG;
11118     else if (Kind == "supervisor")
11119       RetOpc = RISCVISD::SRET_FLAG;
11120     else
11121       RetOpc = RISCVISD::MRET_FLAG;
11122   }
11123 
11124   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
11125 }
11126 
11127 void RISCVTargetLowering::validateCCReservedRegs(
11128     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
11129     MachineFunction &MF) const {
11130   const Function &F = MF.getFunction();
11131   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
11132 
11133   if (llvm::any_of(Regs, [&STI](auto Reg) {
11134         return STI.isRegisterReservedByUser(Reg.first);
11135       }))
11136     F.getContext().diagnose(DiagnosticInfoUnsupported{
11137         F, "Argument register required, but has been reserved."});
11138 }
11139 
11140 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
11141   return CI->isTailCall();
11142 }
11143 
11144 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
11145 #define NODE_NAME_CASE(NODE)                                                   \
11146   case RISCVISD::NODE:                                                         \
11147     return "RISCVISD::" #NODE;
11148   // clang-format off
11149   switch ((RISCVISD::NodeType)Opcode) {
11150   case RISCVISD::FIRST_NUMBER:
11151     break;
11152   NODE_NAME_CASE(RET_FLAG)
11153   NODE_NAME_CASE(URET_FLAG)
11154   NODE_NAME_CASE(SRET_FLAG)
11155   NODE_NAME_CASE(MRET_FLAG)
11156   NODE_NAME_CASE(CALL)
11157   NODE_NAME_CASE(SELECT_CC)
11158   NODE_NAME_CASE(BR_CC)
11159   NODE_NAME_CASE(BuildPairF64)
11160   NODE_NAME_CASE(SplitF64)
11161   NODE_NAME_CASE(TAIL)
11162   NODE_NAME_CASE(ADD_LO)
11163   NODE_NAME_CASE(HI)
11164   NODE_NAME_CASE(LLA)
11165   NODE_NAME_CASE(MULHSU)
11166   NODE_NAME_CASE(SLLW)
11167   NODE_NAME_CASE(SRAW)
11168   NODE_NAME_CASE(SRLW)
11169   NODE_NAME_CASE(DIVW)
11170   NODE_NAME_CASE(DIVUW)
11171   NODE_NAME_CASE(REMUW)
11172   NODE_NAME_CASE(ROLW)
11173   NODE_NAME_CASE(RORW)
11174   NODE_NAME_CASE(CLZW)
11175   NODE_NAME_CASE(CTZW)
11176   NODE_NAME_CASE(FSLW)
11177   NODE_NAME_CASE(FSRW)
11178   NODE_NAME_CASE(FSL)
11179   NODE_NAME_CASE(FSR)
11180   NODE_NAME_CASE(FMV_H_X)
11181   NODE_NAME_CASE(FMV_X_ANYEXTH)
11182   NODE_NAME_CASE(FMV_X_SIGNEXTH)
11183   NODE_NAME_CASE(FMV_W_X_RV64)
11184   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
11185   NODE_NAME_CASE(FCVT_X)
11186   NODE_NAME_CASE(FCVT_XU)
11187   NODE_NAME_CASE(FCVT_W_RV64)
11188   NODE_NAME_CASE(FCVT_WU_RV64)
11189   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
11190   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
11191   NODE_NAME_CASE(READ_CYCLE_WIDE)
11192   NODE_NAME_CASE(GREV)
11193   NODE_NAME_CASE(GREVW)
11194   NODE_NAME_CASE(GORC)
11195   NODE_NAME_CASE(GORCW)
11196   NODE_NAME_CASE(SHFL)
11197   NODE_NAME_CASE(SHFLW)
11198   NODE_NAME_CASE(UNSHFL)
11199   NODE_NAME_CASE(UNSHFLW)
11200   NODE_NAME_CASE(BFP)
11201   NODE_NAME_CASE(BFPW)
11202   NODE_NAME_CASE(BCOMPRESS)
11203   NODE_NAME_CASE(BCOMPRESSW)
11204   NODE_NAME_CASE(BDECOMPRESS)
11205   NODE_NAME_CASE(BDECOMPRESSW)
11206   NODE_NAME_CASE(VMV_V_X_VL)
11207   NODE_NAME_CASE(VFMV_V_F_VL)
11208   NODE_NAME_CASE(VMV_X_S)
11209   NODE_NAME_CASE(VMV_S_X_VL)
11210   NODE_NAME_CASE(VFMV_S_F_VL)
11211   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
11212   NODE_NAME_CASE(READ_VLENB)
11213   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
11214   NODE_NAME_CASE(VSLIDEUP_VL)
11215   NODE_NAME_CASE(VSLIDE1UP_VL)
11216   NODE_NAME_CASE(VSLIDEDOWN_VL)
11217   NODE_NAME_CASE(VSLIDE1DOWN_VL)
11218   NODE_NAME_CASE(VID_VL)
11219   NODE_NAME_CASE(VFNCVT_ROD_VL)
11220   NODE_NAME_CASE(VECREDUCE_ADD_VL)
11221   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
11222   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
11223   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
11224   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
11225   NODE_NAME_CASE(VECREDUCE_AND_VL)
11226   NODE_NAME_CASE(VECREDUCE_OR_VL)
11227   NODE_NAME_CASE(VECREDUCE_XOR_VL)
11228   NODE_NAME_CASE(VECREDUCE_FADD_VL)
11229   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
11230   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
11231   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
11232   NODE_NAME_CASE(ADD_VL)
11233   NODE_NAME_CASE(AND_VL)
11234   NODE_NAME_CASE(MUL_VL)
11235   NODE_NAME_CASE(OR_VL)
11236   NODE_NAME_CASE(SDIV_VL)
11237   NODE_NAME_CASE(SHL_VL)
11238   NODE_NAME_CASE(SREM_VL)
11239   NODE_NAME_CASE(SRA_VL)
11240   NODE_NAME_CASE(SRL_VL)
11241   NODE_NAME_CASE(SUB_VL)
11242   NODE_NAME_CASE(UDIV_VL)
11243   NODE_NAME_CASE(UREM_VL)
11244   NODE_NAME_CASE(XOR_VL)
11245   NODE_NAME_CASE(SADDSAT_VL)
11246   NODE_NAME_CASE(UADDSAT_VL)
11247   NODE_NAME_CASE(SSUBSAT_VL)
11248   NODE_NAME_CASE(USUBSAT_VL)
11249   NODE_NAME_CASE(FADD_VL)
11250   NODE_NAME_CASE(FSUB_VL)
11251   NODE_NAME_CASE(FMUL_VL)
11252   NODE_NAME_CASE(FDIV_VL)
11253   NODE_NAME_CASE(FNEG_VL)
11254   NODE_NAME_CASE(FABS_VL)
11255   NODE_NAME_CASE(FSQRT_VL)
11256   NODE_NAME_CASE(FMA_VL)
11257   NODE_NAME_CASE(FCOPYSIGN_VL)
11258   NODE_NAME_CASE(SMIN_VL)
11259   NODE_NAME_CASE(SMAX_VL)
11260   NODE_NAME_CASE(UMIN_VL)
11261   NODE_NAME_CASE(UMAX_VL)
11262   NODE_NAME_CASE(FMINNUM_VL)
11263   NODE_NAME_CASE(FMAXNUM_VL)
11264   NODE_NAME_CASE(MULHS_VL)
11265   NODE_NAME_CASE(MULHU_VL)
11266   NODE_NAME_CASE(FP_TO_SINT_VL)
11267   NODE_NAME_CASE(FP_TO_UINT_VL)
11268   NODE_NAME_CASE(SINT_TO_FP_VL)
11269   NODE_NAME_CASE(UINT_TO_FP_VL)
11270   NODE_NAME_CASE(FP_EXTEND_VL)
11271   NODE_NAME_CASE(FP_ROUND_VL)
11272   NODE_NAME_CASE(VWMUL_VL)
11273   NODE_NAME_CASE(VWMULU_VL)
11274   NODE_NAME_CASE(VWMULSU_VL)
11275   NODE_NAME_CASE(VWADD_VL)
11276   NODE_NAME_CASE(VWADDU_VL)
11277   NODE_NAME_CASE(VWSUB_VL)
11278   NODE_NAME_CASE(VWSUBU_VL)
11279   NODE_NAME_CASE(VWADD_W_VL)
11280   NODE_NAME_CASE(VWADDU_W_VL)
11281   NODE_NAME_CASE(VWSUB_W_VL)
11282   NODE_NAME_CASE(VWSUBU_W_VL)
11283   NODE_NAME_CASE(SETCC_VL)
11284   NODE_NAME_CASE(VSELECT_VL)
11285   NODE_NAME_CASE(VP_MERGE_VL)
11286   NODE_NAME_CASE(VMAND_VL)
11287   NODE_NAME_CASE(VMOR_VL)
11288   NODE_NAME_CASE(VMXOR_VL)
11289   NODE_NAME_CASE(VMCLR_VL)
11290   NODE_NAME_CASE(VMSET_VL)
11291   NODE_NAME_CASE(VRGATHER_VX_VL)
11292   NODE_NAME_CASE(VRGATHER_VV_VL)
11293   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
11294   NODE_NAME_CASE(VSEXT_VL)
11295   NODE_NAME_CASE(VZEXT_VL)
11296   NODE_NAME_CASE(VCPOP_VL)
11297   NODE_NAME_CASE(READ_CSR)
11298   NODE_NAME_CASE(WRITE_CSR)
11299   NODE_NAME_CASE(SWAP_CSR)
11300   }
11301   // clang-format on
11302   return nullptr;
11303 #undef NODE_NAME_CASE
11304 }
11305 
11306 /// getConstraintType - Given a constraint letter, return the type of
11307 /// constraint it is for this target.
11308 RISCVTargetLowering::ConstraintType
11309 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
11310   if (Constraint.size() == 1) {
11311     switch (Constraint[0]) {
11312     default:
11313       break;
11314     case 'f':
11315       return C_RegisterClass;
11316     case 'I':
11317     case 'J':
11318     case 'K':
11319       return C_Immediate;
11320     case 'A':
11321       return C_Memory;
11322     case 'S': // A symbolic address
11323       return C_Other;
11324     }
11325   } else {
11326     if (Constraint == "vr" || Constraint == "vm")
11327       return C_RegisterClass;
11328   }
11329   return TargetLowering::getConstraintType(Constraint);
11330 }
11331 
11332 std::pair<unsigned, const TargetRegisterClass *>
11333 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
11334                                                   StringRef Constraint,
11335                                                   MVT VT) const {
11336   // First, see if this is a constraint that directly corresponds to a
11337   // RISCV register class.
11338   if (Constraint.size() == 1) {
11339     switch (Constraint[0]) {
11340     case 'r':
11341       // TODO: Support fixed vectors up to XLen for P extension?
11342       if (VT.isVector())
11343         break;
11344       return std::make_pair(0U, &RISCV::GPRRegClass);
11345     case 'f':
11346       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
11347         return std::make_pair(0U, &RISCV::FPR16RegClass);
11348       if (Subtarget.hasStdExtF() && VT == MVT::f32)
11349         return std::make_pair(0U, &RISCV::FPR32RegClass);
11350       if (Subtarget.hasStdExtD() && VT == MVT::f64)
11351         return std::make_pair(0U, &RISCV::FPR64RegClass);
11352       break;
11353     default:
11354       break;
11355     }
11356   } else if (Constraint == "vr") {
11357     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
11358                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11359       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
11360         return std::make_pair(0U, RC);
11361     }
11362   } else if (Constraint == "vm") {
11363     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
11364       return std::make_pair(0U, &RISCV::VMV0RegClass);
11365   }
11366 
11367   // Clang will correctly decode the usage of register name aliases into their
11368   // official names. However, other frontends like `rustc` do not. This allows
11369   // users of these frontends to use the ABI names for registers in LLVM-style
11370   // register constraints.
11371   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
11372                                .Case("{zero}", RISCV::X0)
11373                                .Case("{ra}", RISCV::X1)
11374                                .Case("{sp}", RISCV::X2)
11375                                .Case("{gp}", RISCV::X3)
11376                                .Case("{tp}", RISCV::X4)
11377                                .Case("{t0}", RISCV::X5)
11378                                .Case("{t1}", RISCV::X6)
11379                                .Case("{t2}", RISCV::X7)
11380                                .Cases("{s0}", "{fp}", RISCV::X8)
11381                                .Case("{s1}", RISCV::X9)
11382                                .Case("{a0}", RISCV::X10)
11383                                .Case("{a1}", RISCV::X11)
11384                                .Case("{a2}", RISCV::X12)
11385                                .Case("{a3}", RISCV::X13)
11386                                .Case("{a4}", RISCV::X14)
11387                                .Case("{a5}", RISCV::X15)
11388                                .Case("{a6}", RISCV::X16)
11389                                .Case("{a7}", RISCV::X17)
11390                                .Case("{s2}", RISCV::X18)
11391                                .Case("{s3}", RISCV::X19)
11392                                .Case("{s4}", RISCV::X20)
11393                                .Case("{s5}", RISCV::X21)
11394                                .Case("{s6}", RISCV::X22)
11395                                .Case("{s7}", RISCV::X23)
11396                                .Case("{s8}", RISCV::X24)
11397                                .Case("{s9}", RISCV::X25)
11398                                .Case("{s10}", RISCV::X26)
11399                                .Case("{s11}", RISCV::X27)
11400                                .Case("{t3}", RISCV::X28)
11401                                .Case("{t4}", RISCV::X29)
11402                                .Case("{t5}", RISCV::X30)
11403                                .Case("{t6}", RISCV::X31)
11404                                .Default(RISCV::NoRegister);
11405   if (XRegFromAlias != RISCV::NoRegister)
11406     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
11407 
11408   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
11409   // TableGen record rather than the AsmName to choose registers for InlineAsm
11410   // constraints, plus we want to match those names to the widest floating point
11411   // register type available, manually select floating point registers here.
11412   //
11413   // The second case is the ABI name of the register, so that frontends can also
11414   // use the ABI names in register constraint lists.
11415   if (Subtarget.hasStdExtF()) {
11416     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
11417                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
11418                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
11419                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
11420                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
11421                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
11422                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
11423                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
11424                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
11425                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
11426                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
11427                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
11428                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
11429                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
11430                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
11431                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
11432                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
11433                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
11434                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
11435                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
11436                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
11437                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
11438                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
11439                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
11440                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
11441                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
11442                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
11443                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
11444                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
11445                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
11446                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
11447                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
11448                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
11449                         .Default(RISCV::NoRegister);
11450     if (FReg != RISCV::NoRegister) {
11451       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
11452       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
11453         unsigned RegNo = FReg - RISCV::F0_F;
11454         unsigned DReg = RISCV::F0_D + RegNo;
11455         return std::make_pair(DReg, &RISCV::FPR64RegClass);
11456       }
11457       if (VT == MVT::f32 || VT == MVT::Other)
11458         return std::make_pair(FReg, &RISCV::FPR32RegClass);
11459       if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
11460         unsigned RegNo = FReg - RISCV::F0_F;
11461         unsigned HReg = RISCV::F0_H + RegNo;
11462         return std::make_pair(HReg, &RISCV::FPR16RegClass);
11463       }
11464     }
11465   }
11466 
11467   if (Subtarget.hasVInstructions()) {
11468     Register VReg = StringSwitch<Register>(Constraint.lower())
11469                         .Case("{v0}", RISCV::V0)
11470                         .Case("{v1}", RISCV::V1)
11471                         .Case("{v2}", RISCV::V2)
11472                         .Case("{v3}", RISCV::V3)
11473                         .Case("{v4}", RISCV::V4)
11474                         .Case("{v5}", RISCV::V5)
11475                         .Case("{v6}", RISCV::V6)
11476                         .Case("{v7}", RISCV::V7)
11477                         .Case("{v8}", RISCV::V8)
11478                         .Case("{v9}", RISCV::V9)
11479                         .Case("{v10}", RISCV::V10)
11480                         .Case("{v11}", RISCV::V11)
11481                         .Case("{v12}", RISCV::V12)
11482                         .Case("{v13}", RISCV::V13)
11483                         .Case("{v14}", RISCV::V14)
11484                         .Case("{v15}", RISCV::V15)
11485                         .Case("{v16}", RISCV::V16)
11486                         .Case("{v17}", RISCV::V17)
11487                         .Case("{v18}", RISCV::V18)
11488                         .Case("{v19}", RISCV::V19)
11489                         .Case("{v20}", RISCV::V20)
11490                         .Case("{v21}", RISCV::V21)
11491                         .Case("{v22}", RISCV::V22)
11492                         .Case("{v23}", RISCV::V23)
11493                         .Case("{v24}", RISCV::V24)
11494                         .Case("{v25}", RISCV::V25)
11495                         .Case("{v26}", RISCV::V26)
11496                         .Case("{v27}", RISCV::V27)
11497                         .Case("{v28}", RISCV::V28)
11498                         .Case("{v29}", RISCV::V29)
11499                         .Case("{v30}", RISCV::V30)
11500                         .Case("{v31}", RISCV::V31)
11501                         .Default(RISCV::NoRegister);
11502     if (VReg != RISCV::NoRegister) {
11503       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
11504         return std::make_pair(VReg, &RISCV::VMRegClass);
11505       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
11506         return std::make_pair(VReg, &RISCV::VRRegClass);
11507       for (const auto *RC :
11508            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11509         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
11510           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
11511           return std::make_pair(VReg, RC);
11512         }
11513       }
11514     }
11515   }
11516 
11517   std::pair<Register, const TargetRegisterClass *> Res =
11518       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
11519 
11520   // If we picked one of the Zfinx register classes, remap it to the GPR class.
11521   // FIXME: When Zfinx is supported in CodeGen this will need to take the
11522   // Subtarget into account.
11523   if (Res.second == &RISCV::GPRF16RegClass ||
11524       Res.second == &RISCV::GPRF32RegClass ||
11525       Res.second == &RISCV::GPRF64RegClass)
11526     return std::make_pair(Res.first, &RISCV::GPRRegClass);
11527 
11528   return Res;
11529 }
11530 
11531 unsigned
11532 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
11533   // Currently only support length 1 constraints.
11534   if (ConstraintCode.size() == 1) {
11535     switch (ConstraintCode[0]) {
11536     case 'A':
11537       return InlineAsm::Constraint_A;
11538     default:
11539       break;
11540     }
11541   }
11542 
11543   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
11544 }
11545 
11546 void RISCVTargetLowering::LowerAsmOperandForConstraint(
11547     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
11548     SelectionDAG &DAG) const {
11549   // Currently only support length 1 constraints.
11550   if (Constraint.length() == 1) {
11551     switch (Constraint[0]) {
11552     case 'I':
11553       // Validate & create a 12-bit signed immediate operand.
11554       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11555         uint64_t CVal = C->getSExtValue();
11556         if (isInt<12>(CVal))
11557           Ops.push_back(
11558               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11559       }
11560       return;
11561     case 'J':
11562       // Validate & create an integer zero operand.
11563       if (auto *C = dyn_cast<ConstantSDNode>(Op))
11564         if (C->getZExtValue() == 0)
11565           Ops.push_back(
11566               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
11567       return;
11568     case 'K':
11569       // Validate & create a 5-bit unsigned immediate operand.
11570       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11571         uint64_t CVal = C->getZExtValue();
11572         if (isUInt<5>(CVal))
11573           Ops.push_back(
11574               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11575       }
11576       return;
11577     case 'S':
11578       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
11579         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
11580                                                  GA->getValueType(0)));
11581       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
11582         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
11583                                                 BA->getValueType(0)));
11584       }
11585       return;
11586     default:
11587       break;
11588     }
11589   }
11590   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
11591 }
11592 
11593 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
11594                                                    Instruction *Inst,
11595                                                    AtomicOrdering Ord) const {
11596   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
11597     return Builder.CreateFence(Ord);
11598   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
11599     return Builder.CreateFence(AtomicOrdering::Release);
11600   return nullptr;
11601 }
11602 
11603 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
11604                                                     Instruction *Inst,
11605                                                     AtomicOrdering Ord) const {
11606   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
11607     return Builder.CreateFence(AtomicOrdering::Acquire);
11608   return nullptr;
11609 }
11610 
11611 TargetLowering::AtomicExpansionKind
11612 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
11613   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
11614   // point operations can't be used in an lr/sc sequence without breaking the
11615   // forward-progress guarantee.
11616   if (AI->isFloatingPointOperation())
11617     return AtomicExpansionKind::CmpXChg;
11618 
11619   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
11620   if (Size == 8 || Size == 16)
11621     return AtomicExpansionKind::MaskedIntrinsic;
11622   return AtomicExpansionKind::None;
11623 }
11624 
11625 static Intrinsic::ID
11626 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
11627   if (XLen == 32) {
11628     switch (BinOp) {
11629     default:
11630       llvm_unreachable("Unexpected AtomicRMW BinOp");
11631     case AtomicRMWInst::Xchg:
11632       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
11633     case AtomicRMWInst::Add:
11634       return Intrinsic::riscv_masked_atomicrmw_add_i32;
11635     case AtomicRMWInst::Sub:
11636       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
11637     case AtomicRMWInst::Nand:
11638       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
11639     case AtomicRMWInst::Max:
11640       return Intrinsic::riscv_masked_atomicrmw_max_i32;
11641     case AtomicRMWInst::Min:
11642       return Intrinsic::riscv_masked_atomicrmw_min_i32;
11643     case AtomicRMWInst::UMax:
11644       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
11645     case AtomicRMWInst::UMin:
11646       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
11647     }
11648   }
11649 
11650   if (XLen == 64) {
11651     switch (BinOp) {
11652     default:
11653       llvm_unreachable("Unexpected AtomicRMW BinOp");
11654     case AtomicRMWInst::Xchg:
11655       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
11656     case AtomicRMWInst::Add:
11657       return Intrinsic::riscv_masked_atomicrmw_add_i64;
11658     case AtomicRMWInst::Sub:
11659       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
11660     case AtomicRMWInst::Nand:
11661       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
11662     case AtomicRMWInst::Max:
11663       return Intrinsic::riscv_masked_atomicrmw_max_i64;
11664     case AtomicRMWInst::Min:
11665       return Intrinsic::riscv_masked_atomicrmw_min_i64;
11666     case AtomicRMWInst::UMax:
11667       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
11668     case AtomicRMWInst::UMin:
11669       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
11670     }
11671   }
11672 
11673   llvm_unreachable("Unexpected XLen\n");
11674 }
11675 
11676 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
11677     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
11678     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
11679   unsigned XLen = Subtarget.getXLen();
11680   Value *Ordering =
11681       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
11682   Type *Tys[] = {AlignedAddr->getType()};
11683   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
11684       AI->getModule(),
11685       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
11686 
11687   if (XLen == 64) {
11688     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
11689     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11690     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
11691   }
11692 
11693   Value *Result;
11694 
11695   // Must pass the shift amount needed to sign extend the loaded value prior
11696   // to performing a signed comparison for min/max. ShiftAmt is the number of
11697   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
11698   // is the number of bits to left+right shift the value in order to
11699   // sign-extend.
11700   if (AI->getOperation() == AtomicRMWInst::Min ||
11701       AI->getOperation() == AtomicRMWInst::Max) {
11702     const DataLayout &DL = AI->getModule()->getDataLayout();
11703     unsigned ValWidth =
11704         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
11705     Value *SextShamt =
11706         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
11707     Result = Builder.CreateCall(LrwOpScwLoop,
11708                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
11709   } else {
11710     Result =
11711         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
11712   }
11713 
11714   if (XLen == 64)
11715     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11716   return Result;
11717 }
11718 
11719 TargetLowering::AtomicExpansionKind
11720 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
11721     AtomicCmpXchgInst *CI) const {
11722   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
11723   if (Size == 8 || Size == 16)
11724     return AtomicExpansionKind::MaskedIntrinsic;
11725   return AtomicExpansionKind::None;
11726 }
11727 
11728 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
11729     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
11730     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
11731   unsigned XLen = Subtarget.getXLen();
11732   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
11733   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
11734   if (XLen == 64) {
11735     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
11736     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
11737     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11738     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
11739   }
11740   Type *Tys[] = {AlignedAddr->getType()};
11741   Function *MaskedCmpXchg =
11742       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
11743   Value *Result = Builder.CreateCall(
11744       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
11745   if (XLen == 64)
11746     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11747   return Result;
11748 }
11749 
11750 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT IndexVT,
11751                                                         EVT DataVT) const {
11752   return false;
11753 }
11754 
11755 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
11756                                                EVT VT) const {
11757   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
11758     return false;
11759 
11760   switch (FPVT.getSimpleVT().SimpleTy) {
11761   case MVT::f16:
11762     return Subtarget.hasStdExtZfh();
11763   case MVT::f32:
11764     return Subtarget.hasStdExtF();
11765   case MVT::f64:
11766     return Subtarget.hasStdExtD();
11767   default:
11768     return false;
11769   }
11770 }
11771 
11772 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
11773   // If we are using the small code model, we can reduce size of jump table
11774   // entry to 4 bytes.
11775   if (Subtarget.is64Bit() && !isPositionIndependent() &&
11776       getTargetMachine().getCodeModel() == CodeModel::Small) {
11777     return MachineJumpTableInfo::EK_Custom32;
11778   }
11779   return TargetLowering::getJumpTableEncoding();
11780 }
11781 
11782 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
11783     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
11784     unsigned uid, MCContext &Ctx) const {
11785   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
11786          getTargetMachine().getCodeModel() == CodeModel::Small);
11787   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
11788 }
11789 
11790 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
11791                                                      EVT VT) const {
11792   VT = VT.getScalarType();
11793 
11794   if (!VT.isSimple())
11795     return false;
11796 
11797   switch (VT.getSimpleVT().SimpleTy) {
11798   case MVT::f16:
11799     return Subtarget.hasStdExtZfh();
11800   case MVT::f32:
11801     return Subtarget.hasStdExtF();
11802   case MVT::f64:
11803     return Subtarget.hasStdExtD();
11804   default:
11805     break;
11806   }
11807 
11808   return false;
11809 }
11810 
11811 Register RISCVTargetLowering::getExceptionPointerRegister(
11812     const Constant *PersonalityFn) const {
11813   return RISCV::X10;
11814 }
11815 
11816 Register RISCVTargetLowering::getExceptionSelectorRegister(
11817     const Constant *PersonalityFn) const {
11818   return RISCV::X11;
11819 }
11820 
11821 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
11822   // Return false to suppress the unnecessary extensions if the LibCall
11823   // arguments or return value is f32 type for LP64 ABI.
11824   RISCVABI::ABI ABI = Subtarget.getTargetABI();
11825   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
11826     return false;
11827 
11828   return true;
11829 }
11830 
11831 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
11832   if (Subtarget.is64Bit() && Type == MVT::i32)
11833     return true;
11834 
11835   return IsSigned;
11836 }
11837 
11838 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
11839                                                  SDValue C) const {
11840   // Check integral scalar types.
11841   if (VT.isScalarInteger()) {
11842     // Omit the optimization if the sub target has the M extension and the data
11843     // size exceeds XLen.
11844     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
11845       return false;
11846     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
11847       // Break the MUL to a SLLI and an ADD/SUB.
11848       const APInt &Imm = ConstNode->getAPIntValue();
11849       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
11850           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
11851         return true;
11852       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
11853       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
11854           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
11855            (Imm - 8).isPowerOf2()))
11856         return true;
11857       // Omit the following optimization if the sub target has the M extension
11858       // and the data size >= XLen.
11859       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
11860         return false;
11861       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
11862       // a pair of LUI/ADDI.
11863       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
11864         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
11865         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
11866             (1 - ImmS).isPowerOf2())
11867         return true;
11868       }
11869     }
11870   }
11871 
11872   return false;
11873 }
11874 
11875 bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
11876                                                       SDValue ConstNode) const {
11877   // Let the DAGCombiner decide for vectors.
11878   EVT VT = AddNode.getValueType();
11879   if (VT.isVector())
11880     return true;
11881 
11882   // Let the DAGCombiner decide for larger types.
11883   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
11884     return true;
11885 
11886   // It is worse if c1 is simm12 while c1*c2 is not.
11887   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
11888   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
11889   const APInt &C1 = C1Node->getAPIntValue();
11890   const APInt &C2 = C2Node->getAPIntValue();
11891   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
11892     return false;
11893 
11894   // Default to true and let the DAGCombiner decide.
11895   return true;
11896 }
11897 
11898 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
11899     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
11900     bool *Fast) const {
11901   if (!VT.isVector()) {
11902     if (Fast)
11903       *Fast = false;
11904     return Subtarget.enableUnalignedScalarMem();
11905   }
11906 
11907   // All vector implementations must support element alignment
11908   EVT ElemVT = VT.getVectorElementType();
11909   if (Alignment >= ElemVT.getStoreSize()) {
11910     if (Fast)
11911       *Fast = true;
11912     return true;
11913   }
11914 
11915   return false;
11916 }
11917 
11918 bool RISCVTargetLowering::splitValueIntoRegisterParts(
11919     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
11920     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
11921   bool IsABIRegCopy = CC.hasValue();
11922   EVT ValueVT = Val.getValueType();
11923   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11924     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
11925     // and cast to f32.
11926     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
11927     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
11928     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
11929                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
11930     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
11931     Parts[0] = Val;
11932     return true;
11933   }
11934 
11935   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11936     LLVMContext &Context = *DAG.getContext();
11937     EVT ValueEltVT = ValueVT.getVectorElementType();
11938     EVT PartEltVT = PartVT.getVectorElementType();
11939     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11940     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11941     if (PartVTBitSize % ValueVTBitSize == 0) {
11942       assert(PartVTBitSize >= ValueVTBitSize);
11943       // If the element types are different, bitcast to the same element type of
11944       // PartVT first.
11945       // Give an example here, we want copy a <vscale x 1 x i8> value to
11946       // <vscale x 4 x i16>.
11947       // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
11948       // subvector, then we can bitcast to <vscale x 4 x i16>.
11949       if (ValueEltVT != PartEltVT) {
11950         if (PartVTBitSize > ValueVTBitSize) {
11951           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11952           assert(Count != 0 && "The number of element should not be zero.");
11953           EVT SameEltTypeVT =
11954               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11955           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
11956                             DAG.getUNDEF(SameEltTypeVT), Val,
11957                             DAG.getVectorIdxConstant(0, DL));
11958         }
11959         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
11960       } else {
11961         Val =
11962             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
11963                         Val, DAG.getVectorIdxConstant(0, DL));
11964       }
11965       Parts[0] = Val;
11966       return true;
11967     }
11968   }
11969   return false;
11970 }
11971 
11972 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
11973     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
11974     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
11975   bool IsABIRegCopy = CC.hasValue();
11976   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11977     SDValue Val = Parts[0];
11978 
11979     // Cast the f32 to i32, truncate to i16, and cast back to f16.
11980     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
11981     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
11982     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
11983     return Val;
11984   }
11985 
11986   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11987     LLVMContext &Context = *DAG.getContext();
11988     SDValue Val = Parts[0];
11989     EVT ValueEltVT = ValueVT.getVectorElementType();
11990     EVT PartEltVT = PartVT.getVectorElementType();
11991     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11992     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11993     if (PartVTBitSize % ValueVTBitSize == 0) {
11994       assert(PartVTBitSize >= ValueVTBitSize);
11995       EVT SameEltTypeVT = ValueVT;
11996       // If the element types are different, convert it to the same element type
11997       // of PartVT.
11998       // Give an example here, we want copy a <vscale x 1 x i8> value from
11999       // <vscale x 4 x i16>.
12000       // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
12001       // then we can extract <vscale x 1 x i8>.
12002       if (ValueEltVT != PartEltVT) {
12003         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
12004         assert(Count != 0 && "The number of element should not be zero.");
12005         SameEltTypeVT =
12006             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
12007         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
12008       }
12009       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
12010                         DAG.getVectorIdxConstant(0, DL));
12011       return Val;
12012     }
12013   }
12014   return SDValue();
12015 }
12016 
12017 SDValue
12018 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
12019                                    SelectionDAG &DAG,
12020                                    SmallVectorImpl<SDNode *> &Created) const {
12021   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
12022   if (isIntDivCheap(N->getValueType(0), Attr))
12023     return SDValue(N, 0); // Lower SDIV as SDIV
12024 
12025   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
12026          "Unexpected divisor!");
12027 
12028   // Conditional move is needed, so do the transformation iff Zbt is enabled.
12029   if (!Subtarget.hasStdExtZbt())
12030     return SDValue();
12031 
12032   // When |Divisor| >= 2 ^ 12, it isn't profitable to do such transformation.
12033   // Besides, more critical path instructions will be generated when dividing
12034   // by 2. So we keep using the original DAGs for these cases.
12035   unsigned Lg2 = Divisor.countTrailingZeros();
12036   if (Lg2 == 1 || Lg2 >= 12)
12037     return SDValue();
12038 
12039   // fold (sdiv X, pow2)
12040   EVT VT = N->getValueType(0);
12041   if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
12042     return SDValue();
12043 
12044   SDLoc DL(N);
12045   SDValue N0 = N->getOperand(0);
12046   SDValue Zero = DAG.getConstant(0, DL, VT);
12047   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
12048 
12049   // Add (N0 < 0) ? Pow2 - 1 : 0;
12050   SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
12051   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
12052   SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
12053 
12054   Created.push_back(Cmp.getNode());
12055   Created.push_back(Add.getNode());
12056   Created.push_back(Sel.getNode());
12057 
12058   // Divide by pow2.
12059   SDValue SRA =
12060       DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
12061 
12062   // If we're dividing by a positive value, we're done.  Otherwise, we must
12063   // negate the result.
12064   if (Divisor.isNonNegative())
12065     return SRA;
12066 
12067   Created.push_back(SRA.getNode());
12068   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
12069 }
12070 
12071 #define GET_REGISTER_MATCHER
12072 #include "RISCVGenAsmMatcher.inc"
12073 
12074 Register
12075 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
12076                                        const MachineFunction &MF) const {
12077   Register Reg = MatchRegisterAltName(RegName);
12078   if (Reg == RISCV::NoRegister)
12079     Reg = MatchRegisterName(RegName);
12080   if (Reg == RISCV::NoRegister)
12081     report_fatal_error(
12082         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
12083   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
12084   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
12085     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
12086                              StringRef(RegName) + "\"."));
12087   return Reg;
12088 }
12089 
12090 namespace llvm {
12091 namespace RISCVVIntrinsicsTable {
12092 
12093 #define GET_RISCVVIntrinsicsTable_IMPL
12094 #include "RISCVGenSearchableTables.inc"
12095 
12096 } // namespace RISCVVIntrinsicsTable
12097 
12098 } // namespace llvm
12099