1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineJumpTableInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/CodeGen/ValueTypes.h"
31 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/IR/DiagnosticPrinter.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/IntrinsicsRISCV.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/KnownBits.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/raw_ostream.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "riscv-lower"
45 
46 STATISTIC(NumTailCalls, "Number of tail calls");
47 
48 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
49                                          const RISCVSubtarget &STI)
50     : TargetLowering(TM), Subtarget(STI) {
51 
52   if (Subtarget.isRV32E())
53     report_fatal_error("Codegen not yet implemented for RV32E");
54 
55   RISCVABI::ABI ABI = Subtarget.getTargetABI();
56   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
57 
58   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
59       !Subtarget.hasStdExtF()) {
60     errs() << "Hard-float 'f' ABI can't be used for a target that "
61                 "doesn't support the F instruction set extension (ignoring "
62                           "target-abi)\n";
63     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
64   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
65              !Subtarget.hasStdExtD()) {
66     errs() << "Hard-float 'd' ABI can't be used for a target that "
67               "doesn't support the D instruction set extension (ignoring "
68               "target-abi)\n";
69     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
70   }
71 
72   switch (ABI) {
73   default:
74     report_fatal_error("Don't know how to lower this ABI");
75   case RISCVABI::ABI_ILP32:
76   case RISCVABI::ABI_ILP32F:
77   case RISCVABI::ABI_ILP32D:
78   case RISCVABI::ABI_LP64:
79   case RISCVABI::ABI_LP64F:
80   case RISCVABI::ABI_LP64D:
81     break;
82   }
83 
84   MVT XLenVT = Subtarget.getXLenVT();
85 
86   // Set up the register classes.
87   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
88 
89   if (Subtarget.hasStdExtZfh())
90     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
91   if (Subtarget.hasStdExtF())
92     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
93   if (Subtarget.hasStdExtD())
94     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
95 
96   static const MVT::SimpleValueType BoolVecVTs[] = {
97       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
98       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
99   static const MVT::SimpleValueType IntVecVTs[] = {
100       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
101       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
102       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
103       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
104       MVT::nxv4i64, MVT::nxv8i64};
105   static const MVT::SimpleValueType F16VecVTs[] = {
106       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
107       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
108   static const MVT::SimpleValueType F32VecVTs[] = {
109       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
110   static const MVT::SimpleValueType F64VecVTs[] = {
111       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
112 
113   if (Subtarget.hasVInstructions()) {
114     auto addRegClassForRVV = [this](MVT VT) {
115       unsigned Size = VT.getSizeInBits().getKnownMinValue();
116       assert(Size <= 512 && isPowerOf2_32(Size));
117       const TargetRegisterClass *RC;
118       if (Size <= 64)
119         RC = &RISCV::VRRegClass;
120       else if (Size == 128)
121         RC = &RISCV::VRM2RegClass;
122       else if (Size == 256)
123         RC = &RISCV::VRM4RegClass;
124       else
125         RC = &RISCV::VRM8RegClass;
126 
127       addRegisterClass(VT, RC);
128     };
129 
130     for (MVT VT : BoolVecVTs)
131       addRegClassForRVV(VT);
132     for (MVT VT : IntVecVTs) {
133       if (VT.getVectorElementType() == MVT::i64 &&
134           !Subtarget.hasVInstructionsI64())
135         continue;
136       addRegClassForRVV(VT);
137     }
138 
139     if (Subtarget.hasVInstructionsF16())
140       for (MVT VT : F16VecVTs)
141         addRegClassForRVV(VT);
142 
143     if (Subtarget.hasVInstructionsF32())
144       for (MVT VT : F32VecVTs)
145         addRegClassForRVV(VT);
146 
147     if (Subtarget.hasVInstructionsF64())
148       for (MVT VT : F64VecVTs)
149         addRegClassForRVV(VT);
150 
151     if (Subtarget.useRVVForFixedLengthVectors()) {
152       auto addRegClassForFixedVectors = [this](MVT VT) {
153         MVT ContainerVT = getContainerForFixedLengthVector(VT);
154         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
155         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
156         addRegisterClass(VT, TRI.getRegClass(RCID));
157       };
158       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
159         if (useRVVForFixedLengthVectorVT(VT))
160           addRegClassForFixedVectors(VT);
161 
162       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
163         if (useRVVForFixedLengthVectorVT(VT))
164           addRegClassForFixedVectors(VT);
165     }
166   }
167 
168   // Compute derived properties from the register classes.
169   computeRegisterProperties(STI.getRegisterInfo());
170 
171   setStackPointerRegisterToSaveRestore(RISCV::X2);
172 
173   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
174     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
175 
176   // TODO: add all necessary setOperationAction calls.
177   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
178 
179   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
180   setOperationAction(ISD::BR_CC, XLenVT, Expand);
181   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
182   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
183 
184   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
185   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
186 
187   setOperationAction(ISD::VASTART, MVT::Other, Custom);
188   setOperationAction(ISD::VAARG, MVT::Other, Expand);
189   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
190   setOperationAction(ISD::VAEND, MVT::Other, Expand);
191 
192   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
193   if (!Subtarget.hasStdExtZbb()) {
194     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
195     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
196   }
197 
198   if (Subtarget.is64Bit()) {
199     setOperationAction(ISD::ADD, MVT::i32, Custom);
200     setOperationAction(ISD::SUB, MVT::i32, Custom);
201     setOperationAction(ISD::SHL, MVT::i32, Custom);
202     setOperationAction(ISD::SRA, MVT::i32, Custom);
203     setOperationAction(ISD::SRL, MVT::i32, Custom);
204 
205     setOperationAction(ISD::UADDO, MVT::i32, Custom);
206     setOperationAction(ISD::USUBO, MVT::i32, Custom);
207     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
208     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
209   } else {
210     setLibcallName(RTLIB::SHL_I128, nullptr);
211     setLibcallName(RTLIB::SRL_I128, nullptr);
212     setLibcallName(RTLIB::SRA_I128, nullptr);
213     setLibcallName(RTLIB::MUL_I128, nullptr);
214     setLibcallName(RTLIB::MULO_I64, nullptr);
215   }
216 
217   if (!Subtarget.hasStdExtM()) {
218     setOperationAction(ISD::MUL, XLenVT, Expand);
219     setOperationAction(ISD::MULHS, XLenVT, Expand);
220     setOperationAction(ISD::MULHU, XLenVT, Expand);
221     setOperationAction(ISD::SDIV, XLenVT, Expand);
222     setOperationAction(ISD::UDIV, XLenVT, Expand);
223     setOperationAction(ISD::SREM, XLenVT, Expand);
224     setOperationAction(ISD::UREM, XLenVT, Expand);
225   } else {
226     if (Subtarget.is64Bit()) {
227       setOperationAction(ISD::MUL, MVT::i32, Custom);
228       setOperationAction(ISD::MUL, MVT::i128, Custom);
229 
230       setOperationAction(ISD::SDIV, MVT::i8, Custom);
231       setOperationAction(ISD::UDIV, MVT::i8, Custom);
232       setOperationAction(ISD::UREM, MVT::i8, Custom);
233       setOperationAction(ISD::SDIV, MVT::i16, Custom);
234       setOperationAction(ISD::UDIV, MVT::i16, Custom);
235       setOperationAction(ISD::UREM, MVT::i16, Custom);
236       setOperationAction(ISD::SDIV, MVT::i32, Custom);
237       setOperationAction(ISD::UDIV, MVT::i32, Custom);
238       setOperationAction(ISD::UREM, MVT::i32, Custom);
239     } else {
240       setOperationAction(ISD::MUL, MVT::i64, Custom);
241     }
242   }
243 
244   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
245   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
246   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
247   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
248 
249   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
250   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
251   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
252 
253   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
254       Subtarget.hasStdExtZbkb()) {
255     if (Subtarget.is64Bit()) {
256       setOperationAction(ISD::ROTL, MVT::i32, Custom);
257       setOperationAction(ISD::ROTR, MVT::i32, Custom);
258     }
259   } else {
260     setOperationAction(ISD::ROTL, XLenVT, Expand);
261     setOperationAction(ISD::ROTR, XLenVT, Expand);
262   }
263 
264   if (Subtarget.hasStdExtZbp()) {
265     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
266     // more combining.
267     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
268     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
269     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
270     // BSWAP i8 doesn't exist.
271     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
272     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
273 
274     if (Subtarget.is64Bit()) {
275       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
276       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
277     }
278   } else {
279     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
280     // pattern match it directly in isel.
281     setOperationAction(ISD::BSWAP, XLenVT,
282                        (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
283                            ? Legal
284                            : Expand);
285     // Zbkb can use rev8+brev8 to implement bitreverse.
286     setOperationAction(ISD::BITREVERSE, XLenVT,
287                        Subtarget.hasStdExtZbkb() ? Custom : Expand);
288   }
289 
290   if (Subtarget.hasStdExtZbb()) {
291     setOperationAction(ISD::SMIN, XLenVT, Legal);
292     setOperationAction(ISD::SMAX, XLenVT, Legal);
293     setOperationAction(ISD::UMIN, XLenVT, Legal);
294     setOperationAction(ISD::UMAX, XLenVT, Legal);
295 
296     if (Subtarget.is64Bit()) {
297       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
298       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
299       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
300       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
301     }
302   } else {
303     setOperationAction(ISD::CTTZ, XLenVT, Expand);
304     setOperationAction(ISD::CTLZ, XLenVT, Expand);
305     setOperationAction(ISD::CTPOP, XLenVT, Expand);
306 
307     if (Subtarget.is64Bit())
308       setOperationAction(ISD::ABS, MVT::i32, Custom);
309   }
310 
311   if (Subtarget.hasStdExtZbt()) {
312     setOperationAction(ISD::FSHL, XLenVT, Custom);
313     setOperationAction(ISD::FSHR, XLenVT, Custom);
314     setOperationAction(ISD::SELECT, XLenVT, Legal);
315 
316     if (Subtarget.is64Bit()) {
317       setOperationAction(ISD::FSHL, MVT::i32, Custom);
318       setOperationAction(ISD::FSHR, MVT::i32, Custom);
319     }
320   } else {
321     setOperationAction(ISD::SELECT, XLenVT, Custom);
322   }
323 
324   static constexpr ISD::NodeType FPLegalNodeTypes[] = {
325       ISD::FMINNUM,        ISD::FMAXNUM,       ISD::LRINT,
326       ISD::LLRINT,         ISD::LROUND,        ISD::LLROUND,
327       ISD::STRICT_LRINT,   ISD::STRICT_LLRINT, ISD::STRICT_LROUND,
328       ISD::STRICT_LLROUND, ISD::STRICT_FMA,    ISD::STRICT_FADD,
329       ISD::STRICT_FSUB,    ISD::STRICT_FMUL,   ISD::STRICT_FDIV,
330       ISD::STRICT_FSQRT,   ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS};
331 
332   static const ISD::CondCode FPCCToExpand[] = {
333       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
334       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
335       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
336 
337   static const ISD::NodeType FPOpToExpand[] = {
338       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
339       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
340 
341   if (Subtarget.hasStdExtZfh())
342     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
343 
344   if (Subtarget.hasStdExtZfh()) {
345     for (auto NT : FPLegalNodeTypes)
346       setOperationAction(NT, MVT::f16, Legal);
347     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
348     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
349     for (auto CC : FPCCToExpand)
350       setCondCodeAction(CC, MVT::f16, Expand);
351     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
352     setOperationAction(ISD::SELECT, MVT::f16, Custom);
353     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
354 
355     setOperationAction(ISD::FREM,       MVT::f16, Promote);
356     setOperationAction(ISD::FCEIL,      MVT::f16, Promote);
357     setOperationAction(ISD::FFLOOR,     MVT::f16, Promote);
358     setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
359     setOperationAction(ISD::FRINT,      MVT::f16, Promote);
360     setOperationAction(ISD::FROUND,     MVT::f16, Promote);
361     setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote);
362     setOperationAction(ISD::FTRUNC,     MVT::f16, Promote);
363     setOperationAction(ISD::FPOW,       MVT::f16, Promote);
364     setOperationAction(ISD::FPOWI,      MVT::f16, Promote);
365     setOperationAction(ISD::FCOS,       MVT::f16, Promote);
366     setOperationAction(ISD::FSIN,       MVT::f16, Promote);
367     setOperationAction(ISD::FSINCOS,    MVT::f16, Promote);
368     setOperationAction(ISD::FEXP,       MVT::f16, Promote);
369     setOperationAction(ISD::FEXP2,      MVT::f16, Promote);
370     setOperationAction(ISD::FLOG,       MVT::f16, Promote);
371     setOperationAction(ISD::FLOG2,      MVT::f16, Promote);
372     setOperationAction(ISD::FLOG10,     MVT::f16, Promote);
373 
374     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
375     // complete support for all operations in LegalizeDAG.
376 
377     // We need to custom promote this.
378     if (Subtarget.is64Bit())
379       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
380   }
381 
382   if (Subtarget.hasStdExtF()) {
383     for (auto NT : FPLegalNodeTypes)
384       setOperationAction(NT, MVT::f32, Legal);
385     for (auto CC : FPCCToExpand)
386       setCondCodeAction(CC, MVT::f32, Expand);
387     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
388     setOperationAction(ISD::SELECT, MVT::f32, Custom);
389     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
390     for (auto Op : FPOpToExpand)
391       setOperationAction(Op, MVT::f32, Expand);
392     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
393     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
394   }
395 
396   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
397     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
398 
399   if (Subtarget.hasStdExtD()) {
400     for (auto NT : FPLegalNodeTypes)
401       setOperationAction(NT, MVT::f64, Legal);
402     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
403     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
404     for (auto CC : FPCCToExpand)
405       setCondCodeAction(CC, MVT::f64, Expand);
406     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
407     setOperationAction(ISD::SELECT, MVT::f64, Custom);
408     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
409     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
410     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
411     for (auto Op : FPOpToExpand)
412       setOperationAction(Op, MVT::f64, Expand);
413     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
414     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
415   }
416 
417   if (Subtarget.is64Bit()) {
418     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
419     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
420     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
421     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
422   }
423 
424   if (Subtarget.hasStdExtF()) {
425     setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
426     setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);
427 
428     setOperationAction(ISD::STRICT_FP_TO_UINT, XLenVT, Legal);
429     setOperationAction(ISD::STRICT_FP_TO_SINT, XLenVT, Legal);
430     setOperationAction(ISD::STRICT_UINT_TO_FP, XLenVT, Legal);
431     setOperationAction(ISD::STRICT_SINT_TO_FP, XLenVT, Legal);
432 
433     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
434     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
435   }
436 
437   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
438   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
439   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
440   setOperationAction(ISD::JumpTable, XLenVT, Custom);
441 
442   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
443 
444   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
445   // Unfortunately this can't be determined just from the ISA naming string.
446   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
447                      Subtarget.is64Bit() ? Legal : Custom);
448 
449   setOperationAction(ISD::TRAP, MVT::Other, Legal);
450   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
451   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
452   if (Subtarget.is64Bit())
453     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
454 
455   if (Subtarget.hasStdExtA()) {
456     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
457     setMinCmpXchgSizeInBits(32);
458   } else {
459     setMaxAtomicSizeInBitsSupported(0);
460   }
461 
462   setBooleanContents(ZeroOrOneBooleanContent);
463 
464   if (Subtarget.hasVInstructions()) {
465     setBooleanVectorContents(ZeroOrOneBooleanContent);
466 
467     setOperationAction(ISD::VSCALE, XLenVT, Custom);
468 
469     // RVV intrinsics may have illegal operands.
470     // We also need to custom legalize vmv.x.s.
471     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
472     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
473     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
474     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
475     if (Subtarget.is64Bit()) {
476       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
477     } else {
478       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
479       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
480     }
481 
482     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
483     setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
484 
485     static const unsigned IntegerVPOps[] = {
486         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
487         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
488         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
489         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
490         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
491         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
492         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
493         ISD::VP_MERGE,       ISD::VP_SELECT};
494 
495     static const unsigned FloatingPointVPOps[] = {
496         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
497         ISD::VP_FDIV,        ISD::VP_FNEG,        ISD::VP_FMA,
498         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD, ISD::VP_REDUCE_FMIN,
499         ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,       ISD::VP_SELECT};
500 
501     if (!Subtarget.is64Bit()) {
502       // We must custom-lower certain vXi64 operations on RV32 due to the vector
503       // element type being illegal.
504       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
505       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
506 
507       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
508       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
509       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
510       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
511       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
512       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
513       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
514       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
515 
516       setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom);
517       setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom);
518       setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom);
519       setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom);
520       setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom);
521       setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom);
522       setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom);
523       setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom);
524     }
525 
526     for (MVT VT : BoolVecVTs) {
527       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
528 
529       // Mask VTs are custom-expanded into a series of standard nodes
530       setOperationAction(ISD::TRUNCATE, VT, Custom);
531       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
532       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
533       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
534 
535       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
536       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
537 
538       setOperationAction(ISD::SELECT, VT, Custom);
539       setOperationAction(ISD::SELECT_CC, VT, Expand);
540       setOperationAction(ISD::VSELECT, VT, Expand);
541       setOperationAction(ISD::VP_MERGE, VT, Expand);
542       setOperationAction(ISD::VP_SELECT, VT, Expand);
543 
544       setOperationAction(ISD::VP_AND, VT, Custom);
545       setOperationAction(ISD::VP_OR, VT, Custom);
546       setOperationAction(ISD::VP_XOR, VT, Custom);
547 
548       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
549       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
550       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
551 
552       setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
553       setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
554       setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
555 
556       // RVV has native int->float & float->int conversions where the
557       // element type sizes are within one power-of-two of each other. Any
558       // wider distances between type sizes have to be lowered as sequences
559       // which progressively narrow the gap in stages.
560       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
561       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
562       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
563       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
564 
565       // Expand all extending loads to types larger than this, and truncating
566       // stores from types larger than this.
567       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
568         setTruncStoreAction(OtherVT, VT, Expand);
569         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
570         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
571         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
572       }
573     }
574 
575     for (MVT VT : IntVecVTs) {
576       if (VT.getVectorElementType() == MVT::i64 &&
577           !Subtarget.hasVInstructionsI64())
578         continue;
579 
580       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
581       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
582 
583       // Vectors implement MULHS/MULHU.
584       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
585       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
586 
587       // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
588       if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV()) {
589         setOperationAction(ISD::MULHU, VT, Expand);
590         setOperationAction(ISD::MULHS, VT, Expand);
591       }
592 
593       setOperationAction(ISD::SMIN, VT, Legal);
594       setOperationAction(ISD::SMAX, VT, Legal);
595       setOperationAction(ISD::UMIN, VT, Legal);
596       setOperationAction(ISD::UMAX, VT, Legal);
597 
598       setOperationAction(ISD::ROTL, VT, Expand);
599       setOperationAction(ISD::ROTR, VT, Expand);
600 
601       setOperationAction(ISD::CTTZ, VT, Expand);
602       setOperationAction(ISD::CTLZ, VT, Expand);
603       setOperationAction(ISD::CTPOP, VT, Expand);
604 
605       setOperationAction(ISD::BSWAP, VT, Expand);
606 
607       // Custom-lower extensions and truncations from/to mask types.
608       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
609       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
610       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
611 
612       // RVV has native int->float & float->int conversions where the
613       // element type sizes are within one power-of-two of each other. Any
614       // wider distances between type sizes have to be lowered as sequences
615       // which progressively narrow the gap in stages.
616       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
617       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
618       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
619       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
620 
621       setOperationAction(ISD::SADDSAT, VT, Legal);
622       setOperationAction(ISD::UADDSAT, VT, Legal);
623       setOperationAction(ISD::SSUBSAT, VT, Legal);
624       setOperationAction(ISD::USUBSAT, VT, Legal);
625 
626       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
627       // nodes which truncate by one power of two at a time.
628       setOperationAction(ISD::TRUNCATE, VT, Custom);
629 
630       // Custom-lower insert/extract operations to simplify patterns.
631       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
632       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
633 
634       // Custom-lower reduction operations to set up the corresponding custom
635       // nodes' operands.
636       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
637       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
638       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
639       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
640       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
641       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
642       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
643       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
644 
645       for (unsigned VPOpc : IntegerVPOps)
646         setOperationAction(VPOpc, VT, Custom);
647 
648       setOperationAction(ISD::LOAD, VT, Custom);
649       setOperationAction(ISD::STORE, VT, Custom);
650 
651       setOperationAction(ISD::MLOAD, VT, Custom);
652       setOperationAction(ISD::MSTORE, VT, Custom);
653       setOperationAction(ISD::MGATHER, VT, Custom);
654       setOperationAction(ISD::MSCATTER, VT, Custom);
655 
656       setOperationAction(ISD::VP_LOAD, VT, Custom);
657       setOperationAction(ISD::VP_STORE, VT, Custom);
658       setOperationAction(ISD::VP_GATHER, VT, Custom);
659       setOperationAction(ISD::VP_SCATTER, VT, Custom);
660 
661       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
662       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
663       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
664 
665       setOperationAction(ISD::SELECT, VT, Custom);
666       setOperationAction(ISD::SELECT_CC, VT, Expand);
667 
668       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
669       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
670 
671       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
672         setTruncStoreAction(VT, OtherVT, Expand);
673         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
674         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
675         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
676       }
677 
678       // Splice
679       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
680 
681       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
682       // type that can represent the value exactly.
683       if (VT.getVectorElementType() != MVT::i64) {
684         MVT FloatEltVT =
685             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
686         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
687         if (isTypeLegal(FloatVT)) {
688           setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
689           setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
690         }
691       }
692     }
693 
694     // Expand various CCs to best match the RVV ISA, which natively supports UNE
695     // but no other unordered comparisons, and supports all ordered comparisons
696     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
697     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
698     // and we pattern-match those back to the "original", swapping operands once
699     // more. This way we catch both operations and both "vf" and "fv" forms with
700     // fewer patterns.
701     static const ISD::CondCode VFPCCToExpand[] = {
702         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
703         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
704         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
705     };
706 
707     // Sets common operation actions on RVV floating-point vector types.
708     const auto SetCommonVFPActions = [&](MVT VT) {
709       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
710       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
711       // sizes are within one power-of-two of each other. Therefore conversions
712       // between vXf16 and vXf64 must be lowered as sequences which convert via
713       // vXf32.
714       setOperationAction(ISD::FP_ROUND, VT, Custom);
715       setOperationAction(ISD::FP_EXTEND, VT, Custom);
716       // Custom-lower insert/extract operations to simplify patterns.
717       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
718       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
719       // Expand various condition codes (explained above).
720       for (auto CC : VFPCCToExpand)
721         setCondCodeAction(CC, VT, Expand);
722 
723       setOperationAction(ISD::FMINNUM, VT, Legal);
724       setOperationAction(ISD::FMAXNUM, VT, Legal);
725 
726       setOperationAction(ISD::FTRUNC, VT, Custom);
727       setOperationAction(ISD::FCEIL, VT, Custom);
728       setOperationAction(ISD::FFLOOR, VT, Custom);
729       setOperationAction(ISD::FROUND, VT, Custom);
730 
731       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
732       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
733       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
734       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
735 
736       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
737 
738       setOperationAction(ISD::LOAD, VT, Custom);
739       setOperationAction(ISD::STORE, VT, Custom);
740 
741       setOperationAction(ISD::MLOAD, VT, Custom);
742       setOperationAction(ISD::MSTORE, VT, Custom);
743       setOperationAction(ISD::MGATHER, VT, Custom);
744       setOperationAction(ISD::MSCATTER, VT, Custom);
745 
746       setOperationAction(ISD::VP_LOAD, VT, Custom);
747       setOperationAction(ISD::VP_STORE, VT, Custom);
748       setOperationAction(ISD::VP_GATHER, VT, Custom);
749       setOperationAction(ISD::VP_SCATTER, VT, Custom);
750 
751       setOperationAction(ISD::SELECT, VT, Custom);
752       setOperationAction(ISD::SELECT_CC, VT, Expand);
753 
754       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
755       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
756       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
757 
758       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
759       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
760 
761       for (unsigned VPOpc : FloatingPointVPOps)
762         setOperationAction(VPOpc, VT, Custom);
763     };
764 
765     // Sets common extload/truncstore actions on RVV floating-point vector
766     // types.
767     const auto SetCommonVFPExtLoadTruncStoreActions =
768         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
769           for (auto SmallVT : SmallerVTs) {
770             setTruncStoreAction(VT, SmallVT, Expand);
771             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
772           }
773         };
774 
775     if (Subtarget.hasVInstructionsF16())
776       for (MVT VT : F16VecVTs)
777         SetCommonVFPActions(VT);
778 
779     for (MVT VT : F32VecVTs) {
780       if (Subtarget.hasVInstructionsF32())
781         SetCommonVFPActions(VT);
782       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
783     }
784 
785     for (MVT VT : F64VecVTs) {
786       if (Subtarget.hasVInstructionsF64())
787         SetCommonVFPActions(VT);
788       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
789       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
790     }
791 
792     if (Subtarget.useRVVForFixedLengthVectors()) {
793       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
794         if (!useRVVForFixedLengthVectorVT(VT))
795           continue;
796 
797         // By default everything must be expanded.
798         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
799           setOperationAction(Op, VT, Expand);
800         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
801           setTruncStoreAction(VT, OtherVT, Expand);
802           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
803           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
804           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
805         }
806 
807         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
808         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
809         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
810 
811         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
812         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
813 
814         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
815         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
816 
817         setOperationAction(ISD::LOAD, VT, Custom);
818         setOperationAction(ISD::STORE, VT, Custom);
819 
820         setOperationAction(ISD::SETCC, VT, Custom);
821 
822         setOperationAction(ISD::SELECT, VT, Custom);
823 
824         setOperationAction(ISD::TRUNCATE, VT, Custom);
825 
826         setOperationAction(ISD::BITCAST, VT, Custom);
827 
828         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
829         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
830         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
831 
832         setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
833         setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
834         setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
835 
836         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
837         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
838         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
839         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
840 
841         // Operations below are different for between masks and other vectors.
842         if (VT.getVectorElementType() == MVT::i1) {
843           setOperationAction(ISD::VP_AND, VT, Custom);
844           setOperationAction(ISD::VP_OR, VT, Custom);
845           setOperationAction(ISD::VP_XOR, VT, Custom);
846           setOperationAction(ISD::AND, VT, Custom);
847           setOperationAction(ISD::OR, VT, Custom);
848           setOperationAction(ISD::XOR, VT, Custom);
849           continue;
850         }
851 
852         // Make SPLAT_VECTOR Legal so DAGCombine will convert splat vectors to
853         // it before type legalization for i64 vectors on RV32. It will then be
854         // type legalized to SPLAT_VECTOR_PARTS which we need to Custom handle.
855         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
856         // improvements first.
857         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
858           setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
859           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
860         }
861 
862         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
863         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
864 
865         setOperationAction(ISD::MLOAD, VT, Custom);
866         setOperationAction(ISD::MSTORE, VT, Custom);
867         setOperationAction(ISD::MGATHER, VT, Custom);
868         setOperationAction(ISD::MSCATTER, VT, Custom);
869 
870         setOperationAction(ISD::VP_LOAD, VT, Custom);
871         setOperationAction(ISD::VP_STORE, VT, Custom);
872         setOperationAction(ISD::VP_GATHER, VT, Custom);
873         setOperationAction(ISD::VP_SCATTER, VT, Custom);
874 
875         setOperationAction(ISD::ADD, VT, Custom);
876         setOperationAction(ISD::MUL, VT, Custom);
877         setOperationAction(ISD::SUB, VT, Custom);
878         setOperationAction(ISD::AND, VT, Custom);
879         setOperationAction(ISD::OR, VT, Custom);
880         setOperationAction(ISD::XOR, VT, Custom);
881         setOperationAction(ISD::SDIV, VT, Custom);
882         setOperationAction(ISD::SREM, VT, Custom);
883         setOperationAction(ISD::UDIV, VT, Custom);
884         setOperationAction(ISD::UREM, VT, Custom);
885         setOperationAction(ISD::SHL, VT, Custom);
886         setOperationAction(ISD::SRA, VT, Custom);
887         setOperationAction(ISD::SRL, VT, Custom);
888 
889         setOperationAction(ISD::SMIN, VT, Custom);
890         setOperationAction(ISD::SMAX, VT, Custom);
891         setOperationAction(ISD::UMIN, VT, Custom);
892         setOperationAction(ISD::UMAX, VT, Custom);
893         setOperationAction(ISD::ABS,  VT, Custom);
894 
895         // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
896         if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV()) {
897           setOperationAction(ISD::MULHS, VT, Custom);
898           setOperationAction(ISD::MULHU, VT, Custom);
899         }
900 
901         setOperationAction(ISD::SADDSAT, VT, Custom);
902         setOperationAction(ISD::UADDSAT, VT, Custom);
903         setOperationAction(ISD::SSUBSAT, VT, Custom);
904         setOperationAction(ISD::USUBSAT, VT, Custom);
905 
906         setOperationAction(ISD::VSELECT, VT, Custom);
907         setOperationAction(ISD::SELECT_CC, VT, Expand);
908 
909         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
910         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
911         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
912 
913         // Custom-lower reduction operations to set up the corresponding custom
914         // nodes' operands.
915         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
916         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
917         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
918         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
919         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
920 
921         for (unsigned VPOpc : IntegerVPOps)
922           setOperationAction(VPOpc, VT, Custom);
923 
924         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
925         // type that can represent the value exactly.
926         if (VT.getVectorElementType() != MVT::i64) {
927           MVT FloatEltVT =
928               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
929           EVT FloatVT =
930               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
931           if (isTypeLegal(FloatVT)) {
932             setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
933             setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
934           }
935         }
936       }
937 
938       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
939         if (!useRVVForFixedLengthVectorVT(VT))
940           continue;
941 
942         // By default everything must be expanded.
943         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
944           setOperationAction(Op, VT, Expand);
945         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
946           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
947           setTruncStoreAction(VT, OtherVT, Expand);
948         }
949 
950         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
951         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
952         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
953 
954         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
955         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
956         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
957         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
958         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
959 
960         setOperationAction(ISD::LOAD, VT, Custom);
961         setOperationAction(ISD::STORE, VT, Custom);
962         setOperationAction(ISD::MLOAD, VT, Custom);
963         setOperationAction(ISD::MSTORE, VT, Custom);
964         setOperationAction(ISD::MGATHER, VT, Custom);
965         setOperationAction(ISD::MSCATTER, VT, Custom);
966 
967         setOperationAction(ISD::VP_LOAD, VT, Custom);
968         setOperationAction(ISD::VP_STORE, VT, Custom);
969         setOperationAction(ISD::VP_GATHER, VT, Custom);
970         setOperationAction(ISD::VP_SCATTER, VT, Custom);
971 
972         setOperationAction(ISD::FADD, VT, Custom);
973         setOperationAction(ISD::FSUB, VT, Custom);
974         setOperationAction(ISD::FMUL, VT, Custom);
975         setOperationAction(ISD::FDIV, VT, Custom);
976         setOperationAction(ISD::FNEG, VT, Custom);
977         setOperationAction(ISD::FABS, VT, Custom);
978         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
979         setOperationAction(ISD::FSQRT, VT, Custom);
980         setOperationAction(ISD::FMA, VT, Custom);
981         setOperationAction(ISD::FMINNUM, VT, Custom);
982         setOperationAction(ISD::FMAXNUM, VT, Custom);
983 
984         setOperationAction(ISD::FP_ROUND, VT, Custom);
985         setOperationAction(ISD::FP_EXTEND, VT, Custom);
986 
987         setOperationAction(ISD::FTRUNC, VT, Custom);
988         setOperationAction(ISD::FCEIL, VT, Custom);
989         setOperationAction(ISD::FFLOOR, VT, Custom);
990         setOperationAction(ISD::FROUND, VT, Custom);
991 
992         for (auto CC : VFPCCToExpand)
993           setCondCodeAction(CC, VT, Expand);
994 
995         setOperationAction(ISD::VSELECT, VT, Custom);
996         setOperationAction(ISD::SELECT, VT, Custom);
997         setOperationAction(ISD::SELECT_CC, VT, Expand);
998 
999         setOperationAction(ISD::BITCAST, VT, Custom);
1000 
1001         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1002         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1003         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1004         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1005 
1006         for (unsigned VPOpc : FloatingPointVPOps)
1007           setOperationAction(VPOpc, VT, Custom);
1008       }
1009 
1010       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
1011       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
1012       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
1013       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
1014       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1015       if (Subtarget.hasStdExtZfh())
1016         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
1017       if (Subtarget.hasStdExtF())
1018         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
1019       if (Subtarget.hasStdExtD())
1020         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
1021     }
1022   }
1023 
1024   // Function alignments.
1025   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
1026   setMinFunctionAlignment(FunctionAlignment);
1027   setPrefFunctionAlignment(FunctionAlignment);
1028 
1029   setMinimumJumpTableEntries(5);
1030 
1031   // Jumps are expensive, compared to logic
1032   setJumpIsExpensive();
1033 
1034   setTargetDAGCombine(ISD::ADD);
1035   setTargetDAGCombine(ISD::SUB);
1036   setTargetDAGCombine(ISD::AND);
1037   setTargetDAGCombine(ISD::OR);
1038   setTargetDAGCombine(ISD::XOR);
1039   if (Subtarget.hasStdExtZbp()) {
1040     setTargetDAGCombine(ISD::ROTL);
1041     setTargetDAGCombine(ISD::ROTR);
1042   }
1043   if (Subtarget.hasStdExtZbkb())
1044     setTargetDAGCombine(ISD::BITREVERSE);
1045   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1046   if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZbb())
1047     setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1048   if (Subtarget.hasStdExtF()) {
1049     setTargetDAGCombine(ISD::ZERO_EXTEND);
1050     setTargetDAGCombine(ISD::FP_TO_SINT);
1051     setTargetDAGCombine(ISD::FP_TO_UINT);
1052     setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
1053     setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
1054   }
1055   if (Subtarget.hasVInstructions()) {
1056     setTargetDAGCombine(ISD::FCOPYSIGN);
1057     setTargetDAGCombine(ISD::MGATHER);
1058     setTargetDAGCombine(ISD::MSCATTER);
1059     setTargetDAGCombine(ISD::VP_GATHER);
1060     setTargetDAGCombine(ISD::VP_SCATTER);
1061     setTargetDAGCombine(ISD::SRA);
1062     setTargetDAGCombine(ISD::SRL);
1063     setTargetDAGCombine(ISD::SHL);
1064     setTargetDAGCombine(ISD::STORE);
1065     setTargetDAGCombine(ISD::SPLAT_VECTOR);
1066   }
1067 
1068   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
1069   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
1070 }
1071 
1072 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
1073                                             LLVMContext &Context,
1074                                             EVT VT) const {
1075   if (!VT.isVector())
1076     return getPointerTy(DL);
1077   if (Subtarget.hasVInstructions() &&
1078       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
1079     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
1080   return VT.changeVectorElementTypeToInteger();
1081 }
1082 
1083 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
1084   return Subtarget.getXLenVT();
1085 }
1086 
1087 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1088                                              const CallInst &I,
1089                                              MachineFunction &MF,
1090                                              unsigned Intrinsic) const {
1091   auto &DL = I.getModule()->getDataLayout();
1092   switch (Intrinsic) {
1093   default:
1094     return false;
1095   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
1096   case Intrinsic::riscv_masked_atomicrmw_add_i32:
1097   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
1098   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
1099   case Intrinsic::riscv_masked_atomicrmw_max_i32:
1100   case Intrinsic::riscv_masked_atomicrmw_min_i32:
1101   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
1102   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
1103   case Intrinsic::riscv_masked_cmpxchg_i32:
1104     Info.opc = ISD::INTRINSIC_W_CHAIN;
1105     Info.memVT = MVT::i32;
1106     Info.ptrVal = I.getArgOperand(0);
1107     Info.offset = 0;
1108     Info.align = Align(4);
1109     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
1110                  MachineMemOperand::MOVolatile;
1111     return true;
1112   case Intrinsic::riscv_masked_strided_load:
1113     Info.opc = ISD::INTRINSIC_W_CHAIN;
1114     Info.ptrVal = I.getArgOperand(1);
1115     Info.memVT = getValueType(DL, I.getType()->getScalarType());
1116     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
1117     Info.size = MemoryLocation::UnknownSize;
1118     Info.flags |= MachineMemOperand::MOLoad;
1119     return true;
1120   case Intrinsic::riscv_masked_strided_store:
1121     Info.opc = ISD::INTRINSIC_VOID;
1122     Info.ptrVal = I.getArgOperand(1);
1123     Info.memVT =
1124         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
1125     Info.align = Align(
1126         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
1127         8);
1128     Info.size = MemoryLocation::UnknownSize;
1129     Info.flags |= MachineMemOperand::MOStore;
1130     return true;
1131   case Intrinsic::riscv_seg2_load:
1132   case Intrinsic::riscv_seg3_load:
1133   case Intrinsic::riscv_seg4_load:
1134   case Intrinsic::riscv_seg5_load:
1135   case Intrinsic::riscv_seg6_load:
1136   case Intrinsic::riscv_seg7_load:
1137   case Intrinsic::riscv_seg8_load:
1138     Info.opc = ISD::INTRINSIC_W_CHAIN;
1139     Info.ptrVal = I.getArgOperand(0);
1140     Info.memVT =
1141         getValueType(DL, I.getType()->getStructElementType(0)->getScalarType());
1142     Info.align =
1143         Align(DL.getTypeSizeInBits(
1144                   I.getType()->getStructElementType(0)->getScalarType()) /
1145               8);
1146     Info.size = MemoryLocation::UnknownSize;
1147     Info.flags |= MachineMemOperand::MOLoad;
1148     return true;
1149   }
1150 }
1151 
1152 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1153                                                 const AddrMode &AM, Type *Ty,
1154                                                 unsigned AS,
1155                                                 Instruction *I) const {
1156   // No global is ever allowed as a base.
1157   if (AM.BaseGV)
1158     return false;
1159 
1160   // Require a 12-bit signed offset.
1161   if (!isInt<12>(AM.BaseOffs))
1162     return false;
1163 
1164   switch (AM.Scale) {
1165   case 0: // "r+i" or just "i", depending on HasBaseReg.
1166     break;
1167   case 1:
1168     if (!AM.HasBaseReg) // allow "r+i".
1169       break;
1170     return false; // disallow "r+r" or "r+r+i".
1171   default:
1172     return false;
1173   }
1174 
1175   return true;
1176 }
1177 
1178 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1179   return isInt<12>(Imm);
1180 }
1181 
1182 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1183   return isInt<12>(Imm);
1184 }
1185 
1186 // On RV32, 64-bit integers are split into their high and low parts and held
1187 // in two different registers, so the trunc is free since the low register can
1188 // just be used.
1189 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1190   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1191     return false;
1192   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1193   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1194   return (SrcBits == 64 && DestBits == 32);
1195 }
1196 
1197 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1198   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1199       !SrcVT.isInteger() || !DstVT.isInteger())
1200     return false;
1201   unsigned SrcBits = SrcVT.getSizeInBits();
1202   unsigned DestBits = DstVT.getSizeInBits();
1203   return (SrcBits == 64 && DestBits == 32);
1204 }
1205 
1206 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1207   // Zexts are free if they can be combined with a load.
1208   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1209   // poorly with type legalization of compares preferring sext.
1210   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1211     EVT MemVT = LD->getMemoryVT();
1212     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1213         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1214          LD->getExtensionType() == ISD::ZEXTLOAD))
1215       return true;
1216   }
1217 
1218   return TargetLowering::isZExtFree(Val, VT2);
1219 }
1220 
1221 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1222   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1223 }
1224 
1225 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1226   return Subtarget.hasStdExtZbb();
1227 }
1228 
1229 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1230   return Subtarget.hasStdExtZbb();
1231 }
1232 
1233 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1234   EVT VT = Y.getValueType();
1235 
1236   // FIXME: Support vectors once we have tests.
1237   if (VT.isVector())
1238     return false;
1239 
1240   return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
1241           Subtarget.hasStdExtZbkb()) &&
1242          !isa<ConstantSDNode>(Y);
1243 }
1244 
1245 /// Check if sinking \p I's operands to I's basic block is profitable, because
1246 /// the operands can be folded into a target instruction, e.g.
1247 /// splats of scalars can fold into vector instructions.
1248 bool RISCVTargetLowering::shouldSinkOperands(
1249     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1250   using namespace llvm::PatternMatch;
1251 
1252   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1253     return false;
1254 
1255   auto IsSinker = [&](Instruction *I, int Operand) {
1256     switch (I->getOpcode()) {
1257     case Instruction::Add:
1258     case Instruction::Sub:
1259     case Instruction::Mul:
1260     case Instruction::And:
1261     case Instruction::Or:
1262     case Instruction::Xor:
1263     case Instruction::FAdd:
1264     case Instruction::FSub:
1265     case Instruction::FMul:
1266     case Instruction::FDiv:
1267     case Instruction::ICmp:
1268     case Instruction::FCmp:
1269       return true;
1270     case Instruction::Shl:
1271     case Instruction::LShr:
1272     case Instruction::AShr:
1273     case Instruction::UDiv:
1274     case Instruction::SDiv:
1275     case Instruction::URem:
1276     case Instruction::SRem:
1277       return Operand == 1;
1278     case Instruction::Call:
1279       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1280         switch (II->getIntrinsicID()) {
1281         case Intrinsic::fma:
1282         case Intrinsic::vp_fma:
1283           return Operand == 0 || Operand == 1;
1284         // FIXME: Our patterns can only match vx/vf instructions when the splat
1285         // it on the RHS, because TableGen doesn't recognize our VP operations
1286         // as commutative.
1287         case Intrinsic::vp_add:
1288         case Intrinsic::vp_mul:
1289         case Intrinsic::vp_and:
1290         case Intrinsic::vp_or:
1291         case Intrinsic::vp_xor:
1292         case Intrinsic::vp_fadd:
1293         case Intrinsic::vp_fmul:
1294         case Intrinsic::vp_shl:
1295         case Intrinsic::vp_lshr:
1296         case Intrinsic::vp_ashr:
1297         case Intrinsic::vp_udiv:
1298         case Intrinsic::vp_sdiv:
1299         case Intrinsic::vp_urem:
1300         case Intrinsic::vp_srem:
1301           return Operand == 1;
1302         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1303         // explicit patterns for both LHS and RHS (as 'vr' versions).
1304         case Intrinsic::vp_sub:
1305         case Intrinsic::vp_fsub:
1306         case Intrinsic::vp_fdiv:
1307           return Operand == 0 || Operand == 1;
1308         default:
1309           return false;
1310         }
1311       }
1312       return false;
1313     default:
1314       return false;
1315     }
1316   };
1317 
1318   for (auto OpIdx : enumerate(I->operands())) {
1319     if (!IsSinker(I, OpIdx.index()))
1320       continue;
1321 
1322     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1323     // Make sure we are not already sinking this operand
1324     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1325       continue;
1326 
1327     // We are looking for a splat that can be sunk.
1328     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1329                              m_Undef(), m_ZeroMask())))
1330       continue;
1331 
1332     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1333     // and vector registers
1334     for (Use &U : Op->uses()) {
1335       Instruction *Insn = cast<Instruction>(U.getUser());
1336       if (!IsSinker(Insn, U.getOperandNo()))
1337         return false;
1338     }
1339 
1340     Ops.push_back(&Op->getOperandUse(0));
1341     Ops.push_back(&OpIdx.value());
1342   }
1343   return true;
1344 }
1345 
1346 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1347                                        bool ForCodeSize) const {
1348   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1349   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1350     return false;
1351   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1352     return false;
1353   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1354     return false;
1355   return Imm.isZero();
1356 }
1357 
1358 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1359   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1360          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1361          (VT == MVT::f64 && Subtarget.hasStdExtD());
1362 }
1363 
1364 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1365                                                       CallingConv::ID CC,
1366                                                       EVT VT) const {
1367   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1368   // We might still end up using a GPR but that will be decided based on ABI.
1369   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1370   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1371     return MVT::f32;
1372 
1373   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1374 }
1375 
1376 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1377                                                            CallingConv::ID CC,
1378                                                            EVT VT) const {
1379   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1380   // We might still end up using a GPR but that will be decided based on ABI.
1381   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1382   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1383     return 1;
1384 
1385   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1386 }
1387 
1388 // Changes the condition code and swaps operands if necessary, so the SetCC
1389 // operation matches one of the comparisons supported directly by branches
1390 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1391 // with 1/-1.
1392 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1393                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1394   // Convert X > -1 to X >= 0.
1395   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1396     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1397     CC = ISD::SETGE;
1398     return;
1399   }
1400   // Convert X < 1 to 0 >= X.
1401   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1402     RHS = LHS;
1403     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1404     CC = ISD::SETGE;
1405     return;
1406   }
1407 
1408   switch (CC) {
1409   default:
1410     break;
1411   case ISD::SETGT:
1412   case ISD::SETLE:
1413   case ISD::SETUGT:
1414   case ISD::SETULE:
1415     CC = ISD::getSetCCSwappedOperands(CC);
1416     std::swap(LHS, RHS);
1417     break;
1418   }
1419 }
1420 
1421 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1422   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1423   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1424   if (VT.getVectorElementType() == MVT::i1)
1425     KnownSize *= 8;
1426 
1427   switch (KnownSize) {
1428   default:
1429     llvm_unreachable("Invalid LMUL.");
1430   case 8:
1431     return RISCVII::VLMUL::LMUL_F8;
1432   case 16:
1433     return RISCVII::VLMUL::LMUL_F4;
1434   case 32:
1435     return RISCVII::VLMUL::LMUL_F2;
1436   case 64:
1437     return RISCVII::VLMUL::LMUL_1;
1438   case 128:
1439     return RISCVII::VLMUL::LMUL_2;
1440   case 256:
1441     return RISCVII::VLMUL::LMUL_4;
1442   case 512:
1443     return RISCVII::VLMUL::LMUL_8;
1444   }
1445 }
1446 
1447 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1448   switch (LMul) {
1449   default:
1450     llvm_unreachable("Invalid LMUL.");
1451   case RISCVII::VLMUL::LMUL_F8:
1452   case RISCVII::VLMUL::LMUL_F4:
1453   case RISCVII::VLMUL::LMUL_F2:
1454   case RISCVII::VLMUL::LMUL_1:
1455     return RISCV::VRRegClassID;
1456   case RISCVII::VLMUL::LMUL_2:
1457     return RISCV::VRM2RegClassID;
1458   case RISCVII::VLMUL::LMUL_4:
1459     return RISCV::VRM4RegClassID;
1460   case RISCVII::VLMUL::LMUL_8:
1461     return RISCV::VRM8RegClassID;
1462   }
1463 }
1464 
1465 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1466   RISCVII::VLMUL LMUL = getLMUL(VT);
1467   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1468       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1469       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1470       LMUL == RISCVII::VLMUL::LMUL_1) {
1471     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1472                   "Unexpected subreg numbering");
1473     return RISCV::sub_vrm1_0 + Index;
1474   }
1475   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1476     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1477                   "Unexpected subreg numbering");
1478     return RISCV::sub_vrm2_0 + Index;
1479   }
1480   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1481     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1482                   "Unexpected subreg numbering");
1483     return RISCV::sub_vrm4_0 + Index;
1484   }
1485   llvm_unreachable("Invalid vector type.");
1486 }
1487 
1488 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1489   if (VT.getVectorElementType() == MVT::i1)
1490     return RISCV::VRRegClassID;
1491   return getRegClassIDForLMUL(getLMUL(VT));
1492 }
1493 
1494 // Attempt to decompose a subvector insert/extract between VecVT and
1495 // SubVecVT via subregister indices. Returns the subregister index that
1496 // can perform the subvector insert/extract with the given element index, as
1497 // well as the index corresponding to any leftover subvectors that must be
1498 // further inserted/extracted within the register class for SubVecVT.
1499 std::pair<unsigned, unsigned>
1500 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1501     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1502     const RISCVRegisterInfo *TRI) {
1503   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1504                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1505                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1506                 "Register classes not ordered");
1507   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1508   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1509   // Try to compose a subregister index that takes us from the incoming
1510   // LMUL>1 register class down to the outgoing one. At each step we half
1511   // the LMUL:
1512   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1513   // Note that this is not guaranteed to find a subregister index, such as
1514   // when we are extracting from one VR type to another.
1515   unsigned SubRegIdx = RISCV::NoSubRegister;
1516   for (const unsigned RCID :
1517        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1518     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1519       VecVT = VecVT.getHalfNumVectorElementsVT();
1520       bool IsHi =
1521           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1522       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1523                                             getSubregIndexByMVT(VecVT, IsHi));
1524       if (IsHi)
1525         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1526     }
1527   return {SubRegIdx, InsertExtractIdx};
1528 }
1529 
1530 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1531 // stores for those types.
1532 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1533   return !Subtarget.useRVVForFixedLengthVectors() ||
1534          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1535 }
1536 
1537 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1538   if (ScalarTy->isPointerTy())
1539     return true;
1540 
1541   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1542       ScalarTy->isIntegerTy(32))
1543     return true;
1544 
1545   if (ScalarTy->isIntegerTy(64))
1546     return Subtarget.hasVInstructionsI64();
1547 
1548   if (ScalarTy->isHalfTy())
1549     return Subtarget.hasVInstructionsF16();
1550   if (ScalarTy->isFloatTy())
1551     return Subtarget.hasVInstructionsF32();
1552   if (ScalarTy->isDoubleTy())
1553     return Subtarget.hasVInstructionsF64();
1554 
1555   return false;
1556 }
1557 
1558 static SDValue getVLOperand(SDValue Op) {
1559   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1560           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
1561          "Unexpected opcode");
1562   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
1563   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
1564   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1565       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
1566   if (!II)
1567     return SDValue();
1568   return Op.getOperand(II->VLOperand + 1 + HasChain);
1569 }
1570 
1571 static bool useRVVForFixedLengthVectorVT(MVT VT,
1572                                          const RISCVSubtarget &Subtarget) {
1573   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1574   if (!Subtarget.useRVVForFixedLengthVectors())
1575     return false;
1576 
1577   // We only support a set of vector types with a consistent maximum fixed size
1578   // across all supported vector element types to avoid legalization issues.
1579   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1580   // fixed-length vector type we support is 1024 bytes.
1581   if (VT.getFixedSizeInBits() > 1024 * 8)
1582     return false;
1583 
1584   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1585 
1586   MVT EltVT = VT.getVectorElementType();
1587 
1588   // Don't use RVV for vectors we cannot scalarize if required.
1589   switch (EltVT.SimpleTy) {
1590   // i1 is supported but has different rules.
1591   default:
1592     return false;
1593   case MVT::i1:
1594     // Masks can only use a single register.
1595     if (VT.getVectorNumElements() > MinVLen)
1596       return false;
1597     MinVLen /= 8;
1598     break;
1599   case MVT::i8:
1600   case MVT::i16:
1601   case MVT::i32:
1602     break;
1603   case MVT::i64:
1604     if (!Subtarget.hasVInstructionsI64())
1605       return false;
1606     break;
1607   case MVT::f16:
1608     if (!Subtarget.hasVInstructionsF16())
1609       return false;
1610     break;
1611   case MVT::f32:
1612     if (!Subtarget.hasVInstructionsF32())
1613       return false;
1614     break;
1615   case MVT::f64:
1616     if (!Subtarget.hasVInstructionsF64())
1617       return false;
1618     break;
1619   }
1620 
1621   // Reject elements larger than ELEN.
1622   if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
1623     return false;
1624 
1625   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1626   // Don't use RVV for types that don't fit.
1627   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1628     return false;
1629 
1630   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1631   // the base fixed length RVV support in place.
1632   if (!VT.isPow2VectorType())
1633     return false;
1634 
1635   return true;
1636 }
1637 
1638 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1639   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1640 }
1641 
1642 // Return the largest legal scalable vector type that matches VT's element type.
1643 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1644                                             const RISCVSubtarget &Subtarget) {
1645   // This may be called before legal types are setup.
1646   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1647           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1648          "Expected legal fixed length vector!");
1649 
1650   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1651   unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();
1652 
1653   MVT EltVT = VT.getVectorElementType();
1654   switch (EltVT.SimpleTy) {
1655   default:
1656     llvm_unreachable("unexpected element type for RVV container");
1657   case MVT::i1:
1658   case MVT::i8:
1659   case MVT::i16:
1660   case MVT::i32:
1661   case MVT::i64:
1662   case MVT::f16:
1663   case MVT::f32:
1664   case MVT::f64: {
1665     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1666     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1667     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1668     unsigned NumElts =
1669         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1670     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1671     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1672     return MVT::getScalableVectorVT(EltVT, NumElts);
1673   }
1674   }
1675 }
1676 
1677 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1678                                             const RISCVSubtarget &Subtarget) {
1679   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1680                                           Subtarget);
1681 }
1682 
1683 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1684   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1685 }
1686 
1687 // Grow V to consume an entire RVV register.
1688 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1689                                        const RISCVSubtarget &Subtarget) {
1690   assert(VT.isScalableVector() &&
1691          "Expected to convert into a scalable vector!");
1692   assert(V.getValueType().isFixedLengthVector() &&
1693          "Expected a fixed length vector operand!");
1694   SDLoc DL(V);
1695   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1696   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1697 }
1698 
1699 // Shrink V so it's just big enough to maintain a VT's worth of data.
1700 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1701                                          const RISCVSubtarget &Subtarget) {
1702   assert(VT.isFixedLengthVector() &&
1703          "Expected to convert into a fixed length vector!");
1704   assert(V.getValueType().isScalableVector() &&
1705          "Expected a scalable vector operand!");
1706   SDLoc DL(V);
1707   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1708   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1709 }
1710 
1711 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1712 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1713 // the vector type that it is contained in.
1714 static std::pair<SDValue, SDValue>
1715 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1716                 const RISCVSubtarget &Subtarget) {
1717   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1718   MVT XLenVT = Subtarget.getXLenVT();
1719   SDValue VL = VecVT.isFixedLengthVector()
1720                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1721                    : DAG.getRegister(RISCV::X0, XLenVT);
1722   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1723   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1724   return {Mask, VL};
1725 }
1726 
1727 // As above but assuming the given type is a scalable vector type.
1728 static std::pair<SDValue, SDValue>
1729 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1730                         const RISCVSubtarget &Subtarget) {
1731   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1732   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1733 }
1734 
1735 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1736 // of either is (currently) supported. This can get us into an infinite loop
1737 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1738 // as a ..., etc.
1739 // Until either (or both) of these can reliably lower any node, reporting that
1740 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1741 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1742 // which is not desirable.
1743 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1744     EVT VT, unsigned DefinedValues) const {
1745   return false;
1746 }
1747 
1748 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1749                                   const RISCVSubtarget &Subtarget) {
1750   // RISCV FP-to-int conversions saturate to the destination register size, but
1751   // don't produce 0 for nan. We can use a conversion instruction and fix the
1752   // nan case with a compare and a select.
1753   SDValue Src = Op.getOperand(0);
1754 
1755   EVT DstVT = Op.getValueType();
1756   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1757 
1758   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1759   unsigned Opc;
1760   if (SatVT == DstVT)
1761     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1762   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1763     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1764   else
1765     return SDValue();
1766   // FIXME: Support other SatVTs by clamping before or after the conversion.
1767 
1768   SDLoc DL(Op);
1769   SDValue FpToInt = DAG.getNode(
1770       Opc, DL, DstVT, Src,
1771       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1772 
1773   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1774   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1775 }
1776 
1777 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1778 // and back. Taking care to avoid converting values that are nan or already
1779 // correct.
1780 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1781 // have FRM dependencies modeled yet.
1782 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1783   MVT VT = Op.getSimpleValueType();
1784   assert(VT.isVector() && "Unexpected type");
1785 
1786   SDLoc DL(Op);
1787 
1788   // Freeze the source since we are increasing the number of uses.
1789   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1790 
1791   // Truncate to integer and convert back to FP.
1792   MVT IntVT = VT.changeVectorElementTypeToInteger();
1793   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1794   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1795 
1796   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1797 
1798   if (Op.getOpcode() == ISD::FCEIL) {
1799     // If the truncated value is the greater than or equal to the original
1800     // value, we've computed the ceil. Otherwise, we went the wrong way and
1801     // need to increase by 1.
1802     // FIXME: This should use a masked operation. Handle here or in isel?
1803     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1804                                  DAG.getConstantFP(1.0, DL, VT));
1805     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1806     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1807   } else if (Op.getOpcode() == ISD::FFLOOR) {
1808     // If the truncated value is the less than or equal to the original value,
1809     // we've computed the floor. Otherwise, we went the wrong way and need to
1810     // decrease by 1.
1811     // FIXME: This should use a masked operation. Handle here or in isel?
1812     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1813                                  DAG.getConstantFP(1.0, DL, VT));
1814     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1815     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1816   }
1817 
1818   // Restore the original sign so that -0.0 is preserved.
1819   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1820 
1821   // Determine the largest integer that can be represented exactly. This and
1822   // values larger than it don't have any fractional bits so don't need to
1823   // be converted.
1824   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1825   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1826   APFloat MaxVal = APFloat(FltSem);
1827   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1828                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1829   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1830 
1831   // If abs(Src) was larger than MaxVal or nan, keep it.
1832   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1833   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1834   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1835 }
1836 
1837 // ISD::FROUND is defined to round to nearest with ties rounding away from 0.
1838 // This mode isn't supported in vector hardware on RISCV. But as long as we
1839 // aren't compiling with trapping math, we can emulate this with
1840 // floor(X + copysign(nextafter(0.5, 0.0), X)).
1841 // FIXME: Could be shorter by changing rounding mode, but we don't have FRM
1842 // dependencies modeled yet.
1843 // FIXME: Use masked operations to avoid final merge.
1844 static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
1845   MVT VT = Op.getSimpleValueType();
1846   assert(VT.isVector() && "Unexpected type");
1847 
1848   SDLoc DL(Op);
1849 
1850   // Freeze the source since we are increasing the number of uses.
1851   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1852 
1853   // We do the conversion on the absolute value and fix the sign at the end.
1854   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1855 
1856   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1857   bool Ignored;
1858   APFloat Point5Pred = APFloat(0.5f);
1859   Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
1860   Point5Pred.next(/*nextDown*/ true);
1861 
1862   // Add the adjustment.
1863   SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
1864                                DAG.getConstantFP(Point5Pred, DL, VT));
1865 
1866   // Truncate to integer and convert back to fp.
1867   MVT IntVT = VT.changeVectorElementTypeToInteger();
1868   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
1869   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1870 
1871   // Restore the original sign.
1872   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1873 
1874   // Determine the largest integer that can be represented exactly. This and
1875   // values larger than it don't have any fractional bits so don't need to
1876   // be converted.
1877   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1878   APFloat MaxVal = APFloat(FltSem);
1879   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1880                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1881   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1882 
1883   // If abs(Src) was larger than MaxVal or nan, keep it.
1884   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1885   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1886   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1887 }
1888 
1889 struct VIDSequence {
1890   int64_t StepNumerator;
1891   unsigned StepDenominator;
1892   int64_t Addend;
1893 };
1894 
1895 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1896 // to the (non-zero) step S and start value X. This can be then lowered as the
1897 // RVV sequence (VID * S) + X, for example.
1898 // The step S is represented as an integer numerator divided by a positive
1899 // denominator. Note that the implementation currently only identifies
1900 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1901 // cannot detect 2/3, for example.
1902 // Note that this method will also match potentially unappealing index
1903 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1904 // determine whether this is worth generating code for.
1905 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1906   unsigned NumElts = Op.getNumOperands();
1907   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1908   if (!Op.getValueType().isInteger())
1909     return None;
1910 
1911   Optional<unsigned> SeqStepDenom;
1912   Optional<int64_t> SeqStepNum, SeqAddend;
1913   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1914   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1915   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1916     // Assume undef elements match the sequence; we just have to be careful
1917     // when interpolating across them.
1918     if (Op.getOperand(Idx).isUndef())
1919       continue;
1920     // The BUILD_VECTOR must be all constants.
1921     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1922       return None;
1923 
1924     uint64_t Val = Op.getConstantOperandVal(Idx) &
1925                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1926 
1927     if (PrevElt) {
1928       // Calculate the step since the last non-undef element, and ensure
1929       // it's consistent across the entire sequence.
1930       unsigned IdxDiff = Idx - PrevElt->second;
1931       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1932 
1933       // A zero-value value difference means that we're somewhere in the middle
1934       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1935       // step change before evaluating the sequence.
1936       if (ValDiff != 0) {
1937         int64_t Remainder = ValDiff % IdxDiff;
1938         // Normalize the step if it's greater than 1.
1939         if (Remainder != ValDiff) {
1940           // The difference must cleanly divide the element span.
1941           if (Remainder != 0)
1942             return None;
1943           ValDiff /= IdxDiff;
1944           IdxDiff = 1;
1945         }
1946 
1947         if (!SeqStepNum)
1948           SeqStepNum = ValDiff;
1949         else if (ValDiff != SeqStepNum)
1950           return None;
1951 
1952         if (!SeqStepDenom)
1953           SeqStepDenom = IdxDiff;
1954         else if (IdxDiff != *SeqStepDenom)
1955           return None;
1956       }
1957     }
1958 
1959     // Record and/or check any addend.
1960     if (SeqStepNum && SeqStepDenom) {
1961       uint64_t ExpectedVal =
1962           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1963       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1964       if (!SeqAddend)
1965         SeqAddend = Addend;
1966       else if (SeqAddend != Addend)
1967         return None;
1968     }
1969 
1970     // Record this non-undef element for later.
1971     if (!PrevElt || PrevElt->first != Val)
1972       PrevElt = std::make_pair(Val, Idx);
1973   }
1974   // We need to have logged both a step and an addend for this to count as
1975   // a legal index sequence.
1976   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
1977     return None;
1978 
1979   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1980 }
1981 
1982 // Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
1983 // and lower it as a VRGATHER_VX_VL from the source vector.
1984 static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
1985                                   SelectionDAG &DAG,
1986                                   const RISCVSubtarget &Subtarget) {
1987   if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1988     return SDValue();
1989   SDValue Vec = SplatVal.getOperand(0);
1990   // Only perform this optimization on vectors of the same size for simplicity.
1991   if (Vec.getValueType() != VT)
1992     return SDValue();
1993   SDValue Idx = SplatVal.getOperand(1);
1994   // The index must be a legal type.
1995   if (Idx.getValueType() != Subtarget.getXLenVT())
1996     return SDValue();
1997 
1998   MVT ContainerVT = VT;
1999   if (VT.isFixedLengthVector()) {
2000     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2001     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2002   }
2003 
2004   SDValue Mask, VL;
2005   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2006 
2007   SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
2008                                Idx, Mask, VL);
2009 
2010   if (!VT.isFixedLengthVector())
2011     return Gather;
2012 
2013   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2014 }
2015 
2016 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
2017                                  const RISCVSubtarget &Subtarget) {
2018   MVT VT = Op.getSimpleValueType();
2019   assert(VT.isFixedLengthVector() && "Unexpected vector!");
2020 
2021   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2022 
2023   SDLoc DL(Op);
2024   SDValue Mask, VL;
2025   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2026 
2027   MVT XLenVT = Subtarget.getXLenVT();
2028   unsigned NumElts = Op.getNumOperands();
2029 
2030   if (VT.getVectorElementType() == MVT::i1) {
2031     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
2032       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
2033       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
2034     }
2035 
2036     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
2037       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
2038       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
2039     }
2040 
2041     // Lower constant mask BUILD_VECTORs via an integer vector type, in
2042     // scalar integer chunks whose bit-width depends on the number of mask
2043     // bits and XLEN.
2044     // First, determine the most appropriate scalar integer type to use. This
2045     // is at most XLenVT, but may be shrunk to a smaller vector element type
2046     // according to the size of the final vector - use i8 chunks rather than
2047     // XLenVT if we're producing a v8i1. This results in more consistent
2048     // codegen across RV32 and RV64.
2049     unsigned NumViaIntegerBits =
2050         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
2051     NumViaIntegerBits = std::min(NumViaIntegerBits,
2052                                  Subtarget.getMaxELENForFixedLengthVectors());
2053     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
2054       // If we have to use more than one INSERT_VECTOR_ELT then this
2055       // optimization is likely to increase code size; avoid peforming it in
2056       // such a case. We can use a load from a constant pool in this case.
2057       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
2058         return SDValue();
2059       // Now we can create our integer vector type. Note that it may be larger
2060       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
2061       MVT IntegerViaVecVT =
2062           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
2063                            divideCeil(NumElts, NumViaIntegerBits));
2064 
2065       uint64_t Bits = 0;
2066       unsigned BitPos = 0, IntegerEltIdx = 0;
2067       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
2068 
2069       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
2070         // Once we accumulate enough bits to fill our scalar type, insert into
2071         // our vector and clear our accumulated data.
2072         if (I != 0 && I % NumViaIntegerBits == 0) {
2073           if (NumViaIntegerBits <= 32)
2074             Bits = SignExtend64(Bits, 32);
2075           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2076           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
2077                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2078           Bits = 0;
2079           BitPos = 0;
2080           IntegerEltIdx++;
2081         }
2082         SDValue V = Op.getOperand(I);
2083         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
2084         Bits |= ((uint64_t)BitValue << BitPos);
2085       }
2086 
2087       // Insert the (remaining) scalar value into position in our integer
2088       // vector type.
2089       if (NumViaIntegerBits <= 32)
2090         Bits = SignExtend64(Bits, 32);
2091       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2092       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
2093                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2094 
2095       if (NumElts < NumViaIntegerBits) {
2096         // If we're producing a smaller vector than our minimum legal integer
2097         // type, bitcast to the equivalent (known-legal) mask type, and extract
2098         // our final mask.
2099         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
2100         Vec = DAG.getBitcast(MVT::v8i1, Vec);
2101         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
2102                           DAG.getConstant(0, DL, XLenVT));
2103       } else {
2104         // Else we must have produced an integer type with the same size as the
2105         // mask type; bitcast for the final result.
2106         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
2107         Vec = DAG.getBitcast(VT, Vec);
2108       }
2109 
2110       return Vec;
2111     }
2112 
2113     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
2114     // vector type, we have a legal equivalently-sized i8 type, so we can use
2115     // that.
2116     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
2117     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
2118 
2119     SDValue WideVec;
2120     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2121       // For a splat, perform a scalar truncate before creating the wider
2122       // vector.
2123       assert(Splat.getValueType() == XLenVT &&
2124              "Unexpected type for i1 splat value");
2125       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2126                           DAG.getConstant(1, DL, XLenVT));
2127       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2128     } else {
2129       SmallVector<SDValue, 8> Ops(Op->op_values());
2130       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2131       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2132       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2133     }
2134 
2135     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2136   }
2137 
2138   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2139     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
2140       return Gather;
2141     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2142                                         : RISCVISD::VMV_V_X_VL;
2143     Splat =
2144         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
2145     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2146   }
2147 
2148   // Try and match index sequences, which we can lower to the vid instruction
2149   // with optional modifications. An all-undef vector is matched by
2150   // getSplatValue, above.
2151   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2152     int64_t StepNumerator = SimpleVID->StepNumerator;
2153     unsigned StepDenominator = SimpleVID->StepDenominator;
2154     int64_t Addend = SimpleVID->Addend;
2155 
2156     assert(StepNumerator != 0 && "Invalid step");
2157     bool Negate = false;
2158     int64_t SplatStepVal = StepNumerator;
2159     unsigned StepOpcode = ISD::MUL;
2160     if (StepNumerator != 1) {
2161       if (isPowerOf2_64(std::abs(StepNumerator))) {
2162         Negate = StepNumerator < 0;
2163         StepOpcode = ISD::SHL;
2164         SplatStepVal = Log2_64(std::abs(StepNumerator));
2165       }
2166     }
2167 
2168     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
2169     // threshold since it's the immediate value many RVV instructions accept.
2170     // There is no vmul.vi instruction so ensure multiply constant can fit in
2171     // a single addi instruction.
2172     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2173          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2174         isPowerOf2_32(StepDenominator) && isInt<5>(Addend)) {
2175       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2176       // Convert right out of the scalable type so we can use standard ISD
2177       // nodes for the rest of the computation. If we used scalable types with
2178       // these, we'd lose the fixed-length vector info and generate worse
2179       // vsetvli code.
2180       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2181       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2182           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2183         SDValue SplatStep = DAG.getSplatVector(
2184             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2185         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2186       }
2187       if (StepDenominator != 1) {
2188         SDValue SplatStep = DAG.getSplatVector(
2189             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2190         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2191       }
2192       if (Addend != 0 || Negate) {
2193         SDValue SplatAddend =
2194             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
2195         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
2196       }
2197       return VID;
2198     }
2199   }
2200 
2201   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2202   // when re-interpreted as a vector with a larger element type. For example,
2203   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2204   // could be instead splat as
2205   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2206   // TODO: This optimization could also work on non-constant splats, but it
2207   // would require bit-manipulation instructions to construct the splat value.
2208   SmallVector<SDValue> Sequence;
2209   unsigned EltBitSize = VT.getScalarSizeInBits();
2210   const auto *BV = cast<BuildVectorSDNode>(Op);
2211   if (VT.isInteger() && EltBitSize < 64 &&
2212       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2213       BV->getRepeatedSequence(Sequence) &&
2214       (Sequence.size() * EltBitSize) <= 64) {
2215     unsigned SeqLen = Sequence.size();
2216     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2217     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2218     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2219             ViaIntVT == MVT::i64) &&
2220            "Unexpected sequence type");
2221 
2222     unsigned EltIdx = 0;
2223     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2224     uint64_t SplatValue = 0;
2225     // Construct the amalgamated value which can be splatted as this larger
2226     // vector type.
2227     for (const auto &SeqV : Sequence) {
2228       if (!SeqV.isUndef())
2229         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2230                        << (EltIdx * EltBitSize));
2231       EltIdx++;
2232     }
2233 
2234     // On RV64, sign-extend from 32 to 64 bits where possible in order to
2235     // achieve better constant materializion.
2236     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2237       SplatValue = SignExtend64(SplatValue, 32);
2238 
2239     // Since we can't introduce illegal i64 types at this stage, we can only
2240     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2241     // way we can use RVV instructions to splat.
2242     assert((ViaIntVT.bitsLE(XLenVT) ||
2243             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2244            "Unexpected bitcast sequence");
2245     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2246       SDValue ViaVL =
2247           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2248       MVT ViaContainerVT =
2249           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2250       SDValue Splat =
2251           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2252                       DAG.getUNDEF(ViaContainerVT),
2253                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2254       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2255       return DAG.getBitcast(VT, Splat);
2256     }
2257   }
2258 
2259   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2260   // which constitute a large proportion of the elements. In such cases we can
2261   // splat a vector with the dominant element and make up the shortfall with
2262   // INSERT_VECTOR_ELTs.
2263   // Note that this includes vectors of 2 elements by association. The
2264   // upper-most element is the "dominant" one, allowing us to use a splat to
2265   // "insert" the upper element, and an insert of the lower element at position
2266   // 0, which improves codegen.
2267   SDValue DominantValue;
2268   unsigned MostCommonCount = 0;
2269   DenseMap<SDValue, unsigned> ValueCounts;
2270   unsigned NumUndefElts =
2271       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2272 
2273   // Track the number of scalar loads we know we'd be inserting, estimated as
2274   // any non-zero floating-point constant. Other kinds of element are either
2275   // already in registers or are materialized on demand. The threshold at which
2276   // a vector load is more desirable than several scalar materializion and
2277   // vector-insertion instructions is not known.
2278   unsigned NumScalarLoads = 0;
2279 
2280   for (SDValue V : Op->op_values()) {
2281     if (V.isUndef())
2282       continue;
2283 
2284     ValueCounts.insert(std::make_pair(V, 0));
2285     unsigned &Count = ValueCounts[V];
2286 
2287     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2288       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2289 
2290     // Is this value dominant? In case of a tie, prefer the highest element as
2291     // it's cheaper to insert near the beginning of a vector than it is at the
2292     // end.
2293     if (++Count >= MostCommonCount) {
2294       DominantValue = V;
2295       MostCommonCount = Count;
2296     }
2297   }
2298 
2299   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2300   unsigned NumDefElts = NumElts - NumUndefElts;
2301   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2302 
2303   // Don't perform this optimization when optimizing for size, since
2304   // materializing elements and inserting them tends to cause code bloat.
2305   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2306       ((MostCommonCount > DominantValueCountThreshold) ||
2307        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2308     // Start by splatting the most common element.
2309     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2310 
2311     DenseSet<SDValue> Processed{DominantValue};
2312     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2313     for (const auto &OpIdx : enumerate(Op->ops())) {
2314       const SDValue &V = OpIdx.value();
2315       if (V.isUndef() || !Processed.insert(V).second)
2316         continue;
2317       if (ValueCounts[V] == 1) {
2318         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2319                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2320       } else {
2321         // Blend in all instances of this value using a VSELECT, using a
2322         // mask where each bit signals whether that element is the one
2323         // we're after.
2324         SmallVector<SDValue> Ops;
2325         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2326           return DAG.getConstant(V == V1, DL, XLenVT);
2327         });
2328         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2329                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2330                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2331       }
2332     }
2333 
2334     return Vec;
2335   }
2336 
2337   return SDValue();
2338 }
2339 
2340 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2341                                    SDValue Lo, SDValue Hi, SDValue VL,
2342                                    SelectionDAG &DAG) {
2343   if (!Passthru)
2344     Passthru = DAG.getUNDEF(VT);
2345   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2346     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2347     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2348     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2349     // node in order to try and match RVV vector/scalar instructions.
2350     if ((LoC >> 31) == HiC)
2351       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
2352 
2353     // If vl is equal to XLEN_MAX and Hi constant is equal to Lo, we could use
2354     // vmv.v.x whose EEW = 32 to lower it.
2355     auto *Const = dyn_cast<ConstantSDNode>(VL);
2356     if (LoC == HiC && Const && Const->isAllOnesValue()) {
2357       MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
2358       // TODO: if vl <= min(VLMAX), we can also do this. But we could not
2359       // access the subtarget here now.
2360       auto InterVec = DAG.getNode(
2361           RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT), Lo,
2362                                   DAG.getRegister(RISCV::X0, MVT::i32));
2363       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2364     }
2365   }
2366 
2367   // Fall back to a stack store and stride x0 vector load.
2368   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
2369                      Hi, VL);
2370 }
2371 
2372 // Called by type legalization to handle splat of i64 on RV32.
2373 // FIXME: We can optimize this when the type has sign or zero bits in one
2374 // of the halves.
2375 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2376                                    SDValue Scalar, SDValue VL,
2377                                    SelectionDAG &DAG) {
2378   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2379   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2380                            DAG.getConstant(0, DL, MVT::i32));
2381   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2382                            DAG.getConstant(1, DL, MVT::i32));
2383   return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
2384 }
2385 
2386 // This function lowers a splat of a scalar operand Splat with the vector
2387 // length VL. It ensures the final sequence is type legal, which is useful when
2388 // lowering a splat after type legalization.
2389 static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
2390                                 MVT VT, SDLoc DL, SelectionDAG &DAG,
2391                                 const RISCVSubtarget &Subtarget) {
2392   bool HasPassthru = Passthru && !Passthru.isUndef();
2393   if (!HasPassthru && !Passthru)
2394     Passthru = DAG.getUNDEF(VT);
2395   if (VT.isFloatingPoint()) {
2396     // If VL is 1, we could use vfmv.s.f.
2397     if (isOneConstant(VL))
2398       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
2399     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
2400   }
2401 
2402   MVT XLenVT = Subtarget.getXLenVT();
2403 
2404   // Simplest case is that the operand needs to be promoted to XLenVT.
2405   if (Scalar.getValueType().bitsLE(XLenVT)) {
2406     // If the operand is a constant, sign extend to increase our chances
2407     // of being able to use a .vi instruction. ANY_EXTEND would become a
2408     // a zero extend and the simm5 check in isel would fail.
2409     // FIXME: Should we ignore the upper bits in isel instead?
2410     unsigned ExtOpc =
2411         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2412     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2413     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2414     // If VL is 1 and the scalar value won't benefit from immediate, we could
2415     // use vmv.s.x.
2416     if (isOneConstant(VL) &&
2417         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2418       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
2419     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
2420   }
2421 
2422   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2423          "Unexpected scalar for splat lowering!");
2424 
2425   if (isOneConstant(VL) && isNullConstant(Scalar))
2426     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
2427                        DAG.getConstant(0, DL, XLenVT), VL);
2428 
2429   // Otherwise use the more complicated splatting algorithm.
2430   return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
2431 }
2432 
2433 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
2434                                 const RISCVSubtarget &Subtarget) {
2435   // We need to be able to widen elements to the next larger integer type.
2436   if (VT.getScalarSizeInBits() >= Subtarget.getMaxELENForFixedLengthVectors())
2437     return false;
2438 
2439   int Size = Mask.size();
2440   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
2441 
2442   int Srcs[] = {-1, -1};
2443   for (int i = 0; i != Size; ++i) {
2444     // Ignore undef elements.
2445     if (Mask[i] < 0)
2446       continue;
2447 
2448     // Is this an even or odd element.
2449     int Pol = i % 2;
2450 
2451     // Ensure we consistently use the same source for this element polarity.
2452     int Src = Mask[i] / Size;
2453     if (Srcs[Pol] < 0)
2454       Srcs[Pol] = Src;
2455     if (Srcs[Pol] != Src)
2456       return false;
2457 
2458     // Make sure the element within the source is appropriate for this element
2459     // in the destination.
2460     int Elt = Mask[i] % Size;
2461     if (Elt != i / 2)
2462       return false;
2463   }
2464 
2465   // We need to find a source for each polarity and they can't be the same.
2466   if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
2467     return false;
2468 
2469   // Swap the sources if the second source was in the even polarity.
2470   SwapSources = Srcs[0] > Srcs[1];
2471 
2472   return true;
2473 }
2474 
2475 /// Match shuffles that concatenate two vectors, rotate the concatenation,
2476 /// and then extract the original number of elements from the rotated result.
2477 /// This is equivalent to vector.splice or X86's PALIGNR instruction. The
2478 /// returned rotation amount is for a rotate right, where elements move from
2479 /// higher elements to lower elements. \p LoSrc indicates the first source
2480 /// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
2481 /// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
2482 /// 0 or 1 if a rotation is found.
2483 ///
2484 /// NOTE: We talk about rotate to the right which matches how bit shift and
2485 /// rotate instructions are described where LSBs are on the right, but LLVM IR
2486 /// and the table below write vectors with the lowest elements on the left.
2487 static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
2488   int Size = Mask.size();
2489 
2490   // We need to detect various ways of spelling a rotation:
2491   //   [11, 12, 13, 14, 15,  0,  1,  2]
2492   //   [-1, 12, 13, 14, -1, -1,  1, -1]
2493   //   [-1, -1, -1, -1, -1, -1,  1,  2]
2494   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
2495   //   [-1,  4,  5,  6, -1, -1,  9, -1]
2496   //   [-1,  4,  5,  6, -1, -1, -1, -1]
2497   int Rotation = 0;
2498   LoSrc = -1;
2499   HiSrc = -1;
2500   for (int i = 0; i != Size; ++i) {
2501     int M = Mask[i];
2502     if (M < 0)
2503       continue;
2504 
2505     // Determine where a rotate vector would have started.
2506     int StartIdx = i - (M % Size);
2507     // The identity rotation isn't interesting, stop.
2508     if (StartIdx == 0)
2509       return -1;
2510 
2511     // If we found the tail of a vector the rotation must be the missing
2512     // front. If we found the head of a vector, it must be how much of the
2513     // head.
2514     int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
2515 
2516     if (Rotation == 0)
2517       Rotation = CandidateRotation;
2518     else if (Rotation != CandidateRotation)
2519       // The rotations don't match, so we can't match this mask.
2520       return -1;
2521 
2522     // Compute which value this mask is pointing at.
2523     int MaskSrc = M < Size ? 0 : 1;
2524 
2525     // Compute which of the two target values this index should be assigned to.
2526     // This reflects whether the high elements are remaining or the low elemnts
2527     // are remaining.
2528     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
2529 
2530     // Either set up this value if we've not encountered it before, or check
2531     // that it remains consistent.
2532     if (TargetSrc < 0)
2533       TargetSrc = MaskSrc;
2534     else if (TargetSrc != MaskSrc)
2535       // This may be a rotation, but it pulls from the inputs in some
2536       // unsupported interleaving.
2537       return -1;
2538   }
2539 
2540   // Check that we successfully analyzed the mask, and normalize the results.
2541   assert(Rotation != 0 && "Failed to locate a viable rotation!");
2542   assert((LoSrc >= 0 || HiSrc >= 0) &&
2543          "Failed to find a rotated input vector!");
2544 
2545   return Rotation;
2546 }
2547 
2548 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2549                                    const RISCVSubtarget &Subtarget) {
2550   SDValue V1 = Op.getOperand(0);
2551   SDValue V2 = Op.getOperand(1);
2552   SDLoc DL(Op);
2553   MVT XLenVT = Subtarget.getXLenVT();
2554   MVT VT = Op.getSimpleValueType();
2555   unsigned NumElts = VT.getVectorNumElements();
2556   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2557 
2558   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2559 
2560   SDValue TrueMask, VL;
2561   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2562 
2563   if (SVN->isSplat()) {
2564     const int Lane = SVN->getSplatIndex();
2565     if (Lane >= 0) {
2566       MVT SVT = VT.getVectorElementType();
2567 
2568       // Turn splatted vector load into a strided load with an X0 stride.
2569       SDValue V = V1;
2570       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2571       // with undef.
2572       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2573       int Offset = Lane;
2574       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2575         int OpElements =
2576             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2577         V = V.getOperand(Offset / OpElements);
2578         Offset %= OpElements;
2579       }
2580 
2581       // We need to ensure the load isn't atomic or volatile.
2582       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2583         auto *Ld = cast<LoadSDNode>(V);
2584         Offset *= SVT.getStoreSize();
2585         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2586                                                    TypeSize::Fixed(Offset), DL);
2587 
2588         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2589         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2590           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2591           SDValue IntID =
2592               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2593           SDValue Ops[] = {Ld->getChain(),
2594                            IntID,
2595                            DAG.getUNDEF(ContainerVT),
2596                            NewAddr,
2597                            DAG.getRegister(RISCV::X0, XLenVT),
2598                            VL};
2599           SDValue NewLoad = DAG.getMemIntrinsicNode(
2600               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2601               DAG.getMachineFunction().getMachineMemOperand(
2602                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2603           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2604           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2605         }
2606 
2607         // Otherwise use a scalar load and splat. This will give the best
2608         // opportunity to fold a splat into the operation. ISel can turn it into
2609         // the x0 strided load if we aren't able to fold away the select.
2610         if (SVT.isFloatingPoint())
2611           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2612                           Ld->getPointerInfo().getWithOffset(Offset),
2613                           Ld->getOriginalAlign(),
2614                           Ld->getMemOperand()->getFlags());
2615         else
2616           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2617                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2618                              Ld->getOriginalAlign(),
2619                              Ld->getMemOperand()->getFlags());
2620         DAG.makeEquivalentMemoryOrdering(Ld, V);
2621 
2622         unsigned Opc =
2623             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2624         SDValue Splat =
2625             DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
2626         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2627       }
2628 
2629       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2630       assert(Lane < (int)NumElts && "Unexpected lane!");
2631       SDValue Gather =
2632           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2633                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2634       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2635     }
2636   }
2637 
2638   ArrayRef<int> Mask = SVN->getMask();
2639 
2640   // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors may
2641   // be undef which can be handled with a single SLIDEDOWN/UP.
2642   int LoSrc, HiSrc;
2643   int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
2644   if (Rotation > 0) {
2645     SDValue LoV, HiV;
2646     if (LoSrc >= 0) {
2647       LoV = LoSrc == 0 ? V1 : V2;
2648       LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
2649     }
2650     if (HiSrc >= 0) {
2651       HiV = HiSrc == 0 ? V1 : V2;
2652       HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
2653     }
2654 
2655     // We found a rotation. We need to slide HiV down by Rotation. Then we need
2656     // to slide LoV up by (NumElts - Rotation).
2657     unsigned InvRotate = NumElts - Rotation;
2658 
2659     SDValue Res = DAG.getUNDEF(ContainerVT);
2660     if (HiV) {
2661       // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
2662       // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
2663       // causes multiple vsetvlis in some test cases such as lowering
2664       // reduce.mul
2665       SDValue DownVL = VL;
2666       if (LoV)
2667         DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
2668       Res =
2669           DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, Res, HiV,
2670                       DAG.getConstant(Rotation, DL, XLenVT), TrueMask, DownVL);
2671     }
2672     if (LoV)
2673       Res = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Res, LoV,
2674                         DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL);
2675 
2676     return convertFromScalableVector(VT, Res, DAG, Subtarget);
2677   }
2678 
2679   // Detect an interleave shuffle and lower to
2680   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
2681   bool SwapSources;
2682   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2683     // Swap sources if needed.
2684     if (SwapSources)
2685       std::swap(V1, V2);
2686 
2687     // Extract the lower half of the vectors.
2688     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2689     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2690                      DAG.getConstant(0, DL, XLenVT));
2691     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2692                      DAG.getConstant(0, DL, XLenVT));
2693 
2694     // Double the element width and halve the number of elements in an int type.
2695     unsigned EltBits = VT.getScalarSizeInBits();
2696     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2697     MVT WideIntVT =
2698         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2699     // Convert this to a scalable vector. We need to base this on the
2700     // destination size to ensure there's always a type with a smaller LMUL.
2701     MVT WideIntContainerVT =
2702         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2703 
2704     // Convert sources to scalable vectors with the same element count as the
2705     // larger type.
2706     MVT HalfContainerVT = MVT::getVectorVT(
2707         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2708     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2709     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2710 
2711     // Cast sources to integer.
2712     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2713     MVT IntHalfVT =
2714         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2715     V1 = DAG.getBitcast(IntHalfVT, V1);
2716     V2 = DAG.getBitcast(IntHalfVT, V2);
2717 
2718     // Freeze V2 since we use it twice and we need to be sure that the add and
2719     // multiply see the same value.
2720     V2 = DAG.getFreeze(V2);
2721 
2722     // Recreate TrueMask using the widened type's element count.
2723     MVT MaskVT =
2724         MVT::getVectorVT(MVT::i1, HalfContainerVT.getVectorElementCount());
2725     TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2726 
2727     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2728     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2729                               V2, TrueMask, VL);
2730     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2731     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2732                                      DAG.getUNDEF(IntHalfVT),
2733                                      DAG.getAllOnesConstant(DL, XLenVT));
2734     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2735                                    V2, Multiplier, TrueMask, VL);
2736     // Add the new copies to our previous addition giving us 2^eltbits copies of
2737     // V2. This is equivalent to shifting V2 left by eltbits. This should
2738     // combine with the vwmulu.vv above to form vwmaccu.vv.
2739     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2740                       TrueMask, VL);
2741     // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
2742     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2743     // vector VT.
2744     ContainerVT =
2745         MVT::getVectorVT(VT.getVectorElementType(),
2746                          WideIntContainerVT.getVectorElementCount() * 2);
2747     Add = DAG.getBitcast(ContainerVT, Add);
2748     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2749   }
2750 
2751   // Detect shuffles which can be re-expressed as vector selects; these are
2752   // shuffles in which each element in the destination is taken from an element
2753   // at the corresponding index in either source vectors.
2754   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2755     int MaskIndex = MaskIdx.value();
2756     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2757   });
2758 
2759   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2760 
2761   SmallVector<SDValue> MaskVals;
2762   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2763   // merged with a second vrgather.
2764   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2765 
2766   // By default we preserve the original operand order, and use a mask to
2767   // select LHS as true and RHS as false. However, since RVV vector selects may
2768   // feature splats but only on the LHS, we may choose to invert our mask and
2769   // instead select between RHS and LHS.
2770   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2771   bool InvertMask = IsSelect == SwapOps;
2772 
2773   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2774   // half.
2775   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2776 
2777   // Now construct the mask that will be used by the vselect or blended
2778   // vrgather operation. For vrgathers, construct the appropriate indices into
2779   // each vector.
2780   for (int MaskIndex : Mask) {
2781     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2782     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2783     if (!IsSelect) {
2784       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2785       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2786                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2787                                      : DAG.getUNDEF(XLenVT));
2788       GatherIndicesRHS.push_back(
2789           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2790                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2791       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2792         ++LHSIndexCounts[MaskIndex];
2793       if (!IsLHSOrUndefIndex)
2794         ++RHSIndexCounts[MaskIndex - NumElts];
2795     }
2796   }
2797 
2798   if (SwapOps) {
2799     std::swap(V1, V2);
2800     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2801   }
2802 
2803   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2804   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2805   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2806 
2807   if (IsSelect)
2808     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2809 
2810   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2811     // On such a large vector we're unable to use i8 as the index type.
2812     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2813     // may involve vector splitting if we're already at LMUL=8, or our
2814     // user-supplied maximum fixed-length LMUL.
2815     return SDValue();
2816   }
2817 
2818   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2819   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2820   MVT IndexVT = VT.changeTypeToInteger();
2821   // Since we can't introduce illegal index types at this stage, use i16 and
2822   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2823   // than XLenVT.
2824   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2825     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2826     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2827   }
2828 
2829   MVT IndexContainerVT =
2830       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2831 
2832   SDValue Gather;
2833   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2834   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2835   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2836     Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
2837                               Subtarget);
2838   } else {
2839     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2840     // If only one index is used, we can use a "splat" vrgather.
2841     // TODO: We can splat the most-common index and fix-up any stragglers, if
2842     // that's beneficial.
2843     if (LHSIndexCounts.size() == 1) {
2844       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2845       Gather =
2846           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2847                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2848     } else {
2849       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2850       LHSIndices =
2851           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2852 
2853       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2854                            TrueMask, VL);
2855     }
2856   }
2857 
2858   // If a second vector operand is used by this shuffle, blend it in with an
2859   // additional vrgather.
2860   if (!V2.isUndef()) {
2861     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2862     // If only one index is used, we can use a "splat" vrgather.
2863     // TODO: We can splat the most-common index and fix-up any stragglers, if
2864     // that's beneficial.
2865     if (RHSIndexCounts.size() == 1) {
2866       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2867       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2868                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2869     } else {
2870       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2871       RHSIndices =
2872           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2873       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2874                        VL);
2875     }
2876 
2877     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2878     SelectMask =
2879         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2880 
2881     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2882                          Gather, VL);
2883   }
2884 
2885   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2886 }
2887 
2888 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
2889   // Support splats for any type. These should type legalize well.
2890   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
2891     return true;
2892 
2893   // Only support legal VTs for other shuffles for now.
2894   if (!isTypeLegal(VT))
2895     return false;
2896 
2897   MVT SVT = VT.getSimpleVT();
2898 
2899   bool SwapSources;
2900   int LoSrc, HiSrc;
2901   return (isElementRotate(LoSrc, HiSrc, M) > 0) ||
2902          isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
2903 }
2904 
2905 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2906                                      SDLoc DL, SelectionDAG &DAG,
2907                                      const RISCVSubtarget &Subtarget) {
2908   if (VT.isScalableVector())
2909     return DAG.getFPExtendOrRound(Op, DL, VT);
2910   assert(VT.isFixedLengthVector() &&
2911          "Unexpected value type for RVV FP extend/round lowering");
2912   SDValue Mask, VL;
2913   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2914   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2915                         ? RISCVISD::FP_EXTEND_VL
2916                         : RISCVISD::FP_ROUND_VL;
2917   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2918 }
2919 
2920 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2921 // the exponent.
2922 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2923   MVT VT = Op.getSimpleValueType();
2924   unsigned EltSize = VT.getScalarSizeInBits();
2925   SDValue Src = Op.getOperand(0);
2926   SDLoc DL(Op);
2927 
2928   // We need a FP type that can represent the value.
2929   // TODO: Use f16 for i8 when possible?
2930   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2931   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2932 
2933   // Legal types should have been checked in the RISCVTargetLowering
2934   // constructor.
2935   // TODO: Splitting may make sense in some cases.
2936   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2937          "Expected legal float type!");
2938 
2939   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2940   // The trailing zero count is equal to log2 of this single bit value.
2941   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2942     SDValue Neg =
2943         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2944     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2945   }
2946 
2947   // We have a legal FP type, convert to it.
2948   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2949   // Bitcast to integer and shift the exponent to the LSB.
2950   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2951   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2952   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2953   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2954                               DAG.getConstant(ShiftAmt, DL, IntVT));
2955   // Truncate back to original type to allow vnsrl.
2956   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2957   // The exponent contains log2 of the value in biased form.
2958   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2959 
2960   // For trailing zeros, we just need to subtract the bias.
2961   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2962     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2963                        DAG.getConstant(ExponentBias, DL, VT));
2964 
2965   // For leading zeros, we need to remove the bias and convert from log2 to
2966   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2967   unsigned Adjust = ExponentBias + (EltSize - 1);
2968   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2969 }
2970 
2971 // While RVV has alignment restrictions, we should always be able to load as a
2972 // legal equivalently-sized byte-typed vector instead. This method is
2973 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
2974 // the load is already correctly-aligned, it returns SDValue().
2975 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2976                                                     SelectionDAG &DAG) const {
2977   auto *Load = cast<LoadSDNode>(Op);
2978   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2979 
2980   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2981                                      Load->getMemoryVT(),
2982                                      *Load->getMemOperand()))
2983     return SDValue();
2984 
2985   SDLoc DL(Op);
2986   MVT VT = Op.getSimpleValueType();
2987   unsigned EltSizeBits = VT.getScalarSizeInBits();
2988   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2989          "Unexpected unaligned RVV load type");
2990   MVT NewVT =
2991       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2992   assert(NewVT.isValid() &&
2993          "Expecting equally-sized RVV vector types to be legal");
2994   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2995                           Load->getPointerInfo(), Load->getOriginalAlign(),
2996                           Load->getMemOperand()->getFlags());
2997   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2998 }
2999 
3000 // While RVV has alignment restrictions, we should always be able to store as a
3001 // legal equivalently-sized byte-typed vector instead. This method is
3002 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
3003 // returns SDValue() if the store is already correctly aligned.
3004 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
3005                                                      SelectionDAG &DAG) const {
3006   auto *Store = cast<StoreSDNode>(Op);
3007   assert(Store && Store->getValue().getValueType().isVector() &&
3008          "Expected vector store");
3009 
3010   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
3011                                      Store->getMemoryVT(),
3012                                      *Store->getMemOperand()))
3013     return SDValue();
3014 
3015   SDLoc DL(Op);
3016   SDValue StoredVal = Store->getValue();
3017   MVT VT = StoredVal.getSimpleValueType();
3018   unsigned EltSizeBits = VT.getScalarSizeInBits();
3019   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
3020          "Unexpected unaligned RVV store type");
3021   MVT NewVT =
3022       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
3023   assert(NewVT.isValid() &&
3024          "Expecting equally-sized RVV vector types to be legal");
3025   StoredVal = DAG.getBitcast(NewVT, StoredVal);
3026   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
3027                       Store->getPointerInfo(), Store->getOriginalAlign(),
3028                       Store->getMemOperand()->getFlags());
3029 }
3030 
3031 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
3032                                             SelectionDAG &DAG) const {
3033   switch (Op.getOpcode()) {
3034   default:
3035     report_fatal_error("unimplemented operand");
3036   case ISD::GlobalAddress:
3037     return lowerGlobalAddress(Op, DAG);
3038   case ISD::BlockAddress:
3039     return lowerBlockAddress(Op, DAG);
3040   case ISD::ConstantPool:
3041     return lowerConstantPool(Op, DAG);
3042   case ISD::JumpTable:
3043     return lowerJumpTable(Op, DAG);
3044   case ISD::GlobalTLSAddress:
3045     return lowerGlobalTLSAddress(Op, DAG);
3046   case ISD::SELECT:
3047     return lowerSELECT(Op, DAG);
3048   case ISD::BRCOND:
3049     return lowerBRCOND(Op, DAG);
3050   case ISD::VASTART:
3051     return lowerVASTART(Op, DAG);
3052   case ISD::FRAMEADDR:
3053     return lowerFRAMEADDR(Op, DAG);
3054   case ISD::RETURNADDR:
3055     return lowerRETURNADDR(Op, DAG);
3056   case ISD::SHL_PARTS:
3057     return lowerShiftLeftParts(Op, DAG);
3058   case ISD::SRA_PARTS:
3059     return lowerShiftRightParts(Op, DAG, true);
3060   case ISD::SRL_PARTS:
3061     return lowerShiftRightParts(Op, DAG, false);
3062   case ISD::BITCAST: {
3063     SDLoc DL(Op);
3064     EVT VT = Op.getValueType();
3065     SDValue Op0 = Op.getOperand(0);
3066     EVT Op0VT = Op0.getValueType();
3067     MVT XLenVT = Subtarget.getXLenVT();
3068     if (VT.isFixedLengthVector()) {
3069       // We can handle fixed length vector bitcasts with a simple replacement
3070       // in isel.
3071       if (Op0VT.isFixedLengthVector())
3072         return Op;
3073       // When bitcasting from scalar to fixed-length vector, insert the scalar
3074       // into a one-element vector of the result type, and perform a vector
3075       // bitcast.
3076       if (!Op0VT.isVector()) {
3077         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
3078         if (!isTypeLegal(BVT))
3079           return SDValue();
3080         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
3081                                               DAG.getUNDEF(BVT), Op0,
3082                                               DAG.getConstant(0, DL, XLenVT)));
3083       }
3084       return SDValue();
3085     }
3086     // Custom-legalize bitcasts from fixed-length vector types to scalar types
3087     // thus: bitcast the vector to a one-element vector type whose element type
3088     // is the same as the result type, and extract the first element.
3089     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
3090       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
3091       if (!isTypeLegal(BVT))
3092         return SDValue();
3093       SDValue BVec = DAG.getBitcast(BVT, Op0);
3094       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
3095                          DAG.getConstant(0, DL, XLenVT));
3096     }
3097     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
3098       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
3099       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
3100       return FPConv;
3101     }
3102     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
3103         Subtarget.hasStdExtF()) {
3104       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
3105       SDValue FPConv =
3106           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
3107       return FPConv;
3108     }
3109     return SDValue();
3110   }
3111   case ISD::INTRINSIC_WO_CHAIN:
3112     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3113   case ISD::INTRINSIC_W_CHAIN:
3114     return LowerINTRINSIC_W_CHAIN(Op, DAG);
3115   case ISD::INTRINSIC_VOID:
3116     return LowerINTRINSIC_VOID(Op, DAG);
3117   case ISD::BSWAP:
3118   case ISD::BITREVERSE: {
3119     MVT VT = Op.getSimpleValueType();
3120     SDLoc DL(Op);
3121     if (Subtarget.hasStdExtZbp()) {
3122       // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
3123       // Start with the maximum immediate value which is the bitwidth - 1.
3124       unsigned Imm = VT.getSizeInBits() - 1;
3125       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
3126       if (Op.getOpcode() == ISD::BSWAP)
3127         Imm &= ~0x7U;
3128       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
3129                          DAG.getConstant(Imm, DL, VT));
3130     }
3131     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
3132     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
3133     // Expand bitreverse to a bswap(rev8) followed by brev8.
3134     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
3135     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
3136     // as brev8 by an isel pattern.
3137     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
3138                        DAG.getConstant(7, DL, VT));
3139   }
3140   case ISD::FSHL:
3141   case ISD::FSHR: {
3142     MVT VT = Op.getSimpleValueType();
3143     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
3144     SDLoc DL(Op);
3145     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
3146     // use log(XLen) bits. Mask the shift amount accordingly to prevent
3147     // accidentally setting the extra bit.
3148     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
3149     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
3150                                 DAG.getConstant(ShAmtWidth, DL, VT));
3151     // fshl and fshr concatenate their operands in the same order. fsr and fsl
3152     // instruction use different orders. fshl will return its first operand for
3153     // shift of zero, fshr will return its second operand. fsl and fsr both
3154     // return rs1 so the ISD nodes need to have different operand orders.
3155     // Shift amount is in rs2.
3156     SDValue Op0 = Op.getOperand(0);
3157     SDValue Op1 = Op.getOperand(1);
3158     unsigned Opc = RISCVISD::FSL;
3159     if (Op.getOpcode() == ISD::FSHR) {
3160       std::swap(Op0, Op1);
3161       Opc = RISCVISD::FSR;
3162     }
3163     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3164   }
3165   case ISD::TRUNCATE: {
3166     SDLoc DL(Op);
3167     MVT VT = Op.getSimpleValueType();
3168     // Only custom-lower vector truncates
3169     if (!VT.isVector())
3170       return Op;
3171 
3172     // Truncates to mask types are handled differently
3173     if (VT.getVectorElementType() == MVT::i1)
3174       return lowerVectorMaskTrunc(Op, DAG);
3175 
3176     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
3177     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
3178     // truncate by one power of two at a time.
3179     MVT DstEltVT = VT.getVectorElementType();
3180 
3181     SDValue Src = Op.getOperand(0);
3182     MVT SrcVT = Src.getSimpleValueType();
3183     MVT SrcEltVT = SrcVT.getVectorElementType();
3184 
3185     assert(DstEltVT.bitsLT(SrcEltVT) &&
3186            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
3187            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
3188            "Unexpected vector truncate lowering");
3189 
3190     MVT ContainerVT = SrcVT;
3191     if (SrcVT.isFixedLengthVector()) {
3192       ContainerVT = getContainerForFixedLengthVector(SrcVT);
3193       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3194     }
3195 
3196     SDValue Result = Src;
3197     SDValue Mask, VL;
3198     std::tie(Mask, VL) =
3199         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
3200     LLVMContext &Context = *DAG.getContext();
3201     const ElementCount Count = ContainerVT.getVectorElementCount();
3202     do {
3203       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
3204       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
3205       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
3206                            Mask, VL);
3207     } while (SrcEltVT != DstEltVT);
3208 
3209     if (SrcVT.isFixedLengthVector())
3210       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
3211 
3212     return Result;
3213   }
3214   case ISD::ANY_EXTEND:
3215   case ISD::ZERO_EXTEND:
3216     if (Op.getOperand(0).getValueType().isVector() &&
3217         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3218       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3219     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3220   case ISD::SIGN_EXTEND:
3221     if (Op.getOperand(0).getValueType().isVector() &&
3222         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3223       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3224     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3225   case ISD::SPLAT_VECTOR_PARTS:
3226     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3227   case ISD::INSERT_VECTOR_ELT:
3228     return lowerINSERT_VECTOR_ELT(Op, DAG);
3229   case ISD::EXTRACT_VECTOR_ELT:
3230     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3231   case ISD::VSCALE: {
3232     MVT VT = Op.getSimpleValueType();
3233     SDLoc DL(Op);
3234     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
3235     // We define our scalable vector types for lmul=1 to use a 64 bit known
3236     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
3237     // vscale as VLENB / 8.
3238     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3239     if (Subtarget.getMinVLen() < RISCV::RVVBitsPerBlock)
3240       report_fatal_error("Support for VLEN==32 is incomplete.");
3241     if (isa<ConstantSDNode>(Op.getOperand(0))) {
3242       // We assume VLENB is a multiple of 8. We manually choose the best shift
3243       // here because SimplifyDemandedBits isn't always able to simplify it.
3244       uint64_t Val = Op.getConstantOperandVal(0);
3245       if (isPowerOf2_64(Val)) {
3246         uint64_t Log2 = Log2_64(Val);
3247         if (Log2 < 3)
3248           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3249                              DAG.getConstant(3 - Log2, DL, VT));
3250         if (Log2 > 3)
3251           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3252                              DAG.getConstant(Log2 - 3, DL, VT));
3253         return VLENB;
3254       }
3255       // If the multiplier is a multiple of 8, scale it down to avoid needing
3256       // to shift the VLENB value.
3257       if ((Val % 8) == 0)
3258         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3259                            DAG.getConstant(Val / 8, DL, VT));
3260     }
3261 
3262     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3263                                  DAG.getConstant(3, DL, VT));
3264     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3265   }
3266   case ISD::FPOWI: {
3267     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
3268     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
3269     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3270         Op.getOperand(1).getValueType() == MVT::i32) {
3271       SDLoc DL(Op);
3272       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3273       SDValue Powi =
3274           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3275       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3276                          DAG.getIntPtrConstant(0, DL));
3277     }
3278     return SDValue();
3279   }
3280   case ISD::FP_EXTEND: {
3281     // RVV can only do fp_extend to types double the size as the source. We
3282     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
3283     // via f32.
3284     SDLoc DL(Op);
3285     MVT VT = Op.getSimpleValueType();
3286     SDValue Src = Op.getOperand(0);
3287     MVT SrcVT = Src.getSimpleValueType();
3288 
3289     // Prepare any fixed-length vector operands.
3290     MVT ContainerVT = VT;
3291     if (SrcVT.isFixedLengthVector()) {
3292       ContainerVT = getContainerForFixedLengthVector(VT);
3293       MVT SrcContainerVT =
3294           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
3295       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3296     }
3297 
3298     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
3299         SrcVT.getVectorElementType() != MVT::f16) {
3300       // For scalable vectors, we only need to close the gap between
3301       // vXf16->vXf64.
3302       if (!VT.isFixedLengthVector())
3303         return Op;
3304       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
3305       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3306       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3307     }
3308 
3309     MVT InterVT = VT.changeVectorElementType(MVT::f32);
3310     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
3311     SDValue IntermediateExtend = getRVVFPExtendOrRound(
3312         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
3313 
3314     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
3315                                            DL, DAG, Subtarget);
3316     if (VT.isFixedLengthVector())
3317       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
3318     return Extend;
3319   }
3320   case ISD::FP_ROUND: {
3321     // RVV can only do fp_round to types half the size as the source. We
3322     // custom-lower f64->f16 rounds via RVV's round-to-odd float
3323     // conversion instruction.
3324     SDLoc DL(Op);
3325     MVT VT = Op.getSimpleValueType();
3326     SDValue Src = Op.getOperand(0);
3327     MVT SrcVT = Src.getSimpleValueType();
3328 
3329     // Prepare any fixed-length vector operands.
3330     MVT ContainerVT = VT;
3331     if (VT.isFixedLengthVector()) {
3332       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3333       ContainerVT =
3334           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3335       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3336     }
3337 
3338     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
3339         SrcVT.getVectorElementType() != MVT::f64) {
3340       // For scalable vectors, we only need to close the gap between
3341       // vXf64<->vXf16.
3342       if (!VT.isFixedLengthVector())
3343         return Op;
3344       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
3345       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3346       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3347     }
3348 
3349     SDValue Mask, VL;
3350     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3351 
3352     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
3353     SDValue IntermediateRound =
3354         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
3355     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
3356                                           DL, DAG, Subtarget);
3357 
3358     if (VT.isFixedLengthVector())
3359       return convertFromScalableVector(VT, Round, DAG, Subtarget);
3360     return Round;
3361   }
3362   case ISD::FP_TO_SINT:
3363   case ISD::FP_TO_UINT:
3364   case ISD::SINT_TO_FP:
3365   case ISD::UINT_TO_FP: {
3366     // RVV can only do fp<->int conversions to types half/double the size as
3367     // the source. We custom-lower any conversions that do two hops into
3368     // sequences.
3369     MVT VT = Op.getSimpleValueType();
3370     if (!VT.isVector())
3371       return Op;
3372     SDLoc DL(Op);
3373     SDValue Src = Op.getOperand(0);
3374     MVT EltVT = VT.getVectorElementType();
3375     MVT SrcVT = Src.getSimpleValueType();
3376     MVT SrcEltVT = SrcVT.getVectorElementType();
3377     unsigned EltSize = EltVT.getSizeInBits();
3378     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3379     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3380            "Unexpected vector element types");
3381 
3382     bool IsInt2FP = SrcEltVT.isInteger();
3383     // Widening conversions
3384     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
3385       if (IsInt2FP) {
3386         // Do a regular integer sign/zero extension then convert to float.
3387         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
3388                                       VT.getVectorElementCount());
3389         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3390                                  ? ISD::ZERO_EXTEND
3391                                  : ISD::SIGN_EXTEND;
3392         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3393         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3394       }
3395       // FP2Int
3396       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3397       // Do one doubling fp_extend then complete the operation by converting
3398       // to int.
3399       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3400       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3401       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3402     }
3403 
3404     // Narrowing conversions
3405     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
3406       if (IsInt2FP) {
3407         // One narrowing int_to_fp, then an fp_round.
3408         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3409         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3410         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3411         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3412       }
3413       // FP2Int
3414       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3415       // representable by the integer, the result is poison.
3416       MVT IVecVT =
3417           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
3418                            VT.getVectorElementCount());
3419       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3420       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3421     }
3422 
3423     // Scalable vectors can exit here. Patterns will handle equally-sized
3424     // conversions halving/doubling ones.
3425     if (!VT.isFixedLengthVector())
3426       return Op;
3427 
3428     // For fixed-length vectors we lower to a custom "VL" node.
3429     unsigned RVVOpc = 0;
3430     switch (Op.getOpcode()) {
3431     default:
3432       llvm_unreachable("Impossible opcode");
3433     case ISD::FP_TO_SINT:
3434       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3435       break;
3436     case ISD::FP_TO_UINT:
3437       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3438       break;
3439     case ISD::SINT_TO_FP:
3440       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3441       break;
3442     case ISD::UINT_TO_FP:
3443       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3444       break;
3445     }
3446 
3447     MVT ContainerVT, SrcContainerVT;
3448     // Derive the reference container type from the larger vector type.
3449     if (SrcEltSize > EltSize) {
3450       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3451       ContainerVT =
3452           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3453     } else {
3454       ContainerVT = getContainerForFixedLengthVector(VT);
3455       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3456     }
3457 
3458     SDValue Mask, VL;
3459     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3460 
3461     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3462     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3463     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3464   }
3465   case ISD::FP_TO_SINT_SAT:
3466   case ISD::FP_TO_UINT_SAT:
3467     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3468   case ISD::FTRUNC:
3469   case ISD::FCEIL:
3470   case ISD::FFLOOR:
3471     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3472   case ISD::FROUND:
3473     return lowerFROUND(Op, DAG);
3474   case ISD::VECREDUCE_ADD:
3475   case ISD::VECREDUCE_UMAX:
3476   case ISD::VECREDUCE_SMAX:
3477   case ISD::VECREDUCE_UMIN:
3478   case ISD::VECREDUCE_SMIN:
3479     return lowerVECREDUCE(Op, DAG);
3480   case ISD::VECREDUCE_AND:
3481   case ISD::VECREDUCE_OR:
3482   case ISD::VECREDUCE_XOR:
3483     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3484       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3485     return lowerVECREDUCE(Op, DAG);
3486   case ISD::VECREDUCE_FADD:
3487   case ISD::VECREDUCE_SEQ_FADD:
3488   case ISD::VECREDUCE_FMIN:
3489   case ISD::VECREDUCE_FMAX:
3490     return lowerFPVECREDUCE(Op, DAG);
3491   case ISD::VP_REDUCE_ADD:
3492   case ISD::VP_REDUCE_UMAX:
3493   case ISD::VP_REDUCE_SMAX:
3494   case ISD::VP_REDUCE_UMIN:
3495   case ISD::VP_REDUCE_SMIN:
3496   case ISD::VP_REDUCE_FADD:
3497   case ISD::VP_REDUCE_SEQ_FADD:
3498   case ISD::VP_REDUCE_FMIN:
3499   case ISD::VP_REDUCE_FMAX:
3500     return lowerVPREDUCE(Op, DAG);
3501   case ISD::VP_REDUCE_AND:
3502   case ISD::VP_REDUCE_OR:
3503   case ISD::VP_REDUCE_XOR:
3504     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3505       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3506     return lowerVPREDUCE(Op, DAG);
3507   case ISD::INSERT_SUBVECTOR:
3508     return lowerINSERT_SUBVECTOR(Op, DAG);
3509   case ISD::EXTRACT_SUBVECTOR:
3510     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3511   case ISD::STEP_VECTOR:
3512     return lowerSTEP_VECTOR(Op, DAG);
3513   case ISD::VECTOR_REVERSE:
3514     return lowerVECTOR_REVERSE(Op, DAG);
3515   case ISD::VECTOR_SPLICE:
3516     return lowerVECTOR_SPLICE(Op, DAG);
3517   case ISD::BUILD_VECTOR:
3518     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3519   case ISD::SPLAT_VECTOR:
3520     if (Op.getValueType().getVectorElementType() == MVT::i1)
3521       return lowerVectorMaskSplat(Op, DAG);
3522     return SDValue();
3523   case ISD::VECTOR_SHUFFLE:
3524     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3525   case ISD::CONCAT_VECTORS: {
3526     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3527     // better than going through the stack, as the default expansion does.
3528     SDLoc DL(Op);
3529     MVT VT = Op.getSimpleValueType();
3530     unsigned NumOpElts =
3531         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3532     SDValue Vec = DAG.getUNDEF(VT);
3533     for (const auto &OpIdx : enumerate(Op->ops())) {
3534       SDValue SubVec = OpIdx.value();
3535       // Don't insert undef subvectors.
3536       if (SubVec.isUndef())
3537         continue;
3538       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3539                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3540     }
3541     return Vec;
3542   }
3543   case ISD::LOAD:
3544     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3545       return V;
3546     if (Op.getValueType().isFixedLengthVector())
3547       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3548     return Op;
3549   case ISD::STORE:
3550     if (auto V = expandUnalignedRVVStore(Op, DAG))
3551       return V;
3552     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3553       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3554     return Op;
3555   case ISD::MLOAD:
3556   case ISD::VP_LOAD:
3557     return lowerMaskedLoad(Op, DAG);
3558   case ISD::MSTORE:
3559   case ISD::VP_STORE:
3560     return lowerMaskedStore(Op, DAG);
3561   case ISD::SETCC:
3562     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3563   case ISD::ADD:
3564     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3565   case ISD::SUB:
3566     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3567   case ISD::MUL:
3568     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3569   case ISD::MULHS:
3570     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3571   case ISD::MULHU:
3572     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3573   case ISD::AND:
3574     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3575                                               RISCVISD::AND_VL);
3576   case ISD::OR:
3577     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3578                                               RISCVISD::OR_VL);
3579   case ISD::XOR:
3580     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3581                                               RISCVISD::XOR_VL);
3582   case ISD::SDIV:
3583     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3584   case ISD::SREM:
3585     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3586   case ISD::UDIV:
3587     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3588   case ISD::UREM:
3589     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3590   case ISD::SHL:
3591   case ISD::SRA:
3592   case ISD::SRL:
3593     if (Op.getSimpleValueType().isFixedLengthVector())
3594       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3595     // This can be called for an i32 shift amount that needs to be promoted.
3596     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3597            "Unexpected custom legalisation");
3598     return SDValue();
3599   case ISD::SADDSAT:
3600     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3601   case ISD::UADDSAT:
3602     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3603   case ISD::SSUBSAT:
3604     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3605   case ISD::USUBSAT:
3606     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3607   case ISD::FADD:
3608     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3609   case ISD::FSUB:
3610     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3611   case ISD::FMUL:
3612     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3613   case ISD::FDIV:
3614     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3615   case ISD::FNEG:
3616     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3617   case ISD::FABS:
3618     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3619   case ISD::FSQRT:
3620     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3621   case ISD::FMA:
3622     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3623   case ISD::SMIN:
3624     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3625   case ISD::SMAX:
3626     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3627   case ISD::UMIN:
3628     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3629   case ISD::UMAX:
3630     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3631   case ISD::FMINNUM:
3632     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3633   case ISD::FMAXNUM:
3634     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3635   case ISD::ABS:
3636     return lowerABS(Op, DAG);
3637   case ISD::CTLZ_ZERO_UNDEF:
3638   case ISD::CTTZ_ZERO_UNDEF:
3639     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3640   case ISD::VSELECT:
3641     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3642   case ISD::FCOPYSIGN:
3643     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3644   case ISD::MGATHER:
3645   case ISD::VP_GATHER:
3646     return lowerMaskedGather(Op, DAG);
3647   case ISD::MSCATTER:
3648   case ISD::VP_SCATTER:
3649     return lowerMaskedScatter(Op, DAG);
3650   case ISD::FLT_ROUNDS_:
3651     return lowerGET_ROUNDING(Op, DAG);
3652   case ISD::SET_ROUNDING:
3653     return lowerSET_ROUNDING(Op, DAG);
3654   case ISD::VP_SELECT:
3655     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3656   case ISD::VP_MERGE:
3657     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3658   case ISD::VP_ADD:
3659     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3660   case ISD::VP_SUB:
3661     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3662   case ISD::VP_MUL:
3663     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3664   case ISD::VP_SDIV:
3665     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3666   case ISD::VP_UDIV:
3667     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3668   case ISD::VP_SREM:
3669     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3670   case ISD::VP_UREM:
3671     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3672   case ISD::VP_AND:
3673     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3674   case ISD::VP_OR:
3675     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3676   case ISD::VP_XOR:
3677     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3678   case ISD::VP_ASHR:
3679     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3680   case ISD::VP_LSHR:
3681     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3682   case ISD::VP_SHL:
3683     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3684   case ISD::VP_FADD:
3685     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3686   case ISD::VP_FSUB:
3687     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3688   case ISD::VP_FMUL:
3689     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3690   case ISD::VP_FDIV:
3691     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3692   case ISD::VP_FNEG:
3693     return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
3694   case ISD::VP_FMA:
3695     return lowerVPOp(Op, DAG, RISCVISD::FMA_VL);
3696   }
3697 }
3698 
3699 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3700                              SelectionDAG &DAG, unsigned Flags) {
3701   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3702 }
3703 
3704 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3705                              SelectionDAG &DAG, unsigned Flags) {
3706   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3707                                    Flags);
3708 }
3709 
3710 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3711                              SelectionDAG &DAG, unsigned Flags) {
3712   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3713                                    N->getOffset(), Flags);
3714 }
3715 
3716 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3717                              SelectionDAG &DAG, unsigned Flags) {
3718   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3719 }
3720 
3721 template <class NodeTy>
3722 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3723                                      bool IsLocal) const {
3724   SDLoc DL(N);
3725   EVT Ty = getPointerTy(DAG.getDataLayout());
3726 
3727   if (isPositionIndependent()) {
3728     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3729     if (IsLocal)
3730       // Use PC-relative addressing to access the symbol. This generates the
3731       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3732       // %pcrel_lo(auipc)).
3733       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3734 
3735     // Use PC-relative addressing to access the GOT for this symbol, then load
3736     // the address from the GOT. This generates the pattern (PseudoLA sym),
3737     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3738     SDValue Load =
3739         SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3740     MachineFunction &MF = DAG.getMachineFunction();
3741     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3742         MachinePointerInfo::getGOT(MF),
3743         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3744             MachineMemOperand::MOInvariant,
3745         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3746     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3747     return Load;
3748   }
3749 
3750   switch (getTargetMachine().getCodeModel()) {
3751   default:
3752     report_fatal_error("Unsupported code model for lowering");
3753   case CodeModel::Small: {
3754     // Generate a sequence for accessing addresses within the first 2 GiB of
3755     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3756     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3757     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3758     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3759     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3760   }
3761   case CodeModel::Medium: {
3762     // Generate a sequence for accessing addresses within any 2GiB range within
3763     // the address space. This generates the pattern (PseudoLLA sym), which
3764     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3765     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3766     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3767   }
3768   }
3769 }
3770 
3771 template SDValue RISCVTargetLowering::getAddr<GlobalAddressSDNode>(
3772     GlobalAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3773 template SDValue RISCVTargetLowering::getAddr<BlockAddressSDNode>(
3774     BlockAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3775 template SDValue RISCVTargetLowering::getAddr<ConstantPoolSDNode>(
3776     ConstantPoolSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3777 template SDValue RISCVTargetLowering::getAddr<JumpTableSDNode>(
3778     JumpTableSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3779 
3780 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3781                                                 SelectionDAG &DAG) const {
3782   SDLoc DL(Op);
3783   EVT Ty = Op.getValueType();
3784   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3785   int64_t Offset = N->getOffset();
3786   MVT XLenVT = Subtarget.getXLenVT();
3787 
3788   const GlobalValue *GV = N->getGlobal();
3789   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3790   SDValue Addr = getAddr(N, DAG, IsLocal);
3791 
3792   // In order to maximise the opportunity for common subexpression elimination,
3793   // emit a separate ADD node for the global address offset instead of folding
3794   // it in the global address node. Later peephole optimisations may choose to
3795   // fold it back in when profitable.
3796   if (Offset != 0)
3797     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3798                        DAG.getConstant(Offset, DL, XLenVT));
3799   return Addr;
3800 }
3801 
3802 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3803                                                SelectionDAG &DAG) const {
3804   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3805 
3806   return getAddr(N, DAG);
3807 }
3808 
3809 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3810                                                SelectionDAG &DAG) const {
3811   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3812 
3813   return getAddr(N, DAG);
3814 }
3815 
3816 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3817                                             SelectionDAG &DAG) const {
3818   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3819 
3820   return getAddr(N, DAG);
3821 }
3822 
3823 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3824                                               SelectionDAG &DAG,
3825                                               bool UseGOT) const {
3826   SDLoc DL(N);
3827   EVT Ty = getPointerTy(DAG.getDataLayout());
3828   const GlobalValue *GV = N->getGlobal();
3829   MVT XLenVT = Subtarget.getXLenVT();
3830 
3831   if (UseGOT) {
3832     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3833     // load the address from the GOT and add the thread pointer. This generates
3834     // the pattern (PseudoLA_TLS_IE sym), which expands to
3835     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3836     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3837     SDValue Load =
3838         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3839     MachineFunction &MF = DAG.getMachineFunction();
3840     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3841         MachinePointerInfo::getGOT(MF),
3842         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3843             MachineMemOperand::MOInvariant,
3844         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3845     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3846 
3847     // Add the thread pointer.
3848     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3849     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3850   }
3851 
3852   // Generate a sequence for accessing the address relative to the thread
3853   // pointer, with the appropriate adjustment for the thread pointer offset.
3854   // This generates the pattern
3855   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3856   SDValue AddrHi =
3857       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3858   SDValue AddrAdd =
3859       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3860   SDValue AddrLo =
3861       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3862 
3863   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3864   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3865   SDValue MNAdd = SDValue(
3866       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3867       0);
3868   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3869 }
3870 
3871 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3872                                                SelectionDAG &DAG) const {
3873   SDLoc DL(N);
3874   EVT Ty = getPointerTy(DAG.getDataLayout());
3875   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3876   const GlobalValue *GV = N->getGlobal();
3877 
3878   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3879   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3880   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3881   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3882   SDValue Load =
3883       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3884 
3885   // Prepare argument list to generate call.
3886   ArgListTy Args;
3887   ArgListEntry Entry;
3888   Entry.Node = Load;
3889   Entry.Ty = CallTy;
3890   Args.push_back(Entry);
3891 
3892   // Setup call to __tls_get_addr.
3893   TargetLowering::CallLoweringInfo CLI(DAG);
3894   CLI.setDebugLoc(DL)
3895       .setChain(DAG.getEntryNode())
3896       .setLibCallee(CallingConv::C, CallTy,
3897                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3898                     std::move(Args));
3899 
3900   return LowerCallTo(CLI).first;
3901 }
3902 
3903 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3904                                                    SelectionDAG &DAG) const {
3905   SDLoc DL(Op);
3906   EVT Ty = Op.getValueType();
3907   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3908   int64_t Offset = N->getOffset();
3909   MVT XLenVT = Subtarget.getXLenVT();
3910 
3911   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3912 
3913   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3914       CallingConv::GHC)
3915     report_fatal_error("In GHC calling convention TLS is not supported");
3916 
3917   SDValue Addr;
3918   switch (Model) {
3919   case TLSModel::LocalExec:
3920     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3921     break;
3922   case TLSModel::InitialExec:
3923     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3924     break;
3925   case TLSModel::LocalDynamic:
3926   case TLSModel::GeneralDynamic:
3927     Addr = getDynamicTLSAddr(N, DAG);
3928     break;
3929   }
3930 
3931   // In order to maximise the opportunity for common subexpression elimination,
3932   // emit a separate ADD node for the global address offset instead of folding
3933   // it in the global address node. Later peephole optimisations may choose to
3934   // fold it back in when profitable.
3935   if (Offset != 0)
3936     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3937                        DAG.getConstant(Offset, DL, XLenVT));
3938   return Addr;
3939 }
3940 
3941 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3942   SDValue CondV = Op.getOperand(0);
3943   SDValue TrueV = Op.getOperand(1);
3944   SDValue FalseV = Op.getOperand(2);
3945   SDLoc DL(Op);
3946   MVT VT = Op.getSimpleValueType();
3947   MVT XLenVT = Subtarget.getXLenVT();
3948 
3949   // Lower vector SELECTs to VSELECTs by splatting the condition.
3950   if (VT.isVector()) {
3951     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3952     SDValue CondSplat = VT.isScalableVector()
3953                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3954                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3955     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3956   }
3957 
3958   // If the result type is XLenVT and CondV is the output of a SETCC node
3959   // which also operated on XLenVT inputs, then merge the SETCC node into the
3960   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3961   // compare+branch instructions. i.e.:
3962   // (select (setcc lhs, rhs, cc), truev, falsev)
3963   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3964   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3965       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3966     SDValue LHS = CondV.getOperand(0);
3967     SDValue RHS = CondV.getOperand(1);
3968     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3969     ISD::CondCode CCVal = CC->get();
3970 
3971     // Special case for a select of 2 constants that have a diffence of 1.
3972     // Normally this is done by DAGCombine, but if the select is introduced by
3973     // type legalization or op legalization, we miss it. Restricting to SETLT
3974     // case for now because that is what signed saturating add/sub need.
3975     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3976     // but we would probably want to swap the true/false values if the condition
3977     // is SETGE/SETLE to avoid an XORI.
3978     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3979         CCVal == ISD::SETLT) {
3980       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3981       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3982       if (TrueVal - 1 == FalseVal)
3983         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3984       if (TrueVal + 1 == FalseVal)
3985         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3986     }
3987 
3988     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3989 
3990     SDValue TargetCC = DAG.getCondCode(CCVal);
3991     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3992     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3993   }
3994 
3995   // Otherwise:
3996   // (select condv, truev, falsev)
3997   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3998   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3999   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
4000 
4001   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
4002 
4003   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
4004 }
4005 
4006 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
4007   SDValue CondV = Op.getOperand(1);
4008   SDLoc DL(Op);
4009   MVT XLenVT = Subtarget.getXLenVT();
4010 
4011   if (CondV.getOpcode() == ISD::SETCC &&
4012       CondV.getOperand(0).getValueType() == XLenVT) {
4013     SDValue LHS = CondV.getOperand(0);
4014     SDValue RHS = CondV.getOperand(1);
4015     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
4016 
4017     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
4018 
4019     SDValue TargetCC = DAG.getCondCode(CCVal);
4020     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
4021                        LHS, RHS, TargetCC, Op.getOperand(2));
4022   }
4023 
4024   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
4025                      CondV, DAG.getConstant(0, DL, XLenVT),
4026                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
4027 }
4028 
4029 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
4030   MachineFunction &MF = DAG.getMachineFunction();
4031   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
4032 
4033   SDLoc DL(Op);
4034   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
4035                                  getPointerTy(MF.getDataLayout()));
4036 
4037   // vastart just stores the address of the VarArgsFrameIndex slot into the
4038   // memory location argument.
4039   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
4040   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
4041                       MachinePointerInfo(SV));
4042 }
4043 
4044 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
4045                                             SelectionDAG &DAG) const {
4046   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
4047   MachineFunction &MF = DAG.getMachineFunction();
4048   MachineFrameInfo &MFI = MF.getFrameInfo();
4049   MFI.setFrameAddressIsTaken(true);
4050   Register FrameReg = RI.getFrameRegister(MF);
4051   int XLenInBytes = Subtarget.getXLen() / 8;
4052 
4053   EVT VT = Op.getValueType();
4054   SDLoc DL(Op);
4055   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
4056   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4057   while (Depth--) {
4058     int Offset = -(XLenInBytes * 2);
4059     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
4060                               DAG.getIntPtrConstant(Offset, DL));
4061     FrameAddr =
4062         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
4063   }
4064   return FrameAddr;
4065 }
4066 
4067 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
4068                                              SelectionDAG &DAG) const {
4069   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
4070   MachineFunction &MF = DAG.getMachineFunction();
4071   MachineFrameInfo &MFI = MF.getFrameInfo();
4072   MFI.setReturnAddressIsTaken(true);
4073   MVT XLenVT = Subtarget.getXLenVT();
4074   int XLenInBytes = Subtarget.getXLen() / 8;
4075 
4076   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
4077     return SDValue();
4078 
4079   EVT VT = Op.getValueType();
4080   SDLoc DL(Op);
4081   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4082   if (Depth) {
4083     int Off = -XLenInBytes;
4084     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
4085     SDValue Offset = DAG.getConstant(Off, DL, VT);
4086     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
4087                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
4088                        MachinePointerInfo());
4089   }
4090 
4091   // Return the value of the return address register, marking it an implicit
4092   // live-in.
4093   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
4094   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
4095 }
4096 
4097 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
4098                                                  SelectionDAG &DAG) const {
4099   SDLoc DL(Op);
4100   SDValue Lo = Op.getOperand(0);
4101   SDValue Hi = Op.getOperand(1);
4102   SDValue Shamt = Op.getOperand(2);
4103   EVT VT = Lo.getValueType();
4104 
4105   // if Shamt-XLEN < 0: // Shamt < XLEN
4106   //   Lo = Lo << Shamt
4107   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 ^ Shamt))
4108   // else:
4109   //   Lo = 0
4110   //   Hi = Lo << (Shamt-XLEN)
4111 
4112   SDValue Zero = DAG.getConstant(0, DL, VT);
4113   SDValue One = DAG.getConstant(1, DL, VT);
4114   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4115   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4116   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4117   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
4118 
4119   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
4120   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
4121   SDValue ShiftRightLo =
4122       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
4123   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
4124   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
4125   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
4126 
4127   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4128 
4129   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
4130   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4131 
4132   SDValue Parts[2] = {Lo, Hi};
4133   return DAG.getMergeValues(Parts, DL);
4134 }
4135 
4136 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
4137                                                   bool IsSRA) const {
4138   SDLoc DL(Op);
4139   SDValue Lo = Op.getOperand(0);
4140   SDValue Hi = Op.getOperand(1);
4141   SDValue Shamt = Op.getOperand(2);
4142   EVT VT = Lo.getValueType();
4143 
4144   // SRA expansion:
4145   //   if Shamt-XLEN < 0: // Shamt < XLEN
4146   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
4147   //     Hi = Hi >>s Shamt
4148   //   else:
4149   //     Lo = Hi >>s (Shamt-XLEN);
4150   //     Hi = Hi >>s (XLEN-1)
4151   //
4152   // SRL expansion:
4153   //   if Shamt-XLEN < 0: // Shamt < XLEN
4154   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
4155   //     Hi = Hi >>u Shamt
4156   //   else:
4157   //     Lo = Hi >>u (Shamt-XLEN);
4158   //     Hi = 0;
4159 
4160   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
4161 
4162   SDValue Zero = DAG.getConstant(0, DL, VT);
4163   SDValue One = DAG.getConstant(1, DL, VT);
4164   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4165   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4166   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4167   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
4168 
4169   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
4170   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
4171   SDValue ShiftLeftHi =
4172       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
4173   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
4174   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
4175   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
4176   SDValue HiFalse =
4177       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
4178 
4179   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4180 
4181   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
4182   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4183 
4184   SDValue Parts[2] = {Lo, Hi};
4185   return DAG.getMergeValues(Parts, DL);
4186 }
4187 
4188 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
4189 // legal equivalently-sized i8 type, so we can use that as a go-between.
4190 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
4191                                                   SelectionDAG &DAG) const {
4192   SDLoc DL(Op);
4193   MVT VT = Op.getSimpleValueType();
4194   SDValue SplatVal = Op.getOperand(0);
4195   // All-zeros or all-ones splats are handled specially.
4196   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
4197     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4198     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
4199   }
4200   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
4201     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4202     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
4203   }
4204   MVT XLenVT = Subtarget.getXLenVT();
4205   assert(SplatVal.getValueType() == XLenVT &&
4206          "Unexpected type for i1 splat value");
4207   MVT InterVT = VT.changeVectorElementType(MVT::i8);
4208   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
4209                          DAG.getConstant(1, DL, XLenVT));
4210   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
4211   SDValue Zero = DAG.getConstant(0, DL, InterVT);
4212   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
4213 }
4214 
4215 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
4216 // illegal (currently only vXi64 RV32).
4217 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4218 // them to VMV_V_X_VL.
4219 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4220                                                      SelectionDAG &DAG) const {
4221   SDLoc DL(Op);
4222   MVT VecVT = Op.getSimpleValueType();
4223   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4224          "Unexpected SPLAT_VECTOR_PARTS lowering");
4225 
4226   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4227   SDValue Lo = Op.getOperand(0);
4228   SDValue Hi = Op.getOperand(1);
4229 
4230   if (VecVT.isFixedLengthVector()) {
4231     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4232     SDLoc DL(Op);
4233     SDValue Mask, VL;
4234     std::tie(Mask, VL) =
4235         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4236 
4237     SDValue Res =
4238         splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
4239     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4240   }
4241 
4242   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4243     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4244     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
4245     // If Hi constant is all the same sign bit as Lo, lower this as a custom
4246     // node in order to try and match RVV vector/scalar instructions.
4247     if ((LoC >> 31) == HiC)
4248       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4249                          Lo, DAG.getRegister(RISCV::X0, MVT::i32));
4250   }
4251 
4252   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
4253   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4254       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4255       Hi.getConstantOperandVal(1) == 31)
4256     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
4257                        DAG.getRegister(RISCV::X0, MVT::i32));
4258 
4259   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
4260   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
4261                      DAG.getUNDEF(VecVT), Lo, Hi,
4262                      DAG.getRegister(RISCV::X0, MVT::i32));
4263 }
4264 
4265 // Custom-lower extensions from mask vectors by using a vselect either with 1
4266 // for zero/any-extension or -1 for sign-extension:
4267 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4268 // Note that any-extension is lowered identically to zero-extension.
4269 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4270                                                 int64_t ExtTrueVal) const {
4271   SDLoc DL(Op);
4272   MVT VecVT = Op.getSimpleValueType();
4273   SDValue Src = Op.getOperand(0);
4274   // Only custom-lower extensions from mask types
4275   assert(Src.getValueType().isVector() &&
4276          Src.getValueType().getVectorElementType() == MVT::i1);
4277 
4278   if (VecVT.isScalableVector()) {
4279     SDValue SplatZero = DAG.getConstant(0, DL, VecVT);
4280     SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, VecVT);
4281     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4282   }
4283 
4284   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4285   MVT I1ContainerVT =
4286       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4287 
4288   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4289 
4290   SDValue Mask, VL;
4291   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4292 
4293   MVT XLenVT = Subtarget.getXLenVT();
4294   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4295   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4296 
4297   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4298                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4299   SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4300                              DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
4301   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4302                                SplatTrueVal, SplatZero, VL);
4303 
4304   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4305 }
4306 
4307 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4308     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4309   MVT ExtVT = Op.getSimpleValueType();
4310   // Only custom-lower extensions from fixed-length vector types.
4311   if (!ExtVT.isFixedLengthVector())
4312     return Op;
4313   MVT VT = Op.getOperand(0).getSimpleValueType();
4314   // Grab the canonical container type for the extended type. Infer the smaller
4315   // type from that to ensure the same number of vector elements, as we know
4316   // the LMUL will be sufficient to hold the smaller type.
4317   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
4318   // Get the extended container type manually to ensure the same number of
4319   // vector elements between source and dest.
4320   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4321                                      ContainerExtVT.getVectorElementCount());
4322 
4323   SDValue Op1 =
4324       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4325 
4326   SDLoc DL(Op);
4327   SDValue Mask, VL;
4328   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4329 
4330   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4331 
4332   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4333 }
4334 
4335 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4336 // setcc operation:
4337 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
4338 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
4339                                                   SelectionDAG &DAG) const {
4340   SDLoc DL(Op);
4341   EVT MaskVT = Op.getValueType();
4342   // Only expect to custom-lower truncations to mask types
4343   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4344          "Unexpected type for vector mask lowering");
4345   SDValue Src = Op.getOperand(0);
4346   MVT VecVT = Src.getSimpleValueType();
4347 
4348   // If this is a fixed vector, we need to convert it to a scalable vector.
4349   MVT ContainerVT = VecVT;
4350   if (VecVT.isFixedLengthVector()) {
4351     ContainerVT = getContainerForFixedLengthVector(VecVT);
4352     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4353   }
4354 
4355   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4356   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4357 
4358   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4359                          DAG.getUNDEF(ContainerVT), SplatOne);
4360   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4361                           DAG.getUNDEF(ContainerVT), SplatZero);
4362 
4363   if (VecVT.isScalableVector()) {
4364     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
4365     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
4366   }
4367 
4368   SDValue Mask, VL;
4369   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4370 
4371   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4372   SDValue Trunc =
4373       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4374   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4375                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4376   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4377 }
4378 
4379 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4380 // first position of a vector, and that vector is slid up to the insert index.
4381 // By limiting the active vector length to index+1 and merging with the
4382 // original vector (with an undisturbed tail policy for elements >= VL), we
4383 // achieve the desired result of leaving all elements untouched except the one
4384 // at VL-1, which is replaced with the desired value.
4385 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4386                                                     SelectionDAG &DAG) const {
4387   SDLoc DL(Op);
4388   MVT VecVT = Op.getSimpleValueType();
4389   SDValue Vec = Op.getOperand(0);
4390   SDValue Val = Op.getOperand(1);
4391   SDValue Idx = Op.getOperand(2);
4392 
4393   if (VecVT.getVectorElementType() == MVT::i1) {
4394     // FIXME: For now we just promote to an i8 vector and insert into that,
4395     // but this is probably not optimal.
4396     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4397     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4398     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4399     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4400   }
4401 
4402   MVT ContainerVT = VecVT;
4403   // If the operand is a fixed-length vector, convert to a scalable one.
4404   if (VecVT.isFixedLengthVector()) {
4405     ContainerVT = getContainerForFixedLengthVector(VecVT);
4406     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4407   }
4408 
4409   MVT XLenVT = Subtarget.getXLenVT();
4410 
4411   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4412   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4413   // Even i64-element vectors on RV32 can be lowered without scalar
4414   // legalization if the most-significant 32 bits of the value are not affected
4415   // by the sign-extension of the lower 32 bits.
4416   // TODO: We could also catch sign extensions of a 32-bit value.
4417   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4418     const auto *CVal = cast<ConstantSDNode>(Val);
4419     if (isInt<32>(CVal->getSExtValue())) {
4420       IsLegalInsert = true;
4421       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4422     }
4423   }
4424 
4425   SDValue Mask, VL;
4426   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4427 
4428   SDValue ValInVec;
4429 
4430   if (IsLegalInsert) {
4431     unsigned Opc =
4432         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4433     if (isNullConstant(Idx)) {
4434       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4435       if (!VecVT.isFixedLengthVector())
4436         return Vec;
4437       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4438     }
4439     ValInVec =
4440         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4441   } else {
4442     // On RV32, i64-element vectors must be specially handled to place the
4443     // value at element 0, by using two vslide1up instructions in sequence on
4444     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4445     // this.
4446     SDValue One = DAG.getConstant(1, DL, XLenVT);
4447     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4448     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4449     MVT I32ContainerVT =
4450         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4451     SDValue I32Mask =
4452         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4453     // Limit the active VL to two.
4454     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
4455     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
4456     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4457     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT,
4458                            DAG.getUNDEF(I32ContainerVT), Zero, InsertI64VL);
4459     // First slide in the hi value, then the lo in underneath it.
4460     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4461                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
4462                            I32Mask, InsertI64VL);
4463     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4464                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo,
4465                            I32Mask, InsertI64VL);
4466     // Bitcast back to the right container type.
4467     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4468   }
4469 
4470   // Now that the value is in a vector, slide it into position.
4471   SDValue InsertVL =
4472       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4473   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4474                                 ValInVec, Idx, Mask, InsertVL);
4475   if (!VecVT.isFixedLengthVector())
4476     return Slideup;
4477   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4478 }
4479 
4480 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4481 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4482 // types this is done using VMV_X_S to allow us to glean information about the
4483 // sign bits of the result.
4484 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4485                                                      SelectionDAG &DAG) const {
4486   SDLoc DL(Op);
4487   SDValue Idx = Op.getOperand(1);
4488   SDValue Vec = Op.getOperand(0);
4489   EVT EltVT = Op.getValueType();
4490   MVT VecVT = Vec.getSimpleValueType();
4491   MVT XLenVT = Subtarget.getXLenVT();
4492 
4493   if (VecVT.getVectorElementType() == MVT::i1) {
4494     if (VecVT.isFixedLengthVector()) {
4495       unsigned NumElts = VecVT.getVectorNumElements();
4496       if (NumElts >= 8) {
4497         MVT WideEltVT;
4498         unsigned WidenVecLen;
4499         SDValue ExtractElementIdx;
4500         SDValue ExtractBitIdx;
4501         unsigned MaxEEW = Subtarget.getMaxELENForFixedLengthVectors();
4502         MVT LargestEltVT = MVT::getIntegerVT(
4503             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4504         if (NumElts <= LargestEltVT.getSizeInBits()) {
4505           assert(isPowerOf2_32(NumElts) &&
4506                  "the number of elements should be power of 2");
4507           WideEltVT = MVT::getIntegerVT(NumElts);
4508           WidenVecLen = 1;
4509           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4510           ExtractBitIdx = Idx;
4511         } else {
4512           WideEltVT = LargestEltVT;
4513           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4514           // extract element index = index / element width
4515           ExtractElementIdx = DAG.getNode(
4516               ISD::SRL, DL, XLenVT, Idx,
4517               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4518           // mask bit index = index % element width
4519           ExtractBitIdx = DAG.getNode(
4520               ISD::AND, DL, XLenVT, Idx,
4521               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4522         }
4523         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4524         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4525         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4526                                          Vec, ExtractElementIdx);
4527         // Extract the bit from GPR.
4528         SDValue ShiftRight =
4529             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4530         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4531                            DAG.getConstant(1, DL, XLenVT));
4532       }
4533     }
4534     // Otherwise, promote to an i8 vector and extract from that.
4535     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4536     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4537     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4538   }
4539 
4540   // If this is a fixed vector, we need to convert it to a scalable vector.
4541   MVT ContainerVT = VecVT;
4542   if (VecVT.isFixedLengthVector()) {
4543     ContainerVT = getContainerForFixedLengthVector(VecVT);
4544     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4545   }
4546 
4547   // If the index is 0, the vector is already in the right position.
4548   if (!isNullConstant(Idx)) {
4549     // Use a VL of 1 to avoid processing more elements than we need.
4550     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4551     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4552     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4553     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4554                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4555   }
4556 
4557   if (!EltVT.isInteger()) {
4558     // Floating-point extracts are handled in TableGen.
4559     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4560                        DAG.getConstant(0, DL, XLenVT));
4561   }
4562 
4563   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4564   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4565 }
4566 
4567 // Some RVV intrinsics may claim that they want an integer operand to be
4568 // promoted or expanded.
4569 static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
4570                                            const RISCVSubtarget &Subtarget) {
4571   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4572           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4573          "Unexpected opcode");
4574 
4575   if (!Subtarget.hasVInstructions())
4576     return SDValue();
4577 
4578   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4579   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4580   SDLoc DL(Op);
4581 
4582   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4583       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4584   if (!II || !II->hasScalarOperand())
4585     return SDValue();
4586 
4587   unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
4588   assert(SplatOp < Op.getNumOperands());
4589 
4590   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4591   SDValue &ScalarOp = Operands[SplatOp];
4592   MVT OpVT = ScalarOp.getSimpleValueType();
4593   MVT XLenVT = Subtarget.getXLenVT();
4594 
4595   // If this isn't a scalar, or its type is XLenVT we're done.
4596   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4597     return SDValue();
4598 
4599   // Simplest case is that the operand needs to be promoted to XLenVT.
4600   if (OpVT.bitsLT(XLenVT)) {
4601     // If the operand is a constant, sign extend to increase our chances
4602     // of being able to use a .vi instruction. ANY_EXTEND would become a
4603     // a zero extend and the simm5 check in isel would fail.
4604     // FIXME: Should we ignore the upper bits in isel instead?
4605     unsigned ExtOpc =
4606         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4607     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4608     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4609   }
4610 
4611   // Use the previous operand to get the vXi64 VT. The result might be a mask
4612   // VT for compares. Using the previous operand assumes that the previous
4613   // operand will never have a smaller element size than a scalar operand and
4614   // that a widening operation never uses SEW=64.
4615   // NOTE: If this fails the below assert, we can probably just find the
4616   // element count from any operand or result and use it to construct the VT.
4617   assert(II->ScalarOperand > 0 && "Unexpected splat operand!");
4618   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4619 
4620   // The more complex case is when the scalar is larger than XLenVT.
4621   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4622          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4623 
4624   // If this is a sign-extended 32-bit constant, we can truncate it and rely
4625   // on the instruction to sign-extend since SEW>XLEN.
4626   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
4627     if (isInt<32>(CVal->getSExtValue())) {
4628       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4629       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4630     }
4631   }
4632 
4633   switch (IntNo) {
4634   case Intrinsic::riscv_vslide1up:
4635   case Intrinsic::riscv_vslide1down:
4636   case Intrinsic::riscv_vslide1up_mask:
4637   case Intrinsic::riscv_vslide1down_mask: {
4638     // We need to special case these when the scalar is larger than XLen.
4639     unsigned NumOps = Op.getNumOperands();
4640     bool IsMasked = NumOps == 7;
4641 
4642     // Convert the vector source to the equivalent nxvXi32 vector.
4643     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4644     SDValue Vec = DAG.getBitcast(I32VT, Operands[2]);
4645 
4646     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4647                                    DAG.getConstant(0, DL, XLenVT));
4648     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4649                                    DAG.getConstant(1, DL, XLenVT));
4650 
4651     // Double the VL since we halved SEW.
4652     SDValue AVL = getVLOperand(Op);
4653     SDValue I32VL;
4654 
4655     // Optimize for constant AVL
4656     if (isa<ConstantSDNode>(AVL)) {
4657       unsigned EltSize = VT.getScalarSizeInBits();
4658       unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
4659 
4660       unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
4661       unsigned MaxVLMAX =
4662           RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
4663 
4664       unsigned VectorBitsMin = Subtarget.getRealMinVLen();
4665       unsigned MinVLMAX =
4666           RISCVTargetLowering::computeVLMAX(VectorBitsMin, EltSize, MinSize);
4667 
4668       uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
4669       if (AVLInt <= MinVLMAX) {
4670         I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
4671       } else if (AVLInt >= 2 * MaxVLMAX) {
4672         // Just set vl to VLMAX in this situation
4673         RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(I32VT);
4674         SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4675         unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
4676         SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4677         SDValue SETVLMAX = DAG.getTargetConstant(
4678             Intrinsic::riscv_vsetvlimax_opt, DL, MVT::i32);
4679         I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
4680                             LMUL);
4681       } else {
4682         // For AVL between (MinVLMAX, 2 * MaxVLMAX), the actual working vl
4683         // is related to the hardware implementation.
4684         // So let the following code handle
4685       }
4686     }
4687     if (!I32VL) {
4688       RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
4689       SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4690       unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
4691       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4692       SDValue SETVL =
4693           DAG.getTargetConstant(Intrinsic::riscv_vsetvli_opt, DL, MVT::i32);
4694       // Using vsetvli instruction to get actually used length which related to
4695       // the hardware implementation
4696       SDValue VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVL, AVL,
4697                                SEW, LMUL);
4698       I32VL =
4699           DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4700     }
4701 
4702     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
4703     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, I32VL);
4704 
4705     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4706     // instructions.
4707     SDValue Passthru;
4708     if (IsMasked)
4709       Passthru = DAG.getUNDEF(I32VT);
4710     else
4711       Passthru = DAG.getBitcast(I32VT, Operands[1]);
4712 
4713     if (IntNo == Intrinsic::riscv_vslide1up ||
4714         IntNo == Intrinsic::riscv_vslide1up_mask) {
4715       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4716                         ScalarHi, I32Mask, I32VL);
4717       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4718                         ScalarLo, I32Mask, I32VL);
4719     } else {
4720       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4721                         ScalarLo, I32Mask, I32VL);
4722       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4723                         ScalarHi, I32Mask, I32VL);
4724     }
4725 
4726     // Convert back to nxvXi64.
4727     Vec = DAG.getBitcast(VT, Vec);
4728 
4729     if (!IsMasked)
4730       return Vec;
4731     // Apply mask after the operation.
4732     SDValue Mask = Operands[NumOps - 3];
4733     SDValue MaskedOff = Operands[1];
4734     // Assume Policy operand is the last operand.
4735     uint64_t Policy =
4736         cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
4737     // We don't need to select maskedoff if it's undef.
4738     if (MaskedOff.isUndef())
4739       return Vec;
4740     // TAMU
4741     if (Policy == RISCVII::TAIL_AGNOSTIC)
4742       return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
4743                          AVL);
4744     // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
4745     // It's fine because vmerge does not care mask policy.
4746     return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff,
4747                        AVL);
4748   }
4749   }
4750 
4751   // We need to convert the scalar to a splat vector.
4752   // FIXME: Can we implicitly truncate the scalar if it is known to
4753   // be sign extended?
4754   SDValue VL = getVLOperand(Op);
4755   assert(VL.getValueType() == XLenVT);
4756   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
4757   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4758 }
4759 
4760 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4761                                                      SelectionDAG &DAG) const {
4762   unsigned IntNo = Op.getConstantOperandVal(0);
4763   SDLoc DL(Op);
4764   MVT XLenVT = Subtarget.getXLenVT();
4765 
4766   switch (IntNo) {
4767   default:
4768     break; // Don't custom lower most intrinsics.
4769   case Intrinsic::thread_pointer: {
4770     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4771     return DAG.getRegister(RISCV::X4, PtrVT);
4772   }
4773   case Intrinsic::riscv_orc_b:
4774   case Intrinsic::riscv_brev8: {
4775     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
4776     unsigned Opc =
4777         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4778     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4779                        DAG.getConstant(7, DL, XLenVT));
4780   }
4781   case Intrinsic::riscv_grev:
4782   case Intrinsic::riscv_gorc: {
4783     unsigned Opc =
4784         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4785     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4786   }
4787   case Intrinsic::riscv_zip:
4788   case Intrinsic::riscv_unzip: {
4789     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4790     // For i32 the immediate is 15. For i64 the immediate is 31.
4791     unsigned Opc =
4792         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4793     unsigned BitWidth = Op.getValueSizeInBits();
4794     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4795     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4796                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4797   }
4798   case Intrinsic::riscv_shfl:
4799   case Intrinsic::riscv_unshfl: {
4800     unsigned Opc =
4801         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4802     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4803   }
4804   case Intrinsic::riscv_bcompress:
4805   case Intrinsic::riscv_bdecompress: {
4806     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4807                                                        : RISCVISD::BDECOMPRESS;
4808     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4809   }
4810   case Intrinsic::riscv_bfp:
4811     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4812                        Op.getOperand(2));
4813   case Intrinsic::riscv_fsl:
4814     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4815                        Op.getOperand(2), Op.getOperand(3));
4816   case Intrinsic::riscv_fsr:
4817     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4818                        Op.getOperand(2), Op.getOperand(3));
4819   case Intrinsic::riscv_vmv_x_s:
4820     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4821     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4822                        Op.getOperand(1));
4823   case Intrinsic::riscv_vmv_v_x:
4824     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4825                             Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
4826                             Subtarget);
4827   case Intrinsic::riscv_vfmv_v_f:
4828     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4829                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4830   case Intrinsic::riscv_vmv_s_x: {
4831     SDValue Scalar = Op.getOperand(2);
4832 
4833     if (Scalar.getValueType().bitsLE(XLenVT)) {
4834       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4835       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4836                          Op.getOperand(1), Scalar, Op.getOperand(3));
4837     }
4838 
4839     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4840 
4841     // This is an i64 value that lives in two scalar registers. We have to
4842     // insert this in a convoluted way. First we build vXi64 splat containing
4843     // the two values that we assemble using some bit math. Next we'll use
4844     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4845     // to merge element 0 from our splat into the source vector.
4846     // FIXME: This is probably not the best way to do this, but it is
4847     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4848     // point.
4849     //   sw lo, (a0)
4850     //   sw hi, 4(a0)
4851     //   vlse vX, (a0)
4852     //
4853     //   vid.v      vVid
4854     //   vmseq.vx   mMask, vVid, 0
4855     //   vmerge.vvm vDest, vSrc, vVal, mMask
4856     MVT VT = Op.getSimpleValueType();
4857     SDValue Vec = Op.getOperand(1);
4858     SDValue VL = getVLOperand(Op);
4859 
4860     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
4861     if (Op.getOperand(1).isUndef())
4862       return SplattedVal;
4863     SDValue SplattedIdx =
4864         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
4865                     DAG.getConstant(0, DL, MVT::i32), VL);
4866 
4867     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
4868     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4869     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4870     SDValue SelectCond =
4871         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4872                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4873     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4874                        Vec, VL);
4875   }
4876   }
4877 
4878   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4879 }
4880 
4881 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4882                                                     SelectionDAG &DAG) const {
4883   unsigned IntNo = Op.getConstantOperandVal(1);
4884   switch (IntNo) {
4885   default:
4886     break;
4887   case Intrinsic::riscv_masked_strided_load: {
4888     SDLoc DL(Op);
4889     MVT XLenVT = Subtarget.getXLenVT();
4890 
4891     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4892     // the selection of the masked intrinsics doesn't do this for us.
4893     SDValue Mask = Op.getOperand(5);
4894     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4895 
4896     MVT VT = Op->getSimpleValueType(0);
4897     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4898 
4899     SDValue PassThru = Op.getOperand(2);
4900     if (!IsUnmasked) {
4901       MVT MaskVT =
4902           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4903       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4904       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4905     }
4906 
4907     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4908 
4909     SDValue IntID = DAG.getTargetConstant(
4910         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4911         XLenVT);
4912 
4913     auto *Load = cast<MemIntrinsicSDNode>(Op);
4914     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4915     if (IsUnmasked)
4916       Ops.push_back(DAG.getUNDEF(ContainerVT));
4917     else
4918       Ops.push_back(PassThru);
4919     Ops.push_back(Op.getOperand(3)); // Ptr
4920     Ops.push_back(Op.getOperand(4)); // Stride
4921     if (!IsUnmasked)
4922       Ops.push_back(Mask);
4923     Ops.push_back(VL);
4924     if (!IsUnmasked) {
4925       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4926       Ops.push_back(Policy);
4927     }
4928 
4929     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4930     SDValue Result =
4931         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4932                                 Load->getMemoryVT(), Load->getMemOperand());
4933     SDValue Chain = Result.getValue(1);
4934     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4935     return DAG.getMergeValues({Result, Chain}, DL);
4936   }
4937   case Intrinsic::riscv_seg2_load:
4938   case Intrinsic::riscv_seg3_load:
4939   case Intrinsic::riscv_seg4_load:
4940   case Intrinsic::riscv_seg5_load:
4941   case Intrinsic::riscv_seg6_load:
4942   case Intrinsic::riscv_seg7_load:
4943   case Intrinsic::riscv_seg8_load: {
4944     SDLoc DL(Op);
4945     static const Intrinsic::ID VlsegInts[7] = {
4946         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
4947         Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
4948         Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
4949         Intrinsic::riscv_vlseg8};
4950     unsigned NF = Op->getNumValues() - 1;
4951     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
4952     MVT XLenVT = Subtarget.getXLenVT();
4953     MVT VT = Op->getSimpleValueType(0);
4954     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4955 
4956     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4957     SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT);
4958     auto *Load = cast<MemIntrinsicSDNode>(Op);
4959     SmallVector<EVT, 9> ContainerVTs(NF, ContainerVT);
4960     ContainerVTs.push_back(MVT::Other);
4961     SDVTList VTs = DAG.getVTList(ContainerVTs);
4962     SDValue Result =
4963         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs,
4964                                 {Load->getChain(), IntID, Op.getOperand(2), VL},
4965                                 Load->getMemoryVT(), Load->getMemOperand());
4966     SmallVector<SDValue, 9> Results;
4967     for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++)
4968       Results.push_back(convertFromScalableVector(VT, Result.getValue(RetIdx),
4969                                                   DAG, Subtarget));
4970     Results.push_back(Result.getValue(NF));
4971     return DAG.getMergeValues(Results, DL);
4972   }
4973   }
4974 
4975   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4976 }
4977 
4978 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4979                                                  SelectionDAG &DAG) const {
4980   unsigned IntNo = Op.getConstantOperandVal(1);
4981   switch (IntNo) {
4982   default:
4983     break;
4984   case Intrinsic::riscv_masked_strided_store: {
4985     SDLoc DL(Op);
4986     MVT XLenVT = Subtarget.getXLenVT();
4987 
4988     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4989     // the selection of the masked intrinsics doesn't do this for us.
4990     SDValue Mask = Op.getOperand(5);
4991     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4992 
4993     SDValue Val = Op.getOperand(2);
4994     MVT VT = Val.getSimpleValueType();
4995     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4996 
4997     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4998     if (!IsUnmasked) {
4999       MVT MaskVT =
5000           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5001       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5002     }
5003 
5004     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5005 
5006     SDValue IntID = DAG.getTargetConstant(
5007         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
5008         XLenVT);
5009 
5010     auto *Store = cast<MemIntrinsicSDNode>(Op);
5011     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
5012     Ops.push_back(Val);
5013     Ops.push_back(Op.getOperand(3)); // Ptr
5014     Ops.push_back(Op.getOperand(4)); // Stride
5015     if (!IsUnmasked)
5016       Ops.push_back(Mask);
5017     Ops.push_back(VL);
5018 
5019     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
5020                                    Ops, Store->getMemoryVT(),
5021                                    Store->getMemOperand());
5022   }
5023   }
5024 
5025   return SDValue();
5026 }
5027 
5028 static MVT getLMUL1VT(MVT VT) {
5029   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
5030          "Unexpected vector MVT");
5031   return MVT::getScalableVectorVT(
5032       VT.getVectorElementType(),
5033       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
5034 }
5035 
5036 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
5037   switch (ISDOpcode) {
5038   default:
5039     llvm_unreachable("Unhandled reduction");
5040   case ISD::VECREDUCE_ADD:
5041     return RISCVISD::VECREDUCE_ADD_VL;
5042   case ISD::VECREDUCE_UMAX:
5043     return RISCVISD::VECREDUCE_UMAX_VL;
5044   case ISD::VECREDUCE_SMAX:
5045     return RISCVISD::VECREDUCE_SMAX_VL;
5046   case ISD::VECREDUCE_UMIN:
5047     return RISCVISD::VECREDUCE_UMIN_VL;
5048   case ISD::VECREDUCE_SMIN:
5049     return RISCVISD::VECREDUCE_SMIN_VL;
5050   case ISD::VECREDUCE_AND:
5051     return RISCVISD::VECREDUCE_AND_VL;
5052   case ISD::VECREDUCE_OR:
5053     return RISCVISD::VECREDUCE_OR_VL;
5054   case ISD::VECREDUCE_XOR:
5055     return RISCVISD::VECREDUCE_XOR_VL;
5056   }
5057 }
5058 
5059 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
5060                                                          SelectionDAG &DAG,
5061                                                          bool IsVP) const {
5062   SDLoc DL(Op);
5063   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
5064   MVT VecVT = Vec.getSimpleValueType();
5065   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
5066           Op.getOpcode() == ISD::VECREDUCE_OR ||
5067           Op.getOpcode() == ISD::VECREDUCE_XOR ||
5068           Op.getOpcode() == ISD::VP_REDUCE_AND ||
5069           Op.getOpcode() == ISD::VP_REDUCE_OR ||
5070           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
5071          "Unexpected reduction lowering");
5072 
5073   MVT XLenVT = Subtarget.getXLenVT();
5074   assert(Op.getValueType() == XLenVT &&
5075          "Expected reduction output to be legalized to XLenVT");
5076 
5077   MVT ContainerVT = VecVT;
5078   if (VecVT.isFixedLengthVector()) {
5079     ContainerVT = getContainerForFixedLengthVector(VecVT);
5080     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5081   }
5082 
5083   SDValue Mask, VL;
5084   if (IsVP) {
5085     Mask = Op.getOperand(2);
5086     VL = Op.getOperand(3);
5087   } else {
5088     std::tie(Mask, VL) =
5089         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5090   }
5091 
5092   unsigned BaseOpc;
5093   ISD::CondCode CC;
5094   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
5095 
5096   switch (Op.getOpcode()) {
5097   default:
5098     llvm_unreachable("Unhandled reduction");
5099   case ISD::VECREDUCE_AND:
5100   case ISD::VP_REDUCE_AND: {
5101     // vcpop ~x == 0
5102     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
5103     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
5104     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5105     CC = ISD::SETEQ;
5106     BaseOpc = ISD::AND;
5107     break;
5108   }
5109   case ISD::VECREDUCE_OR:
5110   case ISD::VP_REDUCE_OR:
5111     // vcpop x != 0
5112     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5113     CC = ISD::SETNE;
5114     BaseOpc = ISD::OR;
5115     break;
5116   case ISD::VECREDUCE_XOR:
5117   case ISD::VP_REDUCE_XOR: {
5118     // ((vcpop x) & 1) != 0
5119     SDValue One = DAG.getConstant(1, DL, XLenVT);
5120     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5121     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
5122     CC = ISD::SETNE;
5123     BaseOpc = ISD::XOR;
5124     break;
5125   }
5126   }
5127 
5128   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
5129 
5130   if (!IsVP)
5131     return SetCC;
5132 
5133   // Now include the start value in the operation.
5134   // Note that we must return the start value when no elements are operated
5135   // upon. The vcpop instructions we've emitted in each case above will return
5136   // 0 for an inactive vector, and so we've already received the neutral value:
5137   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
5138   // can simply include the start value.
5139   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
5140 }
5141 
5142 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
5143                                             SelectionDAG &DAG) const {
5144   SDLoc DL(Op);
5145   SDValue Vec = Op.getOperand(0);
5146   EVT VecEVT = Vec.getValueType();
5147 
5148   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
5149 
5150   // Due to ordering in legalize types we may have a vector type that needs to
5151   // be split. Do that manually so we can get down to a legal type.
5152   while (getTypeAction(*DAG.getContext(), VecEVT) ==
5153          TargetLowering::TypeSplitVector) {
5154     SDValue Lo, Hi;
5155     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
5156     VecEVT = Lo.getValueType();
5157     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
5158   }
5159 
5160   // TODO: The type may need to be widened rather than split. Or widened before
5161   // it can be split.
5162   if (!isTypeLegal(VecEVT))
5163     return SDValue();
5164 
5165   MVT VecVT = VecEVT.getSimpleVT();
5166   MVT VecEltVT = VecVT.getVectorElementType();
5167   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
5168 
5169   MVT ContainerVT = VecVT;
5170   if (VecVT.isFixedLengthVector()) {
5171     ContainerVT = getContainerForFixedLengthVector(VecVT);
5172     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5173   }
5174 
5175   MVT M1VT = getLMUL1VT(ContainerVT);
5176   MVT XLenVT = Subtarget.getXLenVT();
5177 
5178   SDValue Mask, VL;
5179   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5180 
5181   SDValue NeutralElem =
5182       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
5183   SDValue IdentitySplat =
5184       lowerScalarSplat(SDValue(), NeutralElem, DAG.getConstant(1, DL, XLenVT),
5185                        M1VT, DL, DAG, Subtarget);
5186   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
5187                                   IdentitySplat, Mask, VL);
5188   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5189                              DAG.getConstant(0, DL, XLenVT));
5190   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5191 }
5192 
5193 // Given a reduction op, this function returns the matching reduction opcode,
5194 // the vector SDValue and the scalar SDValue required to lower this to a
5195 // RISCVISD node.
5196 static std::tuple<unsigned, SDValue, SDValue>
5197 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
5198   SDLoc DL(Op);
5199   auto Flags = Op->getFlags();
5200   unsigned Opcode = Op.getOpcode();
5201   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
5202   switch (Opcode) {
5203   default:
5204     llvm_unreachable("Unhandled reduction");
5205   case ISD::VECREDUCE_FADD: {
5206     // Use positive zero if we can. It is cheaper to materialize.
5207     SDValue Zero =
5208         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
5209     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
5210   }
5211   case ISD::VECREDUCE_SEQ_FADD:
5212     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
5213                            Op.getOperand(0));
5214   case ISD::VECREDUCE_FMIN:
5215     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
5216                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5217   case ISD::VECREDUCE_FMAX:
5218     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
5219                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5220   }
5221 }
5222 
5223 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
5224                                               SelectionDAG &DAG) const {
5225   SDLoc DL(Op);
5226   MVT VecEltVT = Op.getSimpleValueType();
5227 
5228   unsigned RVVOpcode;
5229   SDValue VectorVal, ScalarVal;
5230   std::tie(RVVOpcode, VectorVal, ScalarVal) =
5231       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
5232   MVT VecVT = VectorVal.getSimpleValueType();
5233 
5234   MVT ContainerVT = VecVT;
5235   if (VecVT.isFixedLengthVector()) {
5236     ContainerVT = getContainerForFixedLengthVector(VecVT);
5237     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
5238   }
5239 
5240   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
5241   MVT XLenVT = Subtarget.getXLenVT();
5242 
5243   SDValue Mask, VL;
5244   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5245 
5246   SDValue ScalarSplat =
5247       lowerScalarSplat(SDValue(), ScalarVal, DAG.getConstant(1, DL, XLenVT),
5248                        M1VT, DL, DAG, Subtarget);
5249   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
5250                                   VectorVal, ScalarSplat, Mask, VL);
5251   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5252                      DAG.getConstant(0, DL, XLenVT));
5253 }
5254 
5255 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
5256   switch (ISDOpcode) {
5257   default:
5258     llvm_unreachable("Unhandled reduction");
5259   case ISD::VP_REDUCE_ADD:
5260     return RISCVISD::VECREDUCE_ADD_VL;
5261   case ISD::VP_REDUCE_UMAX:
5262     return RISCVISD::VECREDUCE_UMAX_VL;
5263   case ISD::VP_REDUCE_SMAX:
5264     return RISCVISD::VECREDUCE_SMAX_VL;
5265   case ISD::VP_REDUCE_UMIN:
5266     return RISCVISD::VECREDUCE_UMIN_VL;
5267   case ISD::VP_REDUCE_SMIN:
5268     return RISCVISD::VECREDUCE_SMIN_VL;
5269   case ISD::VP_REDUCE_AND:
5270     return RISCVISD::VECREDUCE_AND_VL;
5271   case ISD::VP_REDUCE_OR:
5272     return RISCVISD::VECREDUCE_OR_VL;
5273   case ISD::VP_REDUCE_XOR:
5274     return RISCVISD::VECREDUCE_XOR_VL;
5275   case ISD::VP_REDUCE_FADD:
5276     return RISCVISD::VECREDUCE_FADD_VL;
5277   case ISD::VP_REDUCE_SEQ_FADD:
5278     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
5279   case ISD::VP_REDUCE_FMAX:
5280     return RISCVISD::VECREDUCE_FMAX_VL;
5281   case ISD::VP_REDUCE_FMIN:
5282     return RISCVISD::VECREDUCE_FMIN_VL;
5283   }
5284 }
5285 
5286 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
5287                                            SelectionDAG &DAG) const {
5288   SDLoc DL(Op);
5289   SDValue Vec = Op.getOperand(1);
5290   EVT VecEVT = Vec.getValueType();
5291 
5292   // TODO: The type may need to be widened rather than split. Or widened before
5293   // it can be split.
5294   if (!isTypeLegal(VecEVT))
5295     return SDValue();
5296 
5297   MVT VecVT = VecEVT.getSimpleVT();
5298   MVT VecEltVT = VecVT.getVectorElementType();
5299   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5300 
5301   MVT ContainerVT = VecVT;
5302   if (VecVT.isFixedLengthVector()) {
5303     ContainerVT = getContainerForFixedLengthVector(VecVT);
5304     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5305   }
5306 
5307   SDValue VL = Op.getOperand(3);
5308   SDValue Mask = Op.getOperand(2);
5309 
5310   MVT M1VT = getLMUL1VT(ContainerVT);
5311   MVT XLenVT = Subtarget.getXLenVT();
5312   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5313 
5314   SDValue StartSplat = lowerScalarSplat(SDValue(), Op.getOperand(0),
5315                                         DAG.getConstant(1, DL, XLenVT), M1VT,
5316                                         DL, DAG, Subtarget);
5317   SDValue Reduction =
5318       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5319   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5320                              DAG.getConstant(0, DL, XLenVT));
5321   if (!VecVT.isInteger())
5322     return Elt0;
5323   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5324 }
5325 
5326 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5327                                                    SelectionDAG &DAG) const {
5328   SDValue Vec = Op.getOperand(0);
5329   SDValue SubVec = Op.getOperand(1);
5330   MVT VecVT = Vec.getSimpleValueType();
5331   MVT SubVecVT = SubVec.getSimpleValueType();
5332 
5333   SDLoc DL(Op);
5334   MVT XLenVT = Subtarget.getXLenVT();
5335   unsigned OrigIdx = Op.getConstantOperandVal(2);
5336   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5337 
5338   // We don't have the ability to slide mask vectors up indexed by their i1
5339   // elements; the smallest we can do is i8. Often we are able to bitcast to
5340   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5341   // into a scalable one, we might not necessarily have enough scalable
5342   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
5343   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5344       (OrigIdx != 0 || !Vec.isUndef())) {
5345     if (VecVT.getVectorMinNumElements() >= 8 &&
5346         SubVecVT.getVectorMinNumElements() >= 8) {
5347       assert(OrigIdx % 8 == 0 && "Invalid index");
5348       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5349              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5350              "Unexpected mask vector lowering");
5351       OrigIdx /= 8;
5352       SubVecVT =
5353           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5354                            SubVecVT.isScalableVector());
5355       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5356                                VecVT.isScalableVector());
5357       Vec = DAG.getBitcast(VecVT, Vec);
5358       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5359     } else {
5360       // We can't slide this mask vector up indexed by its i1 elements.
5361       // This poses a problem when we wish to insert a scalable vector which
5362       // can't be re-expressed as a larger type. Just choose the slow path and
5363       // extend to a larger type, then truncate back down.
5364       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5365       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5366       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5367       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5368       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5369                         Op.getOperand(2));
5370       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5371       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5372     }
5373   }
5374 
5375   // If the subvector vector is a fixed-length type, we cannot use subregister
5376   // manipulation to simplify the codegen; we don't know which register of a
5377   // LMUL group contains the specific subvector as we only know the minimum
5378   // register size. Therefore we must slide the vector group up the full
5379   // amount.
5380   if (SubVecVT.isFixedLengthVector()) {
5381     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5382       return Op;
5383     MVT ContainerVT = VecVT;
5384     if (VecVT.isFixedLengthVector()) {
5385       ContainerVT = getContainerForFixedLengthVector(VecVT);
5386       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5387     }
5388     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5389                          DAG.getUNDEF(ContainerVT), SubVec,
5390                          DAG.getConstant(0, DL, XLenVT));
5391     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5392       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5393       return DAG.getBitcast(Op.getValueType(), SubVec);
5394     }
5395     SDValue Mask =
5396         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5397     // Set the vector length to only the number of elements we care about. Note
5398     // that for slideup this includes the offset.
5399     SDValue VL =
5400         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5401     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5402     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5403                                   SubVec, SlideupAmt, Mask, VL);
5404     if (VecVT.isFixedLengthVector())
5405       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5406     return DAG.getBitcast(Op.getValueType(), Slideup);
5407   }
5408 
5409   unsigned SubRegIdx, RemIdx;
5410   std::tie(SubRegIdx, RemIdx) =
5411       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5412           VecVT, SubVecVT, OrigIdx, TRI);
5413 
5414   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5415   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5416                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5417                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5418 
5419   // 1. If the Idx has been completely eliminated and this subvector's size is
5420   // a vector register or a multiple thereof, or the surrounding elements are
5421   // undef, then this is a subvector insert which naturally aligns to a vector
5422   // register. These can easily be handled using subregister manipulation.
5423   // 2. If the subvector is smaller than a vector register, then the insertion
5424   // must preserve the undisturbed elements of the register. We do this by
5425   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5426   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5427   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5428   // LMUL=1 type back into the larger vector (resolving to another subregister
5429   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
5430   // to avoid allocating a large register group to hold our subvector.
5431   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5432     return Op;
5433 
5434   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
5435   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
5436   // (in our case undisturbed). This means we can set up a subvector insertion
5437   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5438   // size of the subvector.
5439   MVT InterSubVT = VecVT;
5440   SDValue AlignedExtract = Vec;
5441   unsigned AlignedIdx = OrigIdx - RemIdx;
5442   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5443     InterSubVT = getLMUL1VT(VecVT);
5444     // Extract a subvector equal to the nearest full vector register type. This
5445     // should resolve to a EXTRACT_SUBREG instruction.
5446     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5447                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5448   }
5449 
5450   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5451   // For scalable vectors this must be further multiplied by vscale.
5452   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5453 
5454   SDValue Mask, VL;
5455   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5456 
5457   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5458   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5459   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5460   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5461 
5462   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5463                        DAG.getUNDEF(InterSubVT), SubVec,
5464                        DAG.getConstant(0, DL, XLenVT));
5465 
5466   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5467                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5468 
5469   // If required, insert this subvector back into the correct vector register.
5470   // This should resolve to an INSERT_SUBREG instruction.
5471   if (VecVT.bitsGT(InterSubVT))
5472     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5473                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5474 
5475   // We might have bitcast from a mask type: cast back to the original type if
5476   // required.
5477   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5478 }
5479 
5480 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5481                                                     SelectionDAG &DAG) const {
5482   SDValue Vec = Op.getOperand(0);
5483   MVT SubVecVT = Op.getSimpleValueType();
5484   MVT VecVT = Vec.getSimpleValueType();
5485 
5486   SDLoc DL(Op);
5487   MVT XLenVT = Subtarget.getXLenVT();
5488   unsigned OrigIdx = Op.getConstantOperandVal(1);
5489   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5490 
5491   // We don't have the ability to slide mask vectors down indexed by their i1
5492   // elements; the smallest we can do is i8. Often we are able to bitcast to
5493   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5494   // from a scalable one, we might not necessarily have enough scalable
5495   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
5496   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5497     if (VecVT.getVectorMinNumElements() >= 8 &&
5498         SubVecVT.getVectorMinNumElements() >= 8) {
5499       assert(OrigIdx % 8 == 0 && "Invalid index");
5500       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5501              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5502              "Unexpected mask vector lowering");
5503       OrigIdx /= 8;
5504       SubVecVT =
5505           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5506                            SubVecVT.isScalableVector());
5507       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5508                                VecVT.isScalableVector());
5509       Vec = DAG.getBitcast(VecVT, Vec);
5510     } else {
5511       // We can't slide this mask vector down, indexed by its i1 elements.
5512       // This poses a problem when we wish to extract a scalable vector which
5513       // can't be re-expressed as a larger type. Just choose the slow path and
5514       // extend to a larger type, then truncate back down.
5515       // TODO: We could probably improve this when extracting certain fixed
5516       // from fixed, where we can extract as i8 and shift the correct element
5517       // right to reach the desired subvector?
5518       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5519       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5520       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5521       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5522                         Op.getOperand(1));
5523       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5524       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5525     }
5526   }
5527 
5528   // If the subvector vector is a fixed-length type, we cannot use subregister
5529   // manipulation to simplify the codegen; we don't know which register of a
5530   // LMUL group contains the specific subvector as we only know the minimum
5531   // register size. Therefore we must slide the vector group down the full
5532   // amount.
5533   if (SubVecVT.isFixedLengthVector()) {
5534     // With an index of 0 this is a cast-like subvector, which can be performed
5535     // with subregister operations.
5536     if (OrigIdx == 0)
5537       return Op;
5538     MVT ContainerVT = VecVT;
5539     if (VecVT.isFixedLengthVector()) {
5540       ContainerVT = getContainerForFixedLengthVector(VecVT);
5541       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5542     }
5543     SDValue Mask =
5544         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5545     // Set the vector length to only the number of elements we care about. This
5546     // avoids sliding down elements we're going to discard straight away.
5547     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5548     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5549     SDValue Slidedown =
5550         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5551                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5552     // Now we can use a cast-like subvector extract to get the result.
5553     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5554                             DAG.getConstant(0, DL, XLenVT));
5555     return DAG.getBitcast(Op.getValueType(), Slidedown);
5556   }
5557 
5558   unsigned SubRegIdx, RemIdx;
5559   std::tie(SubRegIdx, RemIdx) =
5560       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5561           VecVT, SubVecVT, OrigIdx, TRI);
5562 
5563   // If the Idx has been completely eliminated then this is a subvector extract
5564   // which naturally aligns to a vector register. These can easily be handled
5565   // using subregister manipulation.
5566   if (RemIdx == 0)
5567     return Op;
5568 
5569   // Else we must shift our vector register directly to extract the subvector.
5570   // Do this using VSLIDEDOWN.
5571 
5572   // If the vector type is an LMUL-group type, extract a subvector equal to the
5573   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
5574   // instruction.
5575   MVT InterSubVT = VecVT;
5576   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5577     InterSubVT = getLMUL1VT(VecVT);
5578     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5579                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5580   }
5581 
5582   // Slide this vector register down by the desired number of elements in order
5583   // to place the desired subvector starting at element 0.
5584   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5585   // For scalable vectors this must be further multiplied by vscale.
5586   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5587 
5588   SDValue Mask, VL;
5589   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5590   SDValue Slidedown =
5591       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5592                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5593 
5594   // Now the vector is in the right position, extract our final subvector. This
5595   // should resolve to a COPY.
5596   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5597                           DAG.getConstant(0, DL, XLenVT));
5598 
5599   // We might have bitcast from a mask type: cast back to the original type if
5600   // required.
5601   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5602 }
5603 
5604 // Lower step_vector to the vid instruction. Any non-identity step value must
5605 // be accounted for my manual expansion.
5606 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5607                                               SelectionDAG &DAG) const {
5608   SDLoc DL(Op);
5609   MVT VT = Op.getSimpleValueType();
5610   MVT XLenVT = Subtarget.getXLenVT();
5611   SDValue Mask, VL;
5612   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5613   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5614   uint64_t StepValImm = Op.getConstantOperandVal(0);
5615   if (StepValImm != 1) {
5616     if (isPowerOf2_64(StepValImm)) {
5617       SDValue StepVal =
5618           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
5619                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5620       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5621     } else {
5622       SDValue StepVal = lowerScalarSplat(
5623           SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
5624           VL, VT, DL, DAG, Subtarget);
5625       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5626     }
5627   }
5628   return StepVec;
5629 }
5630 
5631 // Implement vector_reverse using vrgather.vv with indices determined by
5632 // subtracting the id of each element from (VLMAX-1). This will convert
5633 // the indices like so:
5634 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5635 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5636 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5637                                                  SelectionDAG &DAG) const {
5638   SDLoc DL(Op);
5639   MVT VecVT = Op.getSimpleValueType();
5640   unsigned EltSize = VecVT.getScalarSizeInBits();
5641   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5642 
5643   unsigned MaxVLMAX = 0;
5644   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5645   if (VectorBitsMax != 0)
5646     MaxVLMAX =
5647         RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
5648 
5649   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5650   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5651 
5652   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5653   // to use vrgatherei16.vv.
5654   // TODO: It's also possible to use vrgatherei16.vv for other types to
5655   // decrease register width for the index calculation.
5656   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
5657     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
5658     // Reverse each half, then reassemble them in reverse order.
5659     // NOTE: It's also possible that after splitting that VLMAX no longer
5660     // requires vrgatherei16.vv.
5661     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5662       SDValue Lo, Hi;
5663       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5664       EVT LoVT, HiVT;
5665       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5666       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5667       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5668       // Reassemble the low and high pieces reversed.
5669       // FIXME: This is a CONCAT_VECTORS.
5670       SDValue Res =
5671           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5672                       DAG.getIntPtrConstant(0, DL));
5673       return DAG.getNode(
5674           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5675           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5676     }
5677 
5678     // Just promote the int type to i16 which will double the LMUL.
5679     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5680     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5681   }
5682 
5683   MVT XLenVT = Subtarget.getXLenVT();
5684   SDValue Mask, VL;
5685   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5686 
5687   // Calculate VLMAX-1 for the desired SEW.
5688   unsigned MinElts = VecVT.getVectorMinNumElements();
5689   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5690                               DAG.getConstant(MinElts, DL, XLenVT));
5691   SDValue VLMinus1 =
5692       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5693 
5694   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5695   bool IsRV32E64 =
5696       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5697   SDValue SplatVL;
5698   if (!IsRV32E64)
5699     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5700   else
5701     SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
5702                           VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
5703 
5704   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5705   SDValue Indices =
5706       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5707 
5708   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5709 }
5710 
5711 SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
5712                                                 SelectionDAG &DAG) const {
5713   SDLoc DL(Op);
5714   SDValue V1 = Op.getOperand(0);
5715   SDValue V2 = Op.getOperand(1);
5716   MVT XLenVT = Subtarget.getXLenVT();
5717   MVT VecVT = Op.getSimpleValueType();
5718 
5719   unsigned MinElts = VecVT.getVectorMinNumElements();
5720   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5721                               DAG.getConstant(MinElts, DL, XLenVT));
5722 
5723   int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
5724   SDValue DownOffset, UpOffset;
5725   if (ImmValue >= 0) {
5726     // The operand is a TargetConstant, we need to rebuild it as a regular
5727     // constant.
5728     DownOffset = DAG.getConstant(ImmValue, DL, XLenVT);
5729     UpOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DownOffset);
5730   } else {
5731     // The operand is a TargetConstant, we need to rebuild it as a regular
5732     // constant rather than negating the original operand.
5733     UpOffset = DAG.getConstant(-ImmValue, DL, XLenVT);
5734     DownOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, UpOffset);
5735   }
5736 
5737   MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5738   SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VLMax);
5739 
5740   SDValue SlideDown =
5741       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT, DAG.getUNDEF(VecVT), V1,
5742                   DownOffset, TrueMask, UpOffset);
5743   return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VecVT, SlideDown, V2, UpOffset,
5744                      TrueMask,
5745                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
5746 }
5747 
5748 SDValue
5749 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5750                                                      SelectionDAG &DAG) const {
5751   SDLoc DL(Op);
5752   auto *Load = cast<LoadSDNode>(Op);
5753 
5754   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5755                                         Load->getMemoryVT(),
5756                                         *Load->getMemOperand()) &&
5757          "Expecting a correctly-aligned load");
5758 
5759   MVT VT = Op.getSimpleValueType();
5760   MVT XLenVT = Subtarget.getXLenVT();
5761   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5762 
5763   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5764 
5765   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5766   SDValue IntID = DAG.getTargetConstant(
5767       IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
5768   SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
5769   if (!IsMaskOp)
5770     Ops.push_back(DAG.getUNDEF(ContainerVT));
5771   Ops.push_back(Load->getBasePtr());
5772   Ops.push_back(VL);
5773   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5774   SDValue NewLoad =
5775       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
5776                               Load->getMemoryVT(), Load->getMemOperand());
5777 
5778   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5779   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5780 }
5781 
5782 SDValue
5783 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5784                                                       SelectionDAG &DAG) const {
5785   SDLoc DL(Op);
5786   auto *Store = cast<StoreSDNode>(Op);
5787 
5788   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5789                                         Store->getMemoryVT(),
5790                                         *Store->getMemOperand()) &&
5791          "Expecting a correctly-aligned store");
5792 
5793   SDValue StoreVal = Store->getValue();
5794   MVT VT = StoreVal.getSimpleValueType();
5795   MVT XLenVT = Subtarget.getXLenVT();
5796 
5797   // If the size less than a byte, we need to pad with zeros to make a byte.
5798   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5799     VT = MVT::v8i1;
5800     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5801                            DAG.getConstant(0, DL, VT), StoreVal,
5802                            DAG.getIntPtrConstant(0, DL));
5803   }
5804 
5805   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5806 
5807   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5808 
5809   SDValue NewValue =
5810       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5811 
5812   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5813   SDValue IntID = DAG.getTargetConstant(
5814       IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
5815   return DAG.getMemIntrinsicNode(
5816       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
5817       {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
5818       Store->getMemoryVT(), Store->getMemOperand());
5819 }
5820 
5821 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5822                                              SelectionDAG &DAG) const {
5823   SDLoc DL(Op);
5824   MVT VT = Op.getSimpleValueType();
5825 
5826   const auto *MemSD = cast<MemSDNode>(Op);
5827   EVT MemVT = MemSD->getMemoryVT();
5828   MachineMemOperand *MMO = MemSD->getMemOperand();
5829   SDValue Chain = MemSD->getChain();
5830   SDValue BasePtr = MemSD->getBasePtr();
5831 
5832   SDValue Mask, PassThru, VL;
5833   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5834     Mask = VPLoad->getMask();
5835     PassThru = DAG.getUNDEF(VT);
5836     VL = VPLoad->getVectorLength();
5837   } else {
5838     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5839     Mask = MLoad->getMask();
5840     PassThru = MLoad->getPassThru();
5841   }
5842 
5843   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5844 
5845   MVT XLenVT = Subtarget.getXLenVT();
5846 
5847   MVT ContainerVT = VT;
5848   if (VT.isFixedLengthVector()) {
5849     ContainerVT = getContainerForFixedLengthVector(VT);
5850     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5851     if (!IsUnmasked) {
5852       MVT MaskVT =
5853           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5854       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5855     }
5856   }
5857 
5858   if (!VL)
5859     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5860 
5861   unsigned IntID =
5862       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5863   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5864   if (IsUnmasked)
5865     Ops.push_back(DAG.getUNDEF(ContainerVT));
5866   else
5867     Ops.push_back(PassThru);
5868   Ops.push_back(BasePtr);
5869   if (!IsUnmasked)
5870     Ops.push_back(Mask);
5871   Ops.push_back(VL);
5872   if (!IsUnmasked)
5873     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5874 
5875   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5876 
5877   SDValue Result =
5878       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5879   Chain = Result.getValue(1);
5880 
5881   if (VT.isFixedLengthVector())
5882     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5883 
5884   return DAG.getMergeValues({Result, Chain}, DL);
5885 }
5886 
5887 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5888                                               SelectionDAG &DAG) const {
5889   SDLoc DL(Op);
5890 
5891   const auto *MemSD = cast<MemSDNode>(Op);
5892   EVT MemVT = MemSD->getMemoryVT();
5893   MachineMemOperand *MMO = MemSD->getMemOperand();
5894   SDValue Chain = MemSD->getChain();
5895   SDValue BasePtr = MemSD->getBasePtr();
5896   SDValue Val, Mask, VL;
5897 
5898   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5899     Val = VPStore->getValue();
5900     Mask = VPStore->getMask();
5901     VL = VPStore->getVectorLength();
5902   } else {
5903     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5904     Val = MStore->getValue();
5905     Mask = MStore->getMask();
5906   }
5907 
5908   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5909 
5910   MVT VT = Val.getSimpleValueType();
5911   MVT XLenVT = Subtarget.getXLenVT();
5912 
5913   MVT ContainerVT = VT;
5914   if (VT.isFixedLengthVector()) {
5915     ContainerVT = getContainerForFixedLengthVector(VT);
5916 
5917     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5918     if (!IsUnmasked) {
5919       MVT MaskVT =
5920           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5921       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5922     }
5923   }
5924 
5925   if (!VL)
5926     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5927 
5928   unsigned IntID =
5929       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5930   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5931   Ops.push_back(Val);
5932   Ops.push_back(BasePtr);
5933   if (!IsUnmasked)
5934     Ops.push_back(Mask);
5935   Ops.push_back(VL);
5936 
5937   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5938                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5939 }
5940 
5941 SDValue
5942 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5943                                                       SelectionDAG &DAG) const {
5944   MVT InVT = Op.getOperand(0).getSimpleValueType();
5945   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5946 
5947   MVT VT = Op.getSimpleValueType();
5948 
5949   SDValue Op1 =
5950       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5951   SDValue Op2 =
5952       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5953 
5954   SDLoc DL(Op);
5955   SDValue VL =
5956       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5957 
5958   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5959   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5960 
5961   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5962                             Op.getOperand(2), Mask, VL);
5963 
5964   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5965 }
5966 
5967 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5968     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5969   MVT VT = Op.getSimpleValueType();
5970 
5971   if (VT.getVectorElementType() == MVT::i1)
5972     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5973 
5974   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5975 }
5976 
5977 SDValue
5978 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5979                                                       SelectionDAG &DAG) const {
5980   unsigned Opc;
5981   switch (Op.getOpcode()) {
5982   default: llvm_unreachable("Unexpected opcode!");
5983   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5984   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5985   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5986   }
5987 
5988   return lowerToScalableOp(Op, DAG, Opc);
5989 }
5990 
5991 // Lower vector ABS to smax(X, sub(0, X)).
5992 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5993   SDLoc DL(Op);
5994   MVT VT = Op.getSimpleValueType();
5995   SDValue X = Op.getOperand(0);
5996 
5997   assert(VT.isFixedLengthVector() && "Unexpected type");
5998 
5999   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6000   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
6001 
6002   SDValue Mask, VL;
6003   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6004 
6005   SDValue SplatZero = DAG.getNode(
6006       RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
6007       DAG.getConstant(0, DL, Subtarget.getXLenVT()));
6008   SDValue NegX =
6009       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
6010   SDValue Max =
6011       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
6012 
6013   return convertFromScalableVector(VT, Max, DAG, Subtarget);
6014 }
6015 
6016 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
6017     SDValue Op, SelectionDAG &DAG) const {
6018   SDLoc DL(Op);
6019   MVT VT = Op.getSimpleValueType();
6020   SDValue Mag = Op.getOperand(0);
6021   SDValue Sign = Op.getOperand(1);
6022   assert(Mag.getValueType() == Sign.getValueType() &&
6023          "Can only handle COPYSIGN with matching types.");
6024 
6025   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6026   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
6027   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
6028 
6029   SDValue Mask, VL;
6030   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6031 
6032   SDValue CopySign =
6033       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
6034 
6035   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
6036 }
6037 
6038 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
6039     SDValue Op, SelectionDAG &DAG) const {
6040   MVT VT = Op.getSimpleValueType();
6041   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6042 
6043   MVT I1ContainerVT =
6044       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6045 
6046   SDValue CC =
6047       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
6048   SDValue Op1 =
6049       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
6050   SDValue Op2 =
6051       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
6052 
6053   SDLoc DL(Op);
6054   SDValue Mask, VL;
6055   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6056 
6057   SDValue Select =
6058       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
6059 
6060   return convertFromScalableVector(VT, Select, DAG, Subtarget);
6061 }
6062 
6063 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
6064                                                unsigned NewOpc,
6065                                                bool HasMask) const {
6066   MVT VT = Op.getSimpleValueType();
6067   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6068 
6069   // Create list of operands by converting existing ones to scalable types.
6070   SmallVector<SDValue, 6> Ops;
6071   for (const SDValue &V : Op->op_values()) {
6072     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6073 
6074     // Pass through non-vector operands.
6075     if (!V.getValueType().isVector()) {
6076       Ops.push_back(V);
6077       continue;
6078     }
6079 
6080     // "cast" fixed length vector to a scalable vector.
6081     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
6082            "Only fixed length vectors are supported!");
6083     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6084   }
6085 
6086   SDLoc DL(Op);
6087   SDValue Mask, VL;
6088   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6089   if (HasMask)
6090     Ops.push_back(Mask);
6091   Ops.push_back(VL);
6092 
6093   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
6094   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
6095 }
6096 
6097 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
6098 // * Operands of each node are assumed to be in the same order.
6099 // * The EVL operand is promoted from i32 to i64 on RV64.
6100 // * Fixed-length vectors are converted to their scalable-vector container
6101 //   types.
6102 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
6103                                        unsigned RISCVISDOpc) const {
6104   SDLoc DL(Op);
6105   MVT VT = Op.getSimpleValueType();
6106   SmallVector<SDValue, 4> Ops;
6107 
6108   for (const auto &OpIdx : enumerate(Op->ops())) {
6109     SDValue V = OpIdx.value();
6110     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6111     // Pass through operands which aren't fixed-length vectors.
6112     if (!V.getValueType().isFixedLengthVector()) {
6113       Ops.push_back(V);
6114       continue;
6115     }
6116     // "cast" fixed length vector to a scalable vector.
6117     MVT OpVT = V.getSimpleValueType();
6118     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
6119     assert(useRVVForFixedLengthVectorVT(OpVT) &&
6120            "Only fixed length vectors are supported!");
6121     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6122   }
6123 
6124   if (!VT.isFixedLengthVector())
6125     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
6126 
6127   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6128 
6129   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
6130 
6131   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
6132 }
6133 
6134 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
6135                                             unsigned MaskOpc,
6136                                             unsigned VecOpc) const {
6137   MVT VT = Op.getSimpleValueType();
6138   if (VT.getVectorElementType() != MVT::i1)
6139     return lowerVPOp(Op, DAG, VecOpc);
6140 
6141   // It is safe to drop mask parameter as masked-off elements are undef.
6142   SDValue Op1 = Op->getOperand(0);
6143   SDValue Op2 = Op->getOperand(1);
6144   SDValue VL = Op->getOperand(3);
6145 
6146   MVT ContainerVT = VT;
6147   const bool IsFixed = VT.isFixedLengthVector();
6148   if (IsFixed) {
6149     ContainerVT = getContainerForFixedLengthVector(VT);
6150     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6151     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6152   }
6153 
6154   SDLoc DL(Op);
6155   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
6156   if (!IsFixed)
6157     return Val;
6158   return convertFromScalableVector(VT, Val, DAG, Subtarget);
6159 }
6160 
6161 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
6162 // matched to a RVV indexed load. The RVV indexed load instructions only
6163 // support the "unsigned unscaled" addressing mode; indices are implicitly
6164 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6165 // signed or scaled indexing is extended to the XLEN value type and scaled
6166 // accordingly.
6167 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
6168                                                SelectionDAG &DAG) const {
6169   SDLoc DL(Op);
6170   MVT VT = Op.getSimpleValueType();
6171 
6172   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6173   EVT MemVT = MemSD->getMemoryVT();
6174   MachineMemOperand *MMO = MemSD->getMemOperand();
6175   SDValue Chain = MemSD->getChain();
6176   SDValue BasePtr = MemSD->getBasePtr();
6177 
6178   ISD::LoadExtType LoadExtType;
6179   SDValue Index, Mask, PassThru, VL;
6180 
6181   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
6182     Index = VPGN->getIndex();
6183     Mask = VPGN->getMask();
6184     PassThru = DAG.getUNDEF(VT);
6185     VL = VPGN->getVectorLength();
6186     // VP doesn't support extending loads.
6187     LoadExtType = ISD::NON_EXTLOAD;
6188   } else {
6189     // Else it must be a MGATHER.
6190     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
6191     Index = MGN->getIndex();
6192     Mask = MGN->getMask();
6193     PassThru = MGN->getPassThru();
6194     LoadExtType = MGN->getExtensionType();
6195   }
6196 
6197   MVT IndexVT = Index.getSimpleValueType();
6198   MVT XLenVT = Subtarget.getXLenVT();
6199 
6200   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6201          "Unexpected VTs!");
6202   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6203   // Targets have to explicitly opt-in for extending vector loads.
6204   assert(LoadExtType == ISD::NON_EXTLOAD &&
6205          "Unexpected extending MGATHER/VP_GATHER");
6206   (void)LoadExtType;
6207 
6208   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6209   // the selection of the masked intrinsics doesn't do this for us.
6210   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6211 
6212   MVT ContainerVT = VT;
6213   if (VT.isFixedLengthVector()) {
6214     // We need to use the larger of the result and index type to determine the
6215     // scalable type to use so we don't increase LMUL for any operand/result.
6216     if (VT.bitsGE(IndexVT)) {
6217       ContainerVT = getContainerForFixedLengthVector(VT);
6218       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6219                                  ContainerVT.getVectorElementCount());
6220     } else {
6221       IndexVT = getContainerForFixedLengthVector(IndexVT);
6222       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
6223                                      IndexVT.getVectorElementCount());
6224     }
6225 
6226     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6227 
6228     if (!IsUnmasked) {
6229       MVT MaskVT =
6230           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6231       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6232       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
6233     }
6234   }
6235 
6236   if (!VL)
6237     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6238 
6239   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6240     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6241     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6242                                    VL);
6243     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6244                         TrueMask, VL);
6245   }
6246 
6247   unsigned IntID =
6248       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
6249   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6250   if (IsUnmasked)
6251     Ops.push_back(DAG.getUNDEF(ContainerVT));
6252   else
6253     Ops.push_back(PassThru);
6254   Ops.push_back(BasePtr);
6255   Ops.push_back(Index);
6256   if (!IsUnmasked)
6257     Ops.push_back(Mask);
6258   Ops.push_back(VL);
6259   if (!IsUnmasked)
6260     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
6261 
6262   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
6263   SDValue Result =
6264       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
6265   Chain = Result.getValue(1);
6266 
6267   if (VT.isFixedLengthVector())
6268     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
6269 
6270   return DAG.getMergeValues({Result, Chain}, DL);
6271 }
6272 
6273 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
6274 // matched to a RVV indexed store. The RVV indexed store instructions only
6275 // support the "unsigned unscaled" addressing mode; indices are implicitly
6276 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6277 // signed or scaled indexing is extended to the XLEN value type and scaled
6278 // accordingly.
6279 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
6280                                                 SelectionDAG &DAG) const {
6281   SDLoc DL(Op);
6282   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6283   EVT MemVT = MemSD->getMemoryVT();
6284   MachineMemOperand *MMO = MemSD->getMemOperand();
6285   SDValue Chain = MemSD->getChain();
6286   SDValue BasePtr = MemSD->getBasePtr();
6287 
6288   bool IsTruncatingStore = false;
6289   SDValue Index, Mask, Val, VL;
6290 
6291   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
6292     Index = VPSN->getIndex();
6293     Mask = VPSN->getMask();
6294     Val = VPSN->getValue();
6295     VL = VPSN->getVectorLength();
6296     // VP doesn't support truncating stores.
6297     IsTruncatingStore = false;
6298   } else {
6299     // Else it must be a MSCATTER.
6300     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
6301     Index = MSN->getIndex();
6302     Mask = MSN->getMask();
6303     Val = MSN->getValue();
6304     IsTruncatingStore = MSN->isTruncatingStore();
6305   }
6306 
6307   MVT VT = Val.getSimpleValueType();
6308   MVT IndexVT = Index.getSimpleValueType();
6309   MVT XLenVT = Subtarget.getXLenVT();
6310 
6311   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6312          "Unexpected VTs!");
6313   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6314   // Targets have to explicitly opt-in for extending vector loads and
6315   // truncating vector stores.
6316   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
6317   (void)IsTruncatingStore;
6318 
6319   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6320   // the selection of the masked intrinsics doesn't do this for us.
6321   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6322 
6323   MVT ContainerVT = VT;
6324   if (VT.isFixedLengthVector()) {
6325     // We need to use the larger of the value and index type to determine the
6326     // scalable type to use so we don't increase LMUL for any operand/result.
6327     if (VT.bitsGE(IndexVT)) {
6328       ContainerVT = getContainerForFixedLengthVector(VT);
6329       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6330                                  ContainerVT.getVectorElementCount());
6331     } else {
6332       IndexVT = getContainerForFixedLengthVector(IndexVT);
6333       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
6334                                      IndexVT.getVectorElementCount());
6335     }
6336 
6337     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6338     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
6339 
6340     if (!IsUnmasked) {
6341       MVT MaskVT =
6342           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6343       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6344     }
6345   }
6346 
6347   if (!VL)
6348     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6349 
6350   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6351     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6352     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6353                                    VL);
6354     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6355                         TrueMask, VL);
6356   }
6357 
6358   unsigned IntID =
6359       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
6360   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6361   Ops.push_back(Val);
6362   Ops.push_back(BasePtr);
6363   Ops.push_back(Index);
6364   if (!IsUnmasked)
6365     Ops.push_back(Mask);
6366   Ops.push_back(VL);
6367 
6368   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
6369                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
6370 }
6371 
6372 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
6373                                                SelectionDAG &DAG) const {
6374   const MVT XLenVT = Subtarget.getXLenVT();
6375   SDLoc DL(Op);
6376   SDValue Chain = Op->getOperand(0);
6377   SDValue SysRegNo = DAG.getTargetConstant(
6378       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6379   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
6380   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
6381 
6382   // Encoding used for rounding mode in RISCV differs from that used in
6383   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
6384   // table, which consists of a sequence of 4-bit fields, each representing
6385   // corresponding FLT_ROUNDS mode.
6386   static const int Table =
6387       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
6388       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
6389       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
6390       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
6391       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
6392 
6393   SDValue Shift =
6394       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
6395   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6396                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6397   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6398                                DAG.getConstant(7, DL, XLenVT));
6399 
6400   return DAG.getMergeValues({Masked, Chain}, DL);
6401 }
6402 
6403 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
6404                                                SelectionDAG &DAG) const {
6405   const MVT XLenVT = Subtarget.getXLenVT();
6406   SDLoc DL(Op);
6407   SDValue Chain = Op->getOperand(0);
6408   SDValue RMValue = Op->getOperand(1);
6409   SDValue SysRegNo = DAG.getTargetConstant(
6410       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6411 
6412   // Encoding used for rounding mode in RISCV differs from that used in
6413   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
6414   // a table, which consists of a sequence of 4-bit fields, each representing
6415   // corresponding RISCV mode.
6416   static const unsigned Table =
6417       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
6418       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
6419       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
6420       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
6421       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
6422 
6423   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
6424                               DAG.getConstant(2, DL, XLenVT));
6425   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6426                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6427   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6428                         DAG.getConstant(0x7, DL, XLenVT));
6429   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
6430                      RMValue);
6431 }
6432 
6433 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
6434   switch (IntNo) {
6435   default:
6436     llvm_unreachable("Unexpected Intrinsic");
6437   case Intrinsic::riscv_bcompress:
6438     return RISCVISD::BCOMPRESSW;
6439   case Intrinsic::riscv_bdecompress:
6440     return RISCVISD::BDECOMPRESSW;
6441   case Intrinsic::riscv_bfp:
6442     return RISCVISD::BFPW;
6443   case Intrinsic::riscv_fsl:
6444     return RISCVISD::FSLW;
6445   case Intrinsic::riscv_fsr:
6446     return RISCVISD::FSRW;
6447   }
6448 }
6449 
6450 // Converts the given intrinsic to a i64 operation with any extension.
6451 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
6452                                          unsigned IntNo) {
6453   SDLoc DL(N);
6454   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
6455   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6456   SDValue NewOp2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6457   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp1, NewOp2);
6458   // ReplaceNodeResults requires we maintain the same type for the return value.
6459   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6460 }
6461 
6462 // Returns the opcode of the target-specific SDNode that implements the 32-bit
6463 // form of the given Opcode.
6464 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
6465   switch (Opcode) {
6466   default:
6467     llvm_unreachable("Unexpected opcode");
6468   case ISD::SHL:
6469     return RISCVISD::SLLW;
6470   case ISD::SRA:
6471     return RISCVISD::SRAW;
6472   case ISD::SRL:
6473     return RISCVISD::SRLW;
6474   case ISD::SDIV:
6475     return RISCVISD::DIVW;
6476   case ISD::UDIV:
6477     return RISCVISD::DIVUW;
6478   case ISD::UREM:
6479     return RISCVISD::REMUW;
6480   case ISD::ROTL:
6481     return RISCVISD::ROLW;
6482   case ISD::ROTR:
6483     return RISCVISD::RORW;
6484   }
6485 }
6486 
6487 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
6488 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
6489 // otherwise be promoted to i64, making it difficult to select the
6490 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
6491 // type i8/i16/i32 is lost.
6492 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
6493                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
6494   SDLoc DL(N);
6495   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6496   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
6497   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
6498   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6499   // ReplaceNodeResults requires we maintain the same type for the return value.
6500   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6501 }
6502 
6503 // Converts the given 32-bit operation to a i64 operation with signed extension
6504 // semantic to reduce the signed extension instructions.
6505 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
6506   SDLoc DL(N);
6507   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6508   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6509   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
6510   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6511                                DAG.getValueType(MVT::i32));
6512   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
6513 }
6514 
6515 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
6516                                              SmallVectorImpl<SDValue> &Results,
6517                                              SelectionDAG &DAG) const {
6518   SDLoc DL(N);
6519   switch (N->getOpcode()) {
6520   default:
6521     llvm_unreachable("Don't know how to custom type legalize this operation!");
6522   case ISD::STRICT_FP_TO_SINT:
6523   case ISD::STRICT_FP_TO_UINT:
6524   case ISD::FP_TO_SINT:
6525   case ISD::FP_TO_UINT: {
6526     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6527            "Unexpected custom legalisation");
6528     bool IsStrict = N->isStrictFPOpcode();
6529     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
6530                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
6531     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
6532     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
6533         TargetLowering::TypeSoftenFloat) {
6534       if (!isTypeLegal(Op0.getValueType()))
6535         return;
6536       if (IsStrict) {
6537         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
6538                                 : RISCVISD::STRICT_FCVT_WU_RV64;
6539         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
6540         SDValue Res = DAG.getNode(
6541             Opc, DL, VTs, N->getOperand(0), Op0,
6542             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6543         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6544         Results.push_back(Res.getValue(1));
6545         return;
6546       }
6547       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
6548       SDValue Res =
6549           DAG.getNode(Opc, DL, MVT::i64, Op0,
6550                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6551       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6552       return;
6553     }
6554     // If the FP type needs to be softened, emit a library call using the 'si'
6555     // version. If we left it to default legalization we'd end up with 'di'. If
6556     // the FP type doesn't need to be softened just let generic type
6557     // legalization promote the result type.
6558     RTLIB::Libcall LC;
6559     if (IsSigned)
6560       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
6561     else
6562       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
6563     MakeLibCallOptions CallOptions;
6564     EVT OpVT = Op0.getValueType();
6565     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
6566     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
6567     SDValue Result;
6568     std::tie(Result, Chain) =
6569         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
6570     Results.push_back(Result);
6571     if (IsStrict)
6572       Results.push_back(Chain);
6573     break;
6574   }
6575   case ISD::READCYCLECOUNTER: {
6576     assert(!Subtarget.is64Bit() &&
6577            "READCYCLECOUNTER only has custom type legalization on riscv32");
6578 
6579     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
6580     SDValue RCW =
6581         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
6582 
6583     Results.push_back(
6584         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6585     Results.push_back(RCW.getValue(2));
6586     break;
6587   }
6588   case ISD::MUL: {
6589     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6590     unsigned XLen = Subtarget.getXLen();
6591     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
6592     if (Size > XLen) {
6593       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6594       SDValue LHS = N->getOperand(0);
6595       SDValue RHS = N->getOperand(1);
6596       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6597 
6598       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6599       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6600       // We need exactly one side to be unsigned.
6601       if (LHSIsU == RHSIsU)
6602         return;
6603 
6604       auto MakeMULPair = [&](SDValue S, SDValue U) {
6605         MVT XLenVT = Subtarget.getXLenVT();
6606         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6607         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6608         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6609         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6610         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6611       };
6612 
6613       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6614       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6615 
6616       // The other operand should be signed, but still prefer MULH when
6617       // possible.
6618       if (RHSIsU && LHSIsS && !RHSIsS)
6619         Results.push_back(MakeMULPair(LHS, RHS));
6620       else if (LHSIsU && RHSIsS && !LHSIsS)
6621         Results.push_back(MakeMULPair(RHS, LHS));
6622 
6623       return;
6624     }
6625     LLVM_FALLTHROUGH;
6626   }
6627   case ISD::ADD:
6628   case ISD::SUB:
6629     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6630            "Unexpected custom legalisation");
6631     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6632     break;
6633   case ISD::SHL:
6634   case ISD::SRA:
6635   case ISD::SRL:
6636     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6637            "Unexpected custom legalisation");
6638     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6639       Results.push_back(customLegalizeToWOp(N, DAG));
6640       break;
6641     }
6642 
6643     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6644     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6645     // shift amount.
6646     if (N->getOpcode() == ISD::SHL) {
6647       SDLoc DL(N);
6648       SDValue NewOp0 =
6649           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6650       SDValue NewOp1 =
6651           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6652       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6653       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6654                                    DAG.getValueType(MVT::i32));
6655       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6656     }
6657 
6658     break;
6659   case ISD::ROTL:
6660   case ISD::ROTR:
6661     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6662            "Unexpected custom legalisation");
6663     Results.push_back(customLegalizeToWOp(N, DAG));
6664     break;
6665   case ISD::CTTZ:
6666   case ISD::CTTZ_ZERO_UNDEF:
6667   case ISD::CTLZ:
6668   case ISD::CTLZ_ZERO_UNDEF: {
6669     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6670            "Unexpected custom legalisation");
6671 
6672     SDValue NewOp0 =
6673         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6674     bool IsCTZ =
6675         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6676     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6677     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6678     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6679     return;
6680   }
6681   case ISD::SDIV:
6682   case ISD::UDIV:
6683   case ISD::UREM: {
6684     MVT VT = N->getSimpleValueType(0);
6685     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6686            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6687            "Unexpected custom legalisation");
6688     // Don't promote division/remainder by constant since we should expand those
6689     // to multiply by magic constant.
6690     // FIXME: What if the expansion is disabled for minsize.
6691     if (N->getOperand(1).getOpcode() == ISD::Constant)
6692       return;
6693 
6694     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6695     // the upper 32 bits. For other types we need to sign or zero extend
6696     // based on the opcode.
6697     unsigned ExtOpc = ISD::ANY_EXTEND;
6698     if (VT != MVT::i32)
6699       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6700                                            : ISD::ZERO_EXTEND;
6701 
6702     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6703     break;
6704   }
6705   case ISD::UADDO:
6706   case ISD::USUBO: {
6707     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6708            "Unexpected custom legalisation");
6709     bool IsAdd = N->getOpcode() == ISD::UADDO;
6710     // Create an ADDW or SUBW.
6711     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6712     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6713     SDValue Res =
6714         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6715     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6716                       DAG.getValueType(MVT::i32));
6717 
6718     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
6719     // Since the inputs are sign extended from i32, this is equivalent to
6720     // comparing the lower 32 bits.
6721     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6722     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
6723                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
6724 
6725     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6726     Results.push_back(Overflow);
6727     return;
6728   }
6729   case ISD::UADDSAT:
6730   case ISD::USUBSAT: {
6731     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6732            "Unexpected custom legalisation");
6733     if (Subtarget.hasStdExtZbb()) {
6734       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6735       // sign extend allows overflow of the lower 32 bits to be detected on
6736       // the promoted size.
6737       SDValue LHS =
6738           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6739       SDValue RHS =
6740           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6741       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6742       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6743       return;
6744     }
6745 
6746     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6747     // promotion for UADDO/USUBO.
6748     Results.push_back(expandAddSubSat(N, DAG));
6749     return;
6750   }
6751   case ISD::ABS: {
6752     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6753            "Unexpected custom legalisation");
6754           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6755 
6756     // Expand abs to Y = (sraiw X, 31); subw(xor(X, Y), Y)
6757 
6758     SDValue Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6759 
6760     // Freeze the source so we can increase it's use count.
6761     Src = DAG.getFreeze(Src);
6762 
6763     // Copy sign bit to all bits using the sraiw pattern.
6764     SDValue SignFill = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Src,
6765                                    DAG.getValueType(MVT::i32));
6766     SignFill = DAG.getNode(ISD::SRA, DL, MVT::i64, SignFill,
6767                            DAG.getConstant(31, DL, MVT::i64));
6768 
6769     SDValue NewRes = DAG.getNode(ISD::XOR, DL, MVT::i64, Src, SignFill);
6770     NewRes = DAG.getNode(ISD::SUB, DL, MVT::i64, NewRes, SignFill);
6771 
6772     // NOTE: The result is only required to be anyextended, but sext is
6773     // consistent with type legalization of sub.
6774     NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewRes,
6775                          DAG.getValueType(MVT::i32));
6776     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6777     return;
6778   }
6779   case ISD::BITCAST: {
6780     EVT VT = N->getValueType(0);
6781     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
6782     SDValue Op0 = N->getOperand(0);
6783     EVT Op0VT = Op0.getValueType();
6784     MVT XLenVT = Subtarget.getXLenVT();
6785     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
6786       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
6787       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
6788     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
6789                Subtarget.hasStdExtF()) {
6790       SDValue FPConv =
6791           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
6792       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
6793     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
6794                isTypeLegal(Op0VT)) {
6795       // Custom-legalize bitcasts from fixed-length vector types to illegal
6796       // scalar types in order to improve codegen. Bitcast the vector to a
6797       // one-element vector type whose element type is the same as the result
6798       // type, and extract the first element.
6799       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
6800       if (isTypeLegal(BVT)) {
6801         SDValue BVec = DAG.getBitcast(BVT, Op0);
6802         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
6803                                       DAG.getConstant(0, DL, XLenVT)));
6804       }
6805     }
6806     break;
6807   }
6808   case RISCVISD::GREV:
6809   case RISCVISD::GORC:
6810   case RISCVISD::SHFL: {
6811     MVT VT = N->getSimpleValueType(0);
6812     MVT XLenVT = Subtarget.getXLenVT();
6813     assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
6814            "Unexpected custom legalisation");
6815     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6816     assert((Subtarget.hasStdExtZbp() ||
6817             (Subtarget.hasStdExtZbkb() && N->getOpcode() == RISCVISD::GREV &&
6818              N->getConstantOperandVal(1) == 7)) &&
6819            "Unexpected extension");
6820     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
6821     SDValue NewOp1 =
6822         DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, N->getOperand(1));
6823     SDValue NewRes = DAG.getNode(N->getOpcode(), DL, XLenVT, NewOp0, NewOp1);
6824     // ReplaceNodeResults requires we maintain the same type for the return
6825     // value.
6826     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes));
6827     break;
6828   }
6829   case ISD::BSWAP:
6830   case ISD::BITREVERSE: {
6831     MVT VT = N->getSimpleValueType(0);
6832     MVT XLenVT = Subtarget.getXLenVT();
6833     assert((VT == MVT::i8 || VT == MVT::i16 ||
6834             (VT == MVT::i32 && Subtarget.is64Bit())) &&
6835            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
6836     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
6837     unsigned Imm = VT.getSizeInBits() - 1;
6838     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
6839     if (N->getOpcode() == ISD::BSWAP)
6840       Imm &= ~0x7U;
6841     SDValue GREVI = DAG.getNode(RISCVISD::GREV, DL, XLenVT, NewOp0,
6842                                 DAG.getConstant(Imm, DL, XLenVT));
6843     // ReplaceNodeResults requires we maintain the same type for the return
6844     // value.
6845     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
6846     break;
6847   }
6848   case ISD::FSHL:
6849   case ISD::FSHR: {
6850     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6851            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
6852     SDValue NewOp0 =
6853         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6854     SDValue NewOp1 =
6855         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6856     SDValue NewShAmt =
6857         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6858     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
6859     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
6860     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
6861                            DAG.getConstant(0x1f, DL, MVT::i64));
6862     // fshl and fshr concatenate their operands in the same order. fsrw and fslw
6863     // instruction use different orders. fshl will return its first operand for
6864     // shift of zero, fshr will return its second operand. fsl and fsr both
6865     // return rs1 so the ISD nodes need to have different operand orders.
6866     // Shift amount is in rs2.
6867     unsigned Opc = RISCVISD::FSLW;
6868     if (N->getOpcode() == ISD::FSHR) {
6869       std::swap(NewOp0, NewOp1);
6870       Opc = RISCVISD::FSRW;
6871     }
6872     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
6873     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
6874     break;
6875   }
6876   case ISD::EXTRACT_VECTOR_ELT: {
6877     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
6878     // type is illegal (currently only vXi64 RV32).
6879     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
6880     // transferred to the destination register. We issue two of these from the
6881     // upper- and lower- halves of the SEW-bit vector element, slid down to the
6882     // first element.
6883     SDValue Vec = N->getOperand(0);
6884     SDValue Idx = N->getOperand(1);
6885 
6886     // The vector type hasn't been legalized yet so we can't issue target
6887     // specific nodes if it needs legalization.
6888     // FIXME: We would manually legalize if it's important.
6889     if (!isTypeLegal(Vec.getValueType()))
6890       return;
6891 
6892     MVT VecVT = Vec.getSimpleValueType();
6893 
6894     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
6895            VecVT.getVectorElementType() == MVT::i64 &&
6896            "Unexpected EXTRACT_VECTOR_ELT legalization");
6897 
6898     // If this is a fixed vector, we need to convert it to a scalable vector.
6899     MVT ContainerVT = VecVT;
6900     if (VecVT.isFixedLengthVector()) {
6901       ContainerVT = getContainerForFixedLengthVector(VecVT);
6902       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
6903     }
6904 
6905     MVT XLenVT = Subtarget.getXLenVT();
6906 
6907     // Use a VL of 1 to avoid processing more elements than we need.
6908     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6909     SDValue VL = DAG.getConstant(1, DL, XLenVT);
6910     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6911 
6912     // Unless the index is known to be 0, we must slide the vector down to get
6913     // the desired element into index 0.
6914     if (!isNullConstant(Idx)) {
6915       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
6916                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
6917     }
6918 
6919     // Extract the lower XLEN bits of the correct vector element.
6920     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6921 
6922     // To extract the upper XLEN bits of the vector element, shift the first
6923     // element right by 32 bits and re-extract the lower XLEN bits.
6924     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6925                                      DAG.getUNDEF(ContainerVT),
6926                                      DAG.getConstant(32, DL, XLenVT), VL);
6927     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
6928                                  ThirtyTwoV, Mask, VL);
6929 
6930     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6931 
6932     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6933     break;
6934   }
6935   case ISD::INTRINSIC_WO_CHAIN: {
6936     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
6937     switch (IntNo) {
6938     default:
6939       llvm_unreachable(
6940           "Don't know how to custom type legalize this intrinsic!");
6941     case Intrinsic::riscv_grev:
6942     case Intrinsic::riscv_gorc: {
6943       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6944              "Unexpected custom legalisation");
6945       SDValue NewOp1 =
6946           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6947       SDValue NewOp2 =
6948           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6949       unsigned Opc =
6950           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
6951       // If the control is a constant, promote the node by clearing any extra
6952       // bits bits in the control. isel will form greviw/gorciw if the result is
6953       // sign extended.
6954       if (isa<ConstantSDNode>(NewOp2)) {
6955         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
6956                              DAG.getConstant(0x1f, DL, MVT::i64));
6957         Opc = IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
6958       }
6959       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6960       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6961       break;
6962     }
6963     case Intrinsic::riscv_bcompress:
6964     case Intrinsic::riscv_bdecompress:
6965     case Intrinsic::riscv_bfp: {
6966       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6967              "Unexpected custom legalisation");
6968       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
6969       break;
6970     }
6971     case Intrinsic::riscv_fsl:
6972     case Intrinsic::riscv_fsr: {
6973       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6974              "Unexpected custom legalisation");
6975       SDValue NewOp1 =
6976           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6977       SDValue NewOp2 =
6978           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6979       SDValue NewOp3 =
6980           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3));
6981       unsigned Opc = getRISCVWOpcodeByIntr(IntNo);
6982       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2, NewOp3);
6983       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6984       break;
6985     }
6986     case Intrinsic::riscv_orc_b: {
6987       // Lower to the GORCI encoding for orc.b with the operand extended.
6988       SDValue NewOp =
6989           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6990       SDValue Res = DAG.getNode(RISCVISD::GORC, DL, MVT::i64, NewOp,
6991                                 DAG.getConstant(7, DL, MVT::i64));
6992       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6993       return;
6994     }
6995     case Intrinsic::riscv_shfl:
6996     case Intrinsic::riscv_unshfl: {
6997       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6998              "Unexpected custom legalisation");
6999       SDValue NewOp1 =
7000           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7001       SDValue NewOp2 =
7002           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7003       unsigned Opc =
7004           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
7005       // There is no (UN)SHFLIW. If the control word is a constant, we can use
7006       // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
7007       // will be shuffled the same way as the lower 32 bit half, but the two
7008       // halves won't cross.
7009       if (isa<ConstantSDNode>(NewOp2)) {
7010         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
7011                              DAG.getConstant(0xf, DL, MVT::i64));
7012         Opc =
7013             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
7014       }
7015       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
7016       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7017       break;
7018     }
7019     case Intrinsic::riscv_vmv_x_s: {
7020       EVT VT = N->getValueType(0);
7021       MVT XLenVT = Subtarget.getXLenVT();
7022       if (VT.bitsLT(XLenVT)) {
7023         // Simple case just extract using vmv.x.s and truncate.
7024         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
7025                                       Subtarget.getXLenVT(), N->getOperand(1));
7026         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
7027         return;
7028       }
7029 
7030       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
7031              "Unexpected custom legalization");
7032 
7033       // We need to do the move in two steps.
7034       SDValue Vec = N->getOperand(1);
7035       MVT VecVT = Vec.getSimpleValueType();
7036 
7037       // First extract the lower XLEN bits of the element.
7038       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7039 
7040       // To extract the upper XLEN bits of the vector element, shift the first
7041       // element right by 32 bits and re-extract the lower XLEN bits.
7042       SDValue VL = DAG.getConstant(1, DL, XLenVT);
7043       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
7044       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
7045       SDValue ThirtyTwoV =
7046           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
7047                       DAG.getConstant(32, DL, XLenVT), VL);
7048       SDValue LShr32 =
7049           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
7050       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7051 
7052       Results.push_back(
7053           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7054       break;
7055     }
7056     }
7057     break;
7058   }
7059   case ISD::VECREDUCE_ADD:
7060   case ISD::VECREDUCE_AND:
7061   case ISD::VECREDUCE_OR:
7062   case ISD::VECREDUCE_XOR:
7063   case ISD::VECREDUCE_SMAX:
7064   case ISD::VECREDUCE_UMAX:
7065   case ISD::VECREDUCE_SMIN:
7066   case ISD::VECREDUCE_UMIN:
7067     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
7068       Results.push_back(V);
7069     break;
7070   case ISD::VP_REDUCE_ADD:
7071   case ISD::VP_REDUCE_AND:
7072   case ISD::VP_REDUCE_OR:
7073   case ISD::VP_REDUCE_XOR:
7074   case ISD::VP_REDUCE_SMAX:
7075   case ISD::VP_REDUCE_UMAX:
7076   case ISD::VP_REDUCE_SMIN:
7077   case ISD::VP_REDUCE_UMIN:
7078     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
7079       Results.push_back(V);
7080     break;
7081   case ISD::FLT_ROUNDS_: {
7082     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
7083     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
7084     Results.push_back(Res.getValue(0));
7085     Results.push_back(Res.getValue(1));
7086     break;
7087   }
7088   }
7089 }
7090 
7091 // A structure to hold one of the bit-manipulation patterns below. Together, a
7092 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
7093 //   (or (and (shl x, 1), 0xAAAAAAAA),
7094 //       (and (srl x, 1), 0x55555555))
7095 struct RISCVBitmanipPat {
7096   SDValue Op;
7097   unsigned ShAmt;
7098   bool IsSHL;
7099 
7100   bool formsPairWith(const RISCVBitmanipPat &Other) const {
7101     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
7102   }
7103 };
7104 
7105 // Matches patterns of the form
7106 //   (and (shl x, C2), (C1 << C2))
7107 //   (and (srl x, C2), C1)
7108 //   (shl (and x, C1), C2)
7109 //   (srl (and x, (C1 << C2)), C2)
7110 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
7111 // The expected masks for each shift amount are specified in BitmanipMasks where
7112 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
7113 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
7114 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
7115 // XLen is 64.
7116 static Optional<RISCVBitmanipPat>
7117 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
7118   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
7119          "Unexpected number of masks");
7120   Optional<uint64_t> Mask;
7121   // Optionally consume a mask around the shift operation.
7122   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
7123     Mask = Op.getConstantOperandVal(1);
7124     Op = Op.getOperand(0);
7125   }
7126   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
7127     return None;
7128   bool IsSHL = Op.getOpcode() == ISD::SHL;
7129 
7130   if (!isa<ConstantSDNode>(Op.getOperand(1)))
7131     return None;
7132   uint64_t ShAmt = Op.getConstantOperandVal(1);
7133 
7134   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7135   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
7136     return None;
7137   // If we don't have enough masks for 64 bit, then we must be trying to
7138   // match SHFL so we're only allowed to shift 1/4 of the width.
7139   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
7140     return None;
7141 
7142   SDValue Src = Op.getOperand(0);
7143 
7144   // The expected mask is shifted left when the AND is found around SHL
7145   // patterns.
7146   //   ((x >> 1) & 0x55555555)
7147   //   ((x << 1) & 0xAAAAAAAA)
7148   bool SHLExpMask = IsSHL;
7149 
7150   if (!Mask) {
7151     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
7152     // the mask is all ones: consume that now.
7153     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
7154       Mask = Src.getConstantOperandVal(1);
7155       Src = Src.getOperand(0);
7156       // The expected mask is now in fact shifted left for SRL, so reverse the
7157       // decision.
7158       //   ((x & 0xAAAAAAAA) >> 1)
7159       //   ((x & 0x55555555) << 1)
7160       SHLExpMask = !SHLExpMask;
7161     } else {
7162       // Use a default shifted mask of all-ones if there's no AND, truncated
7163       // down to the expected width. This simplifies the logic later on.
7164       Mask = maskTrailingOnes<uint64_t>(Width);
7165       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
7166     }
7167   }
7168 
7169   unsigned MaskIdx = Log2_32(ShAmt);
7170   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7171 
7172   if (SHLExpMask)
7173     ExpMask <<= ShAmt;
7174 
7175   if (Mask != ExpMask)
7176     return None;
7177 
7178   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
7179 }
7180 
7181 // Matches any of the following bit-manipulation patterns:
7182 //   (and (shl x, 1), (0x55555555 << 1))
7183 //   (and (srl x, 1), 0x55555555)
7184 //   (shl (and x, 0x55555555), 1)
7185 //   (srl (and x, (0x55555555 << 1)), 1)
7186 // where the shift amount and mask may vary thus:
7187 //   [1]  = 0x55555555 / 0xAAAAAAAA
7188 //   [2]  = 0x33333333 / 0xCCCCCCCC
7189 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
7190 //   [8]  = 0x00FF00FF / 0xFF00FF00
7191 //   [16] = 0x0000FFFF / 0xFFFFFFFF
7192 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
7193 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
7194   // These are the unshifted masks which we use to match bit-manipulation
7195   // patterns. They may be shifted left in certain circumstances.
7196   static const uint64_t BitmanipMasks[] = {
7197       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
7198       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
7199 
7200   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7201 }
7202 
7203 // Match the following pattern as a GREVI(W) operation
7204 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
7205 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
7206                                const RISCVSubtarget &Subtarget) {
7207   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7208   EVT VT = Op.getValueType();
7209 
7210   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7211     auto LHS = matchGREVIPat(Op.getOperand(0));
7212     auto RHS = matchGREVIPat(Op.getOperand(1));
7213     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
7214       SDLoc DL(Op);
7215       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
7216                          DAG.getConstant(LHS->ShAmt, DL, VT));
7217     }
7218   }
7219   return SDValue();
7220 }
7221 
7222 // Matches any the following pattern as a GORCI(W) operation
7223 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
7224 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
7225 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
7226 // Note that with the variant of 3.,
7227 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
7228 // the inner pattern will first be matched as GREVI and then the outer
7229 // pattern will be matched to GORC via the first rule above.
7230 // 4.  (or (rotl/rotr x, bitwidth/2), x)
7231 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
7232                                const RISCVSubtarget &Subtarget) {
7233   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7234   EVT VT = Op.getValueType();
7235 
7236   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7237     SDLoc DL(Op);
7238     SDValue Op0 = Op.getOperand(0);
7239     SDValue Op1 = Op.getOperand(1);
7240 
7241     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
7242       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
7243           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
7244           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
7245         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
7246       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
7247       if ((Reverse.getOpcode() == ISD::ROTL ||
7248            Reverse.getOpcode() == ISD::ROTR) &&
7249           Reverse.getOperand(0) == X &&
7250           isa<ConstantSDNode>(Reverse.getOperand(1))) {
7251         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
7252         if (RotAmt == (VT.getSizeInBits() / 2))
7253           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
7254                              DAG.getConstant(RotAmt, DL, VT));
7255       }
7256       return SDValue();
7257     };
7258 
7259     // Check for either commutable permutation of (or (GREVI x, shamt), x)
7260     if (SDValue V = MatchOROfReverse(Op0, Op1))
7261       return V;
7262     if (SDValue V = MatchOROfReverse(Op1, Op0))
7263       return V;
7264 
7265     // OR is commutable so canonicalize its OR operand to the left
7266     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
7267       std::swap(Op0, Op1);
7268     if (Op0.getOpcode() != ISD::OR)
7269       return SDValue();
7270     SDValue OrOp0 = Op0.getOperand(0);
7271     SDValue OrOp1 = Op0.getOperand(1);
7272     auto LHS = matchGREVIPat(OrOp0);
7273     // OR is commutable so swap the operands and try again: x might have been
7274     // on the left
7275     if (!LHS) {
7276       std::swap(OrOp0, OrOp1);
7277       LHS = matchGREVIPat(OrOp0);
7278     }
7279     auto RHS = matchGREVIPat(Op1);
7280     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
7281       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
7282                          DAG.getConstant(LHS->ShAmt, DL, VT));
7283     }
7284   }
7285   return SDValue();
7286 }
7287 
7288 // Matches any of the following bit-manipulation patterns:
7289 //   (and (shl x, 1), (0x22222222 << 1))
7290 //   (and (srl x, 1), 0x22222222)
7291 //   (shl (and x, 0x22222222), 1)
7292 //   (srl (and x, (0x22222222 << 1)), 1)
7293 // where the shift amount and mask may vary thus:
7294 //   [1]  = 0x22222222 / 0x44444444
7295 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
7296 //   [4]  = 0x00F000F0 / 0x0F000F00
7297 //   [8]  = 0x0000FF00 / 0x00FF0000
7298 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
7299 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
7300   // These are the unshifted masks which we use to match bit-manipulation
7301   // patterns. They may be shifted left in certain circumstances.
7302   static const uint64_t BitmanipMasks[] = {
7303       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
7304       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
7305 
7306   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7307 }
7308 
7309 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
7310 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
7311                                const RISCVSubtarget &Subtarget) {
7312   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7313   EVT VT = Op.getValueType();
7314 
7315   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
7316     return SDValue();
7317 
7318   SDValue Op0 = Op.getOperand(0);
7319   SDValue Op1 = Op.getOperand(1);
7320 
7321   // Or is commutable so canonicalize the second OR to the LHS.
7322   if (Op0.getOpcode() != ISD::OR)
7323     std::swap(Op0, Op1);
7324   if (Op0.getOpcode() != ISD::OR)
7325     return SDValue();
7326 
7327   // We found an inner OR, so our operands are the operands of the inner OR
7328   // and the other operand of the outer OR.
7329   SDValue A = Op0.getOperand(0);
7330   SDValue B = Op0.getOperand(1);
7331   SDValue C = Op1;
7332 
7333   auto Match1 = matchSHFLPat(A);
7334   auto Match2 = matchSHFLPat(B);
7335 
7336   // If neither matched, we failed.
7337   if (!Match1 && !Match2)
7338     return SDValue();
7339 
7340   // We had at least one match. if one failed, try the remaining C operand.
7341   if (!Match1) {
7342     std::swap(A, C);
7343     Match1 = matchSHFLPat(A);
7344     if (!Match1)
7345       return SDValue();
7346   } else if (!Match2) {
7347     std::swap(B, C);
7348     Match2 = matchSHFLPat(B);
7349     if (!Match2)
7350       return SDValue();
7351   }
7352   assert(Match1 && Match2);
7353 
7354   // Make sure our matches pair up.
7355   if (!Match1->formsPairWith(*Match2))
7356     return SDValue();
7357 
7358   // All the remains is to make sure C is an AND with the same input, that masks
7359   // out the bits that are being shuffled.
7360   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
7361       C.getOperand(0) != Match1->Op)
7362     return SDValue();
7363 
7364   uint64_t Mask = C.getConstantOperandVal(1);
7365 
7366   static const uint64_t BitmanipMasks[] = {
7367       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
7368       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
7369   };
7370 
7371   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7372   unsigned MaskIdx = Log2_32(Match1->ShAmt);
7373   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7374 
7375   if (Mask != ExpMask)
7376     return SDValue();
7377 
7378   SDLoc DL(Op);
7379   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7380                      DAG.getConstant(Match1->ShAmt, DL, VT));
7381 }
7382 
7383 // Optimize (add (shl x, c0), (shl y, c1)) ->
7384 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
7385 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7386                                   const RISCVSubtarget &Subtarget) {
7387   // Perform this optimization only in the zba extension.
7388   if (!Subtarget.hasStdExtZba())
7389     return SDValue();
7390 
7391   // Skip for vector types and larger types.
7392   EVT VT = N->getValueType(0);
7393   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7394     return SDValue();
7395 
7396   // The two operand nodes must be SHL and have no other use.
7397   SDValue N0 = N->getOperand(0);
7398   SDValue N1 = N->getOperand(1);
7399   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7400       !N0->hasOneUse() || !N1->hasOneUse())
7401     return SDValue();
7402 
7403   // Check c0 and c1.
7404   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7405   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7406   if (!N0C || !N1C)
7407     return SDValue();
7408   int64_t C0 = N0C->getSExtValue();
7409   int64_t C1 = N1C->getSExtValue();
7410   if (C0 <= 0 || C1 <= 0)
7411     return SDValue();
7412 
7413   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7414   int64_t Bits = std::min(C0, C1);
7415   int64_t Diff = std::abs(C0 - C1);
7416   if (Diff != 1 && Diff != 2 && Diff != 3)
7417     return SDValue();
7418 
7419   // Build nodes.
7420   SDLoc DL(N);
7421   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7422   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7423   SDValue NA0 =
7424       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7425   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7426   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7427 }
7428 
7429 // Combine
7430 // ROTR ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7431 // ROTL ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7432 // ROTR ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7433 // ROTL ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7434 // RORW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7435 // ROLW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7436 // The grev patterns represents BSWAP.
7437 // FIXME: This can be generalized to any GREV. We just need to toggle the MSB
7438 // off the grev.
7439 static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG,
7440                                           const RISCVSubtarget &Subtarget) {
7441   bool IsWInstruction =
7442       N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW;
7443   assert((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL ||
7444           IsWInstruction) &&
7445          "Unexpected opcode!");
7446   SDValue Src = N->getOperand(0);
7447   EVT VT = N->getValueType(0);
7448   SDLoc DL(N);
7449 
7450   if (!Subtarget.hasStdExtZbp() || Src.getOpcode() != RISCVISD::GREV)
7451     return SDValue();
7452 
7453   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7454       !isa<ConstantSDNode>(Src.getOperand(1)))
7455     return SDValue();
7456 
7457   unsigned BitWidth = IsWInstruction ? 32 : VT.getSizeInBits();
7458   assert(isPowerOf2_32(BitWidth) && "Expected a power of 2");
7459 
7460   // Needs to be a rotate by half the bitwidth for ROTR/ROTL or by 16 for
7461   // RORW/ROLW. And the grev should be the encoding for bswap for this width.
7462   unsigned ShAmt1 = N->getConstantOperandVal(1);
7463   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7464   if (BitWidth < 32 || ShAmt1 != (BitWidth / 2) || ShAmt2 != (BitWidth - 8))
7465     return SDValue();
7466 
7467   Src = Src.getOperand(0);
7468 
7469   // Toggle bit the MSB of the shift.
7470   unsigned CombinedShAmt = ShAmt1 ^ ShAmt2;
7471   if (CombinedShAmt == 0)
7472     return Src;
7473 
7474   SDValue Res = DAG.getNode(
7475       RISCVISD::GREV, DL, VT, Src,
7476       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7477   if (!IsWInstruction)
7478     return Res;
7479 
7480   // Sign extend the result to match the behavior of the rotate. This will be
7481   // selected to GREVIW in isel.
7482   return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Res,
7483                      DAG.getValueType(MVT::i32));
7484 }
7485 
7486 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
7487 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
7488 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
7489 // not undo itself, but they are redundant.
7490 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7491   bool IsGORC = N->getOpcode() == RISCVISD::GORC;
7492   assert((IsGORC || N->getOpcode() == RISCVISD::GREV) && "Unexpected opcode");
7493   SDValue Src = N->getOperand(0);
7494 
7495   if (Src.getOpcode() != N->getOpcode())
7496     return SDValue();
7497 
7498   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7499       !isa<ConstantSDNode>(Src.getOperand(1)))
7500     return SDValue();
7501 
7502   unsigned ShAmt1 = N->getConstantOperandVal(1);
7503   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7504   Src = Src.getOperand(0);
7505 
7506   unsigned CombinedShAmt;
7507   if (IsGORC)
7508     CombinedShAmt = ShAmt1 | ShAmt2;
7509   else
7510     CombinedShAmt = ShAmt1 ^ ShAmt2;
7511 
7512   if (CombinedShAmt == 0)
7513     return Src;
7514 
7515   SDLoc DL(N);
7516   return DAG.getNode(
7517       N->getOpcode(), DL, N->getValueType(0), Src,
7518       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7519 }
7520 
7521 // Combine a constant select operand into its use:
7522 //
7523 // (and (select cond, -1, c), x)
7524 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7525 // (or  (select cond, 0, c), x)
7526 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7527 // (xor (select cond, 0, c), x)
7528 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7529 // (add (select cond, 0, c), x)
7530 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7531 // (sub x, (select cond, 0, c))
7532 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7533 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7534                                    SelectionDAG &DAG, bool AllOnes) {
7535   EVT VT = N->getValueType(0);
7536 
7537   // Skip vectors.
7538   if (VT.isVector())
7539     return SDValue();
7540 
7541   if ((Slct.getOpcode() != ISD::SELECT &&
7542        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7543       !Slct.hasOneUse())
7544     return SDValue();
7545 
7546   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7547     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7548   };
7549 
7550   bool SwapSelectOps;
7551   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7552   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7553   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7554   SDValue NonConstantVal;
7555   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7556     SwapSelectOps = false;
7557     NonConstantVal = FalseVal;
7558   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7559     SwapSelectOps = true;
7560     NonConstantVal = TrueVal;
7561   } else
7562     return SDValue();
7563 
7564   // Slct is now know to be the desired identity constant when CC is true.
7565   TrueVal = OtherOp;
7566   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7567   // Unless SwapSelectOps says the condition should be false.
7568   if (SwapSelectOps)
7569     std::swap(TrueVal, FalseVal);
7570 
7571   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7572     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7573                        {Slct.getOperand(0), Slct.getOperand(1),
7574                         Slct.getOperand(2), TrueVal, FalseVal});
7575 
7576   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7577                      {Slct.getOperand(0), TrueVal, FalseVal});
7578 }
7579 
7580 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7581 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7582                                               bool AllOnes) {
7583   SDValue N0 = N->getOperand(0);
7584   SDValue N1 = N->getOperand(1);
7585   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7586     return Result;
7587   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7588     return Result;
7589   return SDValue();
7590 }
7591 
7592 // Transform (add (mul x, c0), c1) ->
7593 //           (add (mul (add x, c1/c0), c0), c1%c0).
7594 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7595 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7596 // to an infinite loop in DAGCombine if transformed.
7597 // Or transform (add (mul x, c0), c1) ->
7598 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7599 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7600 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7601 // lead to an infinite loop in DAGCombine if transformed.
7602 // Or transform (add (mul x, c0), c1) ->
7603 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7604 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7605 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7606 // lead to an infinite loop in DAGCombine if transformed.
7607 // Or transform (add (mul x, c0), c1) ->
7608 //              (mul (add x, c1/c0), c0).
7609 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
7610 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7611                                      const RISCVSubtarget &Subtarget) {
7612   // Skip for vector types and larger types.
7613   EVT VT = N->getValueType(0);
7614   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7615     return SDValue();
7616   // The first operand node must be a MUL and has no other use.
7617   SDValue N0 = N->getOperand(0);
7618   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7619     return SDValue();
7620   // Check if c0 and c1 match above conditions.
7621   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7622   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7623   if (!N0C || !N1C)
7624     return SDValue();
7625   // If N0C has multiple uses it's possible one of the cases in
7626   // DAGCombiner::isMulAddWithConstProfitable will be true, which would result
7627   // in an infinite loop.
7628   if (!N0C->hasOneUse())
7629     return SDValue();
7630   int64_t C0 = N0C->getSExtValue();
7631   int64_t C1 = N1C->getSExtValue();
7632   int64_t CA, CB;
7633   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
7634     return SDValue();
7635   // Search for proper CA (non-zero) and CB that both are simm12.
7636   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
7637       !isInt<12>(C0 * (C1 / C0))) {
7638     CA = C1 / C0;
7639     CB = C1 % C0;
7640   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
7641              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
7642     CA = C1 / C0 + 1;
7643     CB = C1 % C0 - C0;
7644   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
7645              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
7646     CA = C1 / C0 - 1;
7647     CB = C1 % C0 + C0;
7648   } else
7649     return SDValue();
7650   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
7651   SDLoc DL(N);
7652   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
7653                              DAG.getConstant(CA, DL, VT));
7654   SDValue New1 =
7655       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
7656   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
7657 }
7658 
7659 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
7660                                  const RISCVSubtarget &Subtarget) {
7661   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
7662     return V;
7663   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
7664     return V;
7665   // fold (add (select lhs, rhs, cc, 0, y), x) ->
7666   //      (select lhs, rhs, cc, x, (add x, y))
7667   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7668 }
7669 
7670 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
7671   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
7672   //      (select lhs, rhs, cc, x, (sub x, y))
7673   SDValue N0 = N->getOperand(0);
7674   SDValue N1 = N->getOperand(1);
7675   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
7676 }
7677 
7678 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
7679   // fold (and (select lhs, rhs, cc, -1, y), x) ->
7680   //      (select lhs, rhs, cc, x, (and x, y))
7681   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
7682 }
7683 
7684 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
7685                                 const RISCVSubtarget &Subtarget) {
7686   if (Subtarget.hasStdExtZbp()) {
7687     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
7688       return GREV;
7689     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
7690       return GORC;
7691     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
7692       return SHFL;
7693   }
7694 
7695   // fold (or (select cond, 0, y), x) ->
7696   //      (select cond, x, (or x, y))
7697   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7698 }
7699 
7700 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
7701   // fold (xor (select cond, 0, y), x) ->
7702   //      (select cond, x, (xor x, y))
7703   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7704 }
7705 
7706 static SDValue
7707 performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
7708                                 const RISCVSubtarget &Subtarget) {
7709   SDValue Src = N->getOperand(0);
7710   EVT VT = N->getValueType(0);
7711 
7712   // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X)
7713   if (Src.getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
7714       cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
7715     return DAG.getNode(RISCVISD::FMV_X_SIGNEXTH, SDLoc(N), VT,
7716                        Src.getOperand(0));
7717 
7718   // Fold (i64 (sext_inreg (abs X), i32)) ->
7719   // (i64 (smax (sext_inreg (neg X), i32), X)) if X has more than 32 sign bits.
7720   // The (sext_inreg (neg X), i32) will be selected to negw by isel. This
7721   // pattern occurs after type legalization of (i32 (abs X)) on RV64 if the user
7722   // of the (i32 (abs X)) is a sext or setcc or something else that causes type
7723   // legalization to add a sext_inreg after the abs. The (i32 (abs X)) will have
7724   // been type legalized to (i64 (abs (sext_inreg X, i32))), but the sext_inreg
7725   // may get combined into an earlier operation so we need to use
7726   // ComputeNumSignBits.
7727   // NOTE: (i64 (sext_inreg (abs X), i32)) can also be created for
7728   // (i64 (ashr (shl (abs X), 32), 32)) without any type legalization so
7729   // we can't assume that X has 33 sign bits. We must check.
7730   if (Subtarget.hasStdExtZbb() && Subtarget.is64Bit() &&
7731       Src.getOpcode() == ISD::ABS && Src.hasOneUse() && VT == MVT::i64 &&
7732       cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32 &&
7733       DAG.ComputeNumSignBits(Src.getOperand(0)) > 32) {
7734     SDLoc DL(N);
7735     SDValue Freeze = DAG.getFreeze(Src.getOperand(0));
7736     SDValue Neg =
7737         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, MVT::i64), Freeze);
7738     Neg = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Neg,
7739                       DAG.getValueType(MVT::i32));
7740     return DAG.getNode(ISD::SMAX, DL, MVT::i64, Freeze, Neg);
7741   }
7742 
7743   return SDValue();
7744 }
7745 
7746 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to
7747 // vwadd(u).vv/vx or vwsub(u).vv/vx.
7748 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG,
7749                                              bool Commute = false) {
7750   assert((N->getOpcode() == RISCVISD::ADD_VL ||
7751           N->getOpcode() == RISCVISD::SUB_VL) &&
7752          "Unexpected opcode");
7753   bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL;
7754   SDValue Op0 = N->getOperand(0);
7755   SDValue Op1 = N->getOperand(1);
7756   if (Commute)
7757     std::swap(Op0, Op1);
7758 
7759   MVT VT = N->getSimpleValueType(0);
7760 
7761   // Determine the narrow size for a widening add/sub.
7762   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7763   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7764                                   VT.getVectorElementCount());
7765 
7766   SDValue Mask = N->getOperand(2);
7767   SDValue VL = N->getOperand(3);
7768 
7769   SDLoc DL(N);
7770 
7771   // If the RHS is a sext or zext, we can form a widening op.
7772   if ((Op1.getOpcode() == RISCVISD::VZEXT_VL ||
7773        Op1.getOpcode() == RISCVISD::VSEXT_VL) &&
7774       Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) {
7775     unsigned ExtOpc = Op1.getOpcode();
7776     Op1 = Op1.getOperand(0);
7777     // Re-introduce narrower extends if needed.
7778     if (Op1.getValueType() != NarrowVT)
7779       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7780 
7781     unsigned WOpc;
7782     if (ExtOpc == RISCVISD::VSEXT_VL)
7783       WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL;
7784     else
7785       WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL;
7786 
7787     return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL);
7788   }
7789 
7790   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
7791   // sext/zext?
7792 
7793   return SDValue();
7794 }
7795 
7796 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or
7797 // vwsub(u).vv/vx.
7798 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) {
7799   SDValue Op0 = N->getOperand(0);
7800   SDValue Op1 = N->getOperand(1);
7801   SDValue Mask = N->getOperand(2);
7802   SDValue VL = N->getOperand(3);
7803 
7804   MVT VT = N->getSimpleValueType(0);
7805   MVT NarrowVT = Op1.getSimpleValueType();
7806   unsigned NarrowSize = NarrowVT.getScalarSizeInBits();
7807 
7808   unsigned VOpc;
7809   switch (N->getOpcode()) {
7810   default: llvm_unreachable("Unexpected opcode");
7811   case RISCVISD::VWADD_W_VL:  VOpc = RISCVISD::VWADD_VL;  break;
7812   case RISCVISD::VWSUB_W_VL:  VOpc = RISCVISD::VWSUB_VL;  break;
7813   case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break;
7814   case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break;
7815   }
7816 
7817   bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL ||
7818                   N->getOpcode() == RISCVISD::VWSUB_W_VL;
7819 
7820   SDLoc DL(N);
7821 
7822   // If the LHS is a sext or zext, we can narrow this op to the same size as
7823   // the RHS.
7824   if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) ||
7825        (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) &&
7826       Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) {
7827     unsigned ExtOpc = Op0.getOpcode();
7828     Op0 = Op0.getOperand(0);
7829     // Re-introduce narrower extends if needed.
7830     if (Op0.getValueType() != NarrowVT)
7831       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7832     return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL);
7833   }
7834 
7835   bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL ||
7836                N->getOpcode() == RISCVISD::VWADDU_W_VL;
7837 
7838   // Look for splats on the left hand side of a vwadd(u).wv. We might be able
7839   // to commute and use a vwadd(u).vx instead.
7840   if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL &&
7841       Op0.getOperand(0).isUndef() && Op0.getOperand(2) == VL) {
7842     Op0 = Op0.getOperand(1);
7843 
7844     // See if have enough sign bits or zero bits in the scalar to use a
7845     // widening add/sub by splatting to smaller element size.
7846     unsigned EltBits = VT.getScalarSizeInBits();
7847     unsigned ScalarBits = Op0.getValueSizeInBits();
7848     // Make sure we're getting all element bits from the scalar register.
7849     // FIXME: Support implicit sign extension of vmv.v.x?
7850     if (ScalarBits < EltBits)
7851       return SDValue();
7852 
7853     if (IsSigned) {
7854       if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize))
7855         return SDValue();
7856     } else {
7857       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7858       if (!DAG.MaskedValueIsZero(Op0, Mask))
7859         return SDValue();
7860     }
7861 
7862     Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
7863                       DAG.getUNDEF(NarrowVT), Op0, VL);
7864     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
7865   }
7866 
7867   return SDValue();
7868 }
7869 
7870 // Try to form VWMUL, VWMULU or VWMULSU.
7871 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
7872 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
7873                                        bool Commute) {
7874   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
7875   SDValue Op0 = N->getOperand(0);
7876   SDValue Op1 = N->getOperand(1);
7877   if (Commute)
7878     std::swap(Op0, Op1);
7879 
7880   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
7881   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
7882   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
7883   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
7884     return SDValue();
7885 
7886   SDValue Mask = N->getOperand(2);
7887   SDValue VL = N->getOperand(3);
7888 
7889   // Make sure the mask and VL match.
7890   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
7891     return SDValue();
7892 
7893   MVT VT = N->getSimpleValueType(0);
7894 
7895   // Determine the narrow size for a widening multiply.
7896   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7897   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7898                                   VT.getVectorElementCount());
7899 
7900   SDLoc DL(N);
7901 
7902   // See if the other operand is the same opcode.
7903   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
7904     if (!Op1.hasOneUse())
7905       return SDValue();
7906 
7907     // Make sure the mask and VL match.
7908     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
7909       return SDValue();
7910 
7911     Op1 = Op1.getOperand(0);
7912   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
7913     // The operand is a splat of a scalar.
7914 
7915     // The pasthru must be undef for tail agnostic
7916     if (!Op1.getOperand(0).isUndef())
7917       return SDValue();
7918     // The VL must be the same.
7919     if (Op1.getOperand(2) != VL)
7920       return SDValue();
7921 
7922     // Get the scalar value.
7923     Op1 = Op1.getOperand(1);
7924 
7925     // See if have enough sign bits or zero bits in the scalar to use a
7926     // widening multiply by splatting to smaller element size.
7927     unsigned EltBits = VT.getScalarSizeInBits();
7928     unsigned ScalarBits = Op1.getValueSizeInBits();
7929     // Make sure we're getting all element bits from the scalar register.
7930     // FIXME: Support implicit sign extension of vmv.v.x?
7931     if (ScalarBits < EltBits)
7932       return SDValue();
7933 
7934     // If the LHS is a sign extend, try to use vwmul.
7935     if (IsSignExt && DAG.ComputeNumSignBits(Op1) > (ScalarBits - NarrowSize)) {
7936       // Can use vwmul.
7937     } else {
7938       // Otherwise try to use vwmulu or vwmulsu.
7939       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7940       if (DAG.MaskedValueIsZero(Op1, Mask))
7941         IsVWMULSU = IsSignExt;
7942       else
7943         return SDValue();
7944     }
7945 
7946     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
7947                       DAG.getUNDEF(NarrowVT), Op1, VL);
7948   } else
7949     return SDValue();
7950 
7951   Op0 = Op0.getOperand(0);
7952 
7953   // Re-introduce narrower extends if needed.
7954   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
7955   if (Op0.getValueType() != NarrowVT)
7956     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7957   // vwmulsu requires second operand to be zero extended.
7958   ExtOpc = IsVWMULSU ? RISCVISD::VZEXT_VL : ExtOpc;
7959   if (Op1.getValueType() != NarrowVT)
7960     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7961 
7962   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
7963   if (!IsVWMULSU)
7964     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
7965   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
7966 }
7967 
7968 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
7969   switch (Op.getOpcode()) {
7970   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
7971   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
7972   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
7973   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
7974   case ISD::FROUND:     return RISCVFPRndMode::RMM;
7975   }
7976 
7977   return RISCVFPRndMode::Invalid;
7978 }
7979 
7980 // Fold
7981 //   (fp_to_int (froundeven X)) -> fcvt X, rne
7982 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
7983 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
7984 //   (fp_to_int (fceil X))      -> fcvt X, rup
7985 //   (fp_to_int (fround X))     -> fcvt X, rmm
7986 static SDValue performFP_TO_INTCombine(SDNode *N,
7987                                        TargetLowering::DAGCombinerInfo &DCI,
7988                                        const RISCVSubtarget &Subtarget) {
7989   SelectionDAG &DAG = DCI.DAG;
7990   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7991   MVT XLenVT = Subtarget.getXLenVT();
7992 
7993   // Only handle XLen or i32 types. Other types narrower than XLen will
7994   // eventually be legalized to XLenVT.
7995   EVT VT = N->getValueType(0);
7996   if (VT != MVT::i32 && VT != XLenVT)
7997     return SDValue();
7998 
7999   SDValue Src = N->getOperand(0);
8000 
8001   // Ensure the FP type is also legal.
8002   if (!TLI.isTypeLegal(Src.getValueType()))
8003     return SDValue();
8004 
8005   // Don't do this for f16 with Zfhmin and not Zfh.
8006   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8007     return SDValue();
8008 
8009   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8010   if (FRM == RISCVFPRndMode::Invalid)
8011     return SDValue();
8012 
8013   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
8014 
8015   unsigned Opc;
8016   if (VT == XLenVT)
8017     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8018   else
8019     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8020 
8021   SDLoc DL(N);
8022   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
8023                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8024   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
8025 }
8026 
8027 // Fold
8028 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
8029 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
8030 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
8031 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
8032 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
8033 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
8034                                        TargetLowering::DAGCombinerInfo &DCI,
8035                                        const RISCVSubtarget &Subtarget) {
8036   SelectionDAG &DAG = DCI.DAG;
8037   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8038   MVT XLenVT = Subtarget.getXLenVT();
8039 
8040   // Only handle XLen types. Other types narrower than XLen will eventually be
8041   // legalized to XLenVT.
8042   EVT DstVT = N->getValueType(0);
8043   if (DstVT != XLenVT)
8044     return SDValue();
8045 
8046   SDValue Src = N->getOperand(0);
8047 
8048   // Ensure the FP type is also legal.
8049   if (!TLI.isTypeLegal(Src.getValueType()))
8050     return SDValue();
8051 
8052   // Don't do this for f16 with Zfhmin and not Zfh.
8053   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8054     return SDValue();
8055 
8056   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
8057 
8058   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8059   if (FRM == RISCVFPRndMode::Invalid)
8060     return SDValue();
8061 
8062   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
8063 
8064   unsigned Opc;
8065   if (SatVT == DstVT)
8066     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8067   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
8068     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8069   else
8070     return SDValue();
8071   // FIXME: Support other SatVTs by clamping before or after the conversion.
8072 
8073   Src = Src.getOperand(0);
8074 
8075   SDLoc DL(N);
8076   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
8077                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8078 
8079   // RISCV FP-to-int conversions saturate to the destination register size, but
8080   // don't produce 0 for nan.
8081   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
8082   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
8083 }
8084 
8085 // Combine (bitreverse (bswap X)) to the BREV8 GREVI encoding if the type is
8086 // smaller than XLenVT.
8087 static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
8088                                         const RISCVSubtarget &Subtarget) {
8089   assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
8090 
8091   SDValue Src = N->getOperand(0);
8092   if (Src.getOpcode() != ISD::BSWAP)
8093     return SDValue();
8094 
8095   EVT VT = N->getValueType(0);
8096   if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() ||
8097       !isPowerOf2_32(VT.getSizeInBits()))
8098     return SDValue();
8099 
8100   SDLoc DL(N);
8101   return DAG.getNode(RISCVISD::GREV, DL, VT, Src.getOperand(0),
8102                      DAG.getConstant(7, DL, VT));
8103 }
8104 
8105 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
8106                                                DAGCombinerInfo &DCI) const {
8107   SelectionDAG &DAG = DCI.DAG;
8108 
8109   // Helper to call SimplifyDemandedBits on an operand of N where only some low
8110   // bits are demanded. N will be added to the Worklist if it was not deleted.
8111   // Caller should return SDValue(N, 0) if this returns true.
8112   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
8113     SDValue Op = N->getOperand(OpNo);
8114     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
8115     if (!SimplifyDemandedBits(Op, Mask, DCI))
8116       return false;
8117 
8118     if (N->getOpcode() != ISD::DELETED_NODE)
8119       DCI.AddToWorklist(N);
8120     return true;
8121   };
8122 
8123   switch (N->getOpcode()) {
8124   default:
8125     break;
8126   case RISCVISD::SplitF64: {
8127     SDValue Op0 = N->getOperand(0);
8128     // If the input to SplitF64 is just BuildPairF64 then the operation is
8129     // redundant. Instead, use BuildPairF64's operands directly.
8130     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
8131       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
8132 
8133     if (Op0->isUndef()) {
8134       SDValue Lo = DAG.getUNDEF(MVT::i32);
8135       SDValue Hi = DAG.getUNDEF(MVT::i32);
8136       return DCI.CombineTo(N, Lo, Hi);
8137     }
8138 
8139     SDLoc DL(N);
8140 
8141     // It's cheaper to materialise two 32-bit integers than to load a double
8142     // from the constant pool and transfer it to integer registers through the
8143     // stack.
8144     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
8145       APInt V = C->getValueAPF().bitcastToAPInt();
8146       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
8147       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
8148       return DCI.CombineTo(N, Lo, Hi);
8149     }
8150 
8151     // This is a target-specific version of a DAGCombine performed in
8152     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8153     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8154     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8155     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8156         !Op0.getNode()->hasOneUse())
8157       break;
8158     SDValue NewSplitF64 =
8159         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
8160                     Op0.getOperand(0));
8161     SDValue Lo = NewSplitF64.getValue(0);
8162     SDValue Hi = NewSplitF64.getValue(1);
8163     APInt SignBit = APInt::getSignMask(32);
8164     if (Op0.getOpcode() == ISD::FNEG) {
8165       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
8166                                   DAG.getConstant(SignBit, DL, MVT::i32));
8167       return DCI.CombineTo(N, Lo, NewHi);
8168     }
8169     assert(Op0.getOpcode() == ISD::FABS);
8170     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
8171                                 DAG.getConstant(~SignBit, DL, MVT::i32));
8172     return DCI.CombineTo(N, Lo, NewHi);
8173   }
8174   case RISCVISD::SLLW:
8175   case RISCVISD::SRAW:
8176   case RISCVISD::SRLW: {
8177     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8178     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8179         SimplifyDemandedLowBitsHelper(1, 5))
8180       return SDValue(N, 0);
8181 
8182     break;
8183   }
8184   case ISD::ROTR:
8185   case ISD::ROTL:
8186   case RISCVISD::RORW:
8187   case RISCVISD::ROLW: {
8188     if (N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW) {
8189       // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8190       if (SimplifyDemandedLowBitsHelper(0, 32) ||
8191           SimplifyDemandedLowBitsHelper(1, 5))
8192         return SDValue(N, 0);
8193     }
8194 
8195     return combineROTR_ROTL_RORW_ROLW(N, DAG, Subtarget);
8196   }
8197   case RISCVISD::CLZW:
8198   case RISCVISD::CTZW: {
8199     // Only the lower 32 bits of the first operand are read
8200     if (SimplifyDemandedLowBitsHelper(0, 32))
8201       return SDValue(N, 0);
8202     break;
8203   }
8204   case RISCVISD::GREV:
8205   case RISCVISD::GORC: {
8206     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
8207     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8208     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8209     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
8210       return SDValue(N, 0);
8211 
8212     return combineGREVI_GORCI(N, DAG);
8213   }
8214   case RISCVISD::GREVW:
8215   case RISCVISD::GORCW: {
8216     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8217     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8218         SimplifyDemandedLowBitsHelper(1, 5))
8219       return SDValue(N, 0);
8220 
8221     break;
8222   }
8223   case RISCVISD::SHFL:
8224   case RISCVISD::UNSHFL: {
8225     // Only the lower log2(Bitwidth)-1 bits of the the shift amount are read.
8226     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8227     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8228     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
8229       return SDValue(N, 0);
8230 
8231     break;
8232   }
8233   case RISCVISD::SHFLW:
8234   case RISCVISD::UNSHFLW: {
8235     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
8236     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8237         SimplifyDemandedLowBitsHelper(1, 4))
8238       return SDValue(N, 0);
8239 
8240     break;
8241   }
8242   case RISCVISD::BCOMPRESSW:
8243   case RISCVISD::BDECOMPRESSW: {
8244     // Only the lower 32 bits of LHS and RHS are read.
8245     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8246         SimplifyDemandedLowBitsHelper(1, 32))
8247       return SDValue(N, 0);
8248 
8249     break;
8250   }
8251   case RISCVISD::FSR:
8252   case RISCVISD::FSL:
8253   case RISCVISD::FSRW:
8254   case RISCVISD::FSLW: {
8255     bool IsWInstruction =
8256         N->getOpcode() == RISCVISD::FSRW || N->getOpcode() == RISCVISD::FSLW;
8257     unsigned BitWidth =
8258         IsWInstruction ? 32 : N->getSimpleValueType(0).getSizeInBits();
8259     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8260     // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read.
8261     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) + 1))
8262       return SDValue(N, 0);
8263 
8264     break;
8265   }
8266   case RISCVISD::FMV_X_ANYEXTH:
8267   case RISCVISD::FMV_X_ANYEXTW_RV64: {
8268     SDLoc DL(N);
8269     SDValue Op0 = N->getOperand(0);
8270     MVT VT = N->getSimpleValueType(0);
8271     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
8272     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
8273     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
8274     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
8275          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
8276         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8277          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
8278       assert(Op0.getOperand(0).getValueType() == VT &&
8279              "Unexpected value type!");
8280       return Op0.getOperand(0);
8281     }
8282 
8283     // This is a target-specific version of a DAGCombine performed in
8284     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8285     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8286     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8287     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8288         !Op0.getNode()->hasOneUse())
8289       break;
8290     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
8291     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
8292     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
8293     if (Op0.getOpcode() == ISD::FNEG)
8294       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
8295                          DAG.getConstant(SignBit, DL, VT));
8296 
8297     assert(Op0.getOpcode() == ISD::FABS);
8298     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
8299                        DAG.getConstant(~SignBit, DL, VT));
8300   }
8301   case ISD::ADD:
8302     return performADDCombine(N, DAG, Subtarget);
8303   case ISD::SUB:
8304     return performSUBCombine(N, DAG);
8305   case ISD::AND:
8306     return performANDCombine(N, DAG);
8307   case ISD::OR:
8308     return performORCombine(N, DAG, Subtarget);
8309   case ISD::XOR:
8310     return performXORCombine(N, DAG);
8311   case ISD::SIGN_EXTEND_INREG:
8312     return performSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
8313   case ISD::ZERO_EXTEND:
8314     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
8315     // type legalization. This is safe because fp_to_uint produces poison if
8316     // it overflows.
8317     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
8318       SDValue Src = N->getOperand(0);
8319       if (Src.getOpcode() == ISD::FP_TO_UINT &&
8320           isTypeLegal(Src.getOperand(0).getValueType()))
8321         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
8322                            Src.getOperand(0));
8323       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
8324           isTypeLegal(Src.getOperand(1).getValueType())) {
8325         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
8326         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
8327                                   Src.getOperand(0), Src.getOperand(1));
8328         DCI.CombineTo(N, Res);
8329         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
8330         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
8331         return SDValue(N, 0); // Return N so it doesn't get rechecked.
8332       }
8333     }
8334     return SDValue();
8335   case RISCVISD::SELECT_CC: {
8336     // Transform
8337     SDValue LHS = N->getOperand(0);
8338     SDValue RHS = N->getOperand(1);
8339     SDValue TrueV = N->getOperand(3);
8340     SDValue FalseV = N->getOperand(4);
8341 
8342     // If the True and False values are the same, we don't need a select_cc.
8343     if (TrueV == FalseV)
8344       return TrueV;
8345 
8346     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
8347     if (!ISD::isIntEqualitySetCC(CCVal))
8348       break;
8349 
8350     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
8351     //      (select_cc X, Y, lt, trueV, falseV)
8352     // Sometimes the setcc is introduced after select_cc has been formed.
8353     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8354         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8355       // If we're looking for eq 0 instead of ne 0, we need to invert the
8356       // condition.
8357       bool Invert = CCVal == ISD::SETEQ;
8358       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8359       if (Invert)
8360         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8361 
8362       SDLoc DL(N);
8363       RHS = LHS.getOperand(1);
8364       LHS = LHS.getOperand(0);
8365       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8366 
8367       SDValue TargetCC = DAG.getCondCode(CCVal);
8368       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8369                          {LHS, RHS, TargetCC, TrueV, FalseV});
8370     }
8371 
8372     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
8373     //      (select_cc X, Y, eq/ne, trueV, falseV)
8374     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8375       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
8376                          {LHS.getOperand(0), LHS.getOperand(1),
8377                           N->getOperand(2), TrueV, FalseV});
8378     // (select_cc X, 1, setne, trueV, falseV) ->
8379     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
8380     // This can occur when legalizing some floating point comparisons.
8381     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8382     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8383       SDLoc DL(N);
8384       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8385       SDValue TargetCC = DAG.getCondCode(CCVal);
8386       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8387       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8388                          {LHS, RHS, TargetCC, TrueV, FalseV});
8389     }
8390 
8391     break;
8392   }
8393   case RISCVISD::BR_CC: {
8394     SDValue LHS = N->getOperand(1);
8395     SDValue RHS = N->getOperand(2);
8396     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
8397     if (!ISD::isIntEqualitySetCC(CCVal))
8398       break;
8399 
8400     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
8401     //      (br_cc X, Y, lt, dest)
8402     // Sometimes the setcc is introduced after br_cc has been formed.
8403     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8404         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8405       // If we're looking for eq 0 instead of ne 0, we need to invert the
8406       // condition.
8407       bool Invert = CCVal == ISD::SETEQ;
8408       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8409       if (Invert)
8410         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8411 
8412       SDLoc DL(N);
8413       RHS = LHS.getOperand(1);
8414       LHS = LHS.getOperand(0);
8415       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8416 
8417       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8418                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
8419                          N->getOperand(4));
8420     }
8421 
8422     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
8423     //      (br_cc X, Y, eq/ne, trueV, falseV)
8424     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8425       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
8426                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
8427                          N->getOperand(3), N->getOperand(4));
8428 
8429     // (br_cc X, 1, setne, br_cc) ->
8430     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
8431     // This can occur when legalizing some floating point comparisons.
8432     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8433     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8434       SDLoc DL(N);
8435       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8436       SDValue TargetCC = DAG.getCondCode(CCVal);
8437       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8438       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8439                          N->getOperand(0), LHS, RHS, TargetCC,
8440                          N->getOperand(4));
8441     }
8442     break;
8443   }
8444   case ISD::BITREVERSE:
8445     return performBITREVERSECombine(N, DAG, Subtarget);
8446   case ISD::FP_TO_SINT:
8447   case ISD::FP_TO_UINT:
8448     return performFP_TO_INTCombine(N, DCI, Subtarget);
8449   case ISD::FP_TO_SINT_SAT:
8450   case ISD::FP_TO_UINT_SAT:
8451     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
8452   case ISD::FCOPYSIGN: {
8453     EVT VT = N->getValueType(0);
8454     if (!VT.isVector())
8455       break;
8456     // There is a form of VFSGNJ which injects the negated sign of its second
8457     // operand. Try and bubble any FNEG up after the extend/round to produce
8458     // this optimized pattern. Avoid modifying cases where FP_ROUND and
8459     // TRUNC=1.
8460     SDValue In2 = N->getOperand(1);
8461     // Avoid cases where the extend/round has multiple uses, as duplicating
8462     // those is typically more expensive than removing a fneg.
8463     if (!In2.hasOneUse())
8464       break;
8465     if (In2.getOpcode() != ISD::FP_EXTEND &&
8466         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
8467       break;
8468     In2 = In2.getOperand(0);
8469     if (In2.getOpcode() != ISD::FNEG)
8470       break;
8471     SDLoc DL(N);
8472     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
8473     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
8474                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
8475   }
8476   case ISD::MGATHER:
8477   case ISD::MSCATTER:
8478   case ISD::VP_GATHER:
8479   case ISD::VP_SCATTER: {
8480     if (!DCI.isBeforeLegalize())
8481       break;
8482     SDValue Index, ScaleOp;
8483     bool IsIndexScaled = false;
8484     bool IsIndexSigned = false;
8485     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
8486       Index = VPGSN->getIndex();
8487       ScaleOp = VPGSN->getScale();
8488       IsIndexScaled = VPGSN->isIndexScaled();
8489       IsIndexSigned = VPGSN->isIndexSigned();
8490     } else {
8491       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
8492       Index = MGSN->getIndex();
8493       ScaleOp = MGSN->getScale();
8494       IsIndexScaled = MGSN->isIndexScaled();
8495       IsIndexSigned = MGSN->isIndexSigned();
8496     }
8497     EVT IndexVT = Index.getValueType();
8498     MVT XLenVT = Subtarget.getXLenVT();
8499     // RISCV indexed loads only support the "unsigned unscaled" addressing
8500     // mode, so anything else must be manually legalized.
8501     bool NeedsIdxLegalization =
8502         IsIndexScaled ||
8503         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
8504     if (!NeedsIdxLegalization)
8505       break;
8506 
8507     SDLoc DL(N);
8508 
8509     // Any index legalization should first promote to XLenVT, so we don't lose
8510     // bits when scaling. This may create an illegal index type so we let
8511     // LLVM's legalization take care of the splitting.
8512     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
8513     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
8514       IndexVT = IndexVT.changeVectorElementType(XLenVT);
8515       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
8516                           DL, IndexVT, Index);
8517     }
8518 
8519     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
8520     if (IsIndexScaled && Scale != 1) {
8521       // Manually scale the indices by the element size.
8522       // TODO: Sanitize the scale operand here?
8523       // TODO: For VP nodes, should we use VP_SHL here?
8524       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
8525       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
8526       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
8527     }
8528 
8529     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
8530     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
8531       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
8532                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
8533                               VPGN->getScale(), VPGN->getMask(),
8534                               VPGN->getVectorLength()},
8535                              VPGN->getMemOperand(), NewIndexTy);
8536     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
8537       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
8538                               {VPSN->getChain(), VPSN->getValue(),
8539                                VPSN->getBasePtr(), Index, VPSN->getScale(),
8540                                VPSN->getMask(), VPSN->getVectorLength()},
8541                               VPSN->getMemOperand(), NewIndexTy);
8542     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
8543       return DAG.getMaskedGather(
8544           N->getVTList(), MGN->getMemoryVT(), DL,
8545           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
8546            MGN->getBasePtr(), Index, MGN->getScale()},
8547           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
8548     const auto *MSN = cast<MaskedScatterSDNode>(N);
8549     return DAG.getMaskedScatter(
8550         N->getVTList(), MSN->getMemoryVT(), DL,
8551         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
8552          Index, MSN->getScale()},
8553         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
8554   }
8555   case RISCVISD::SRA_VL:
8556   case RISCVISD::SRL_VL:
8557   case RISCVISD::SHL_VL: {
8558     SDValue ShAmt = N->getOperand(1);
8559     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8560       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8561       SDLoc DL(N);
8562       SDValue VL = N->getOperand(3);
8563       EVT VT = N->getValueType(0);
8564       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8565                           ShAmt.getOperand(1), VL);
8566       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
8567                          N->getOperand(2), N->getOperand(3));
8568     }
8569     break;
8570   }
8571   case ISD::SRA:
8572   case ISD::SRL:
8573   case ISD::SHL: {
8574     SDValue ShAmt = N->getOperand(1);
8575     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8576       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8577       SDLoc DL(N);
8578       EVT VT = N->getValueType(0);
8579       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8580                           ShAmt.getOperand(1),
8581                           DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
8582       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
8583     }
8584     break;
8585   }
8586   case RISCVISD::ADD_VL:
8587     if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false))
8588       return V;
8589     return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true);
8590   case RISCVISD::SUB_VL:
8591     return combineADDSUB_VLToVWADDSUB_VL(N, DAG);
8592   case RISCVISD::VWADD_W_VL:
8593   case RISCVISD::VWADDU_W_VL:
8594   case RISCVISD::VWSUB_W_VL:
8595   case RISCVISD::VWSUBU_W_VL:
8596     return combineVWADD_W_VL_VWSUB_W_VL(N, DAG);
8597   case RISCVISD::MUL_VL:
8598     if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
8599       return V;
8600     // Mul is commutative.
8601     return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
8602   case ISD::STORE: {
8603     auto *Store = cast<StoreSDNode>(N);
8604     SDValue Val = Store->getValue();
8605     // Combine store of vmv.x.s to vse with VL of 1.
8606     // FIXME: Support FP.
8607     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
8608       SDValue Src = Val.getOperand(0);
8609       EVT VecVT = Src.getValueType();
8610       EVT MemVT = Store->getMemoryVT();
8611       // The memory VT and the element type must match.
8612       if (VecVT.getVectorElementType() == MemVT) {
8613         SDLoc DL(N);
8614         MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
8615         return DAG.getStoreVP(
8616             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
8617             DAG.getConstant(1, DL, MaskVT),
8618             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
8619             Store->getMemOperand(), Store->getAddressingMode(),
8620             Store->isTruncatingStore(), /*IsCompress*/ false);
8621       }
8622     }
8623 
8624     break;
8625   }
8626   case ISD::SPLAT_VECTOR: {
8627     EVT VT = N->getValueType(0);
8628     // Only perform this combine on legal MVT types.
8629     if (!isTypeLegal(VT))
8630       break;
8631     if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
8632                                          DAG, Subtarget))
8633       return Gather;
8634     break;
8635   }
8636   case RISCVISD::VMV_V_X_VL: {
8637     // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
8638     // scalar input.
8639     unsigned ScalarSize = N->getOperand(1).getValueSizeInBits();
8640     unsigned EltWidth = N->getValueType(0).getScalarSizeInBits();
8641     if (ScalarSize > EltWidth && N->getOperand(0).isUndef())
8642       if (SimplifyDemandedLowBitsHelper(1, EltWidth))
8643         return SDValue(N, 0);
8644 
8645     break;
8646   }
8647   case ISD::INTRINSIC_WO_CHAIN: {
8648     unsigned IntNo = N->getConstantOperandVal(0);
8649     switch (IntNo) {
8650       // By default we do not combine any intrinsic.
8651     default:
8652       return SDValue();
8653     case Intrinsic::riscv_vcpop:
8654     case Intrinsic::riscv_vcpop_mask:
8655     case Intrinsic::riscv_vfirst:
8656     case Intrinsic::riscv_vfirst_mask: {
8657       SDValue VL = N->getOperand(2);
8658       if (IntNo == Intrinsic::riscv_vcpop_mask ||
8659           IntNo == Intrinsic::riscv_vfirst_mask)
8660         VL = N->getOperand(3);
8661       if (!isNullConstant(VL))
8662         return SDValue();
8663       // If VL is 0, vcpop -> li 0, vfirst -> li -1.
8664       SDLoc DL(N);
8665       EVT VT = N->getValueType(0);
8666       if (IntNo == Intrinsic::riscv_vfirst ||
8667           IntNo == Intrinsic::riscv_vfirst_mask)
8668         return DAG.getConstant(-1, DL, VT);
8669       return DAG.getConstant(0, DL, VT);
8670     }
8671     }
8672   }
8673   }
8674 
8675   return SDValue();
8676 }
8677 
8678 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
8679     const SDNode *N, CombineLevel Level) const {
8680   // The following folds are only desirable if `(OP _, c1 << c2)` can be
8681   // materialised in fewer instructions than `(OP _, c1)`:
8682   //
8683   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
8684   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
8685   SDValue N0 = N->getOperand(0);
8686   EVT Ty = N0.getValueType();
8687   if (Ty.isScalarInteger() &&
8688       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
8689     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
8690     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
8691     if (C1 && C2) {
8692       const APInt &C1Int = C1->getAPIntValue();
8693       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
8694 
8695       // We can materialise `c1 << c2` into an add immediate, so it's "free",
8696       // and the combine should happen, to potentially allow further combines
8697       // later.
8698       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
8699           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
8700         return true;
8701 
8702       // We can materialise `c1` in an add immediate, so it's "free", and the
8703       // combine should be prevented.
8704       if (C1Int.getMinSignedBits() <= 64 &&
8705           isLegalAddImmediate(C1Int.getSExtValue()))
8706         return false;
8707 
8708       // Neither constant will fit into an immediate, so find materialisation
8709       // costs.
8710       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
8711                                               Subtarget.getFeatureBits(),
8712                                               /*CompressionCost*/true);
8713       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
8714           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
8715           /*CompressionCost*/true);
8716 
8717       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
8718       // combine should be prevented.
8719       if (C1Cost < ShiftedC1Cost)
8720         return false;
8721     }
8722   }
8723   return true;
8724 }
8725 
8726 bool RISCVTargetLowering::targetShrinkDemandedConstant(
8727     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
8728     TargetLoweringOpt &TLO) const {
8729   // Delay this optimization as late as possible.
8730   if (!TLO.LegalOps)
8731     return false;
8732 
8733   EVT VT = Op.getValueType();
8734   if (VT.isVector())
8735     return false;
8736 
8737   // Only handle AND for now.
8738   if (Op.getOpcode() != ISD::AND)
8739     return false;
8740 
8741   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
8742   if (!C)
8743     return false;
8744 
8745   const APInt &Mask = C->getAPIntValue();
8746 
8747   // Clear all non-demanded bits initially.
8748   APInt ShrunkMask = Mask & DemandedBits;
8749 
8750   // Try to make a smaller immediate by setting undemanded bits.
8751 
8752   APInt ExpandedMask = Mask | ~DemandedBits;
8753 
8754   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
8755     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
8756   };
8757   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
8758     if (NewMask == Mask)
8759       return true;
8760     SDLoc DL(Op);
8761     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
8762     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
8763     return TLO.CombineTo(Op, NewOp);
8764   };
8765 
8766   // If the shrunk mask fits in sign extended 12 bits, let the target
8767   // independent code apply it.
8768   if (ShrunkMask.isSignedIntN(12))
8769     return false;
8770 
8771   // Preserve (and X, 0xffff) when zext.h is supported.
8772   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
8773     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
8774     if (IsLegalMask(NewMask))
8775       return UseMask(NewMask);
8776   }
8777 
8778   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
8779   if (VT == MVT::i64) {
8780     APInt NewMask = APInt(64, 0xffffffff);
8781     if (IsLegalMask(NewMask))
8782       return UseMask(NewMask);
8783   }
8784 
8785   // For the remaining optimizations, we need to be able to make a negative
8786   // number through a combination of mask and undemanded bits.
8787   if (!ExpandedMask.isNegative())
8788     return false;
8789 
8790   // What is the fewest number of bits we need to represent the negative number.
8791   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
8792 
8793   // Try to make a 12 bit negative immediate. If that fails try to make a 32
8794   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
8795   APInt NewMask = ShrunkMask;
8796   if (MinSignedBits <= 12)
8797     NewMask.setBitsFrom(11);
8798   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
8799     NewMask.setBitsFrom(31);
8800   else
8801     return false;
8802 
8803   // Check that our new mask is a subset of the demanded mask.
8804   assert(IsLegalMask(NewMask));
8805   return UseMask(NewMask);
8806 }
8807 
8808 static void computeGREV(APInt &Src, unsigned ShAmt) {
8809   ShAmt &= Src.getBitWidth() - 1;
8810   uint64_t x = Src.getZExtValue();
8811   if (ShAmt & 1)
8812     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
8813   if (ShAmt & 2)
8814     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
8815   if (ShAmt & 4)
8816     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
8817   if (ShAmt & 8)
8818     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
8819   if (ShAmt & 16)
8820     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
8821   if (ShAmt & 32)
8822     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
8823   Src = x;
8824 }
8825 
8826 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
8827                                                         KnownBits &Known,
8828                                                         const APInt &DemandedElts,
8829                                                         const SelectionDAG &DAG,
8830                                                         unsigned Depth) const {
8831   unsigned BitWidth = Known.getBitWidth();
8832   unsigned Opc = Op.getOpcode();
8833   assert((Opc >= ISD::BUILTIN_OP_END ||
8834           Opc == ISD::INTRINSIC_WO_CHAIN ||
8835           Opc == ISD::INTRINSIC_W_CHAIN ||
8836           Opc == ISD::INTRINSIC_VOID) &&
8837          "Should use MaskedValueIsZero if you don't know whether Op"
8838          " is a target node!");
8839 
8840   Known.resetAll();
8841   switch (Opc) {
8842   default: break;
8843   case RISCVISD::SELECT_CC: {
8844     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
8845     // If we don't know any bits, early out.
8846     if (Known.isUnknown())
8847       break;
8848     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
8849 
8850     // Only known if known in both the LHS and RHS.
8851     Known = KnownBits::commonBits(Known, Known2);
8852     break;
8853   }
8854   case RISCVISD::REMUW: {
8855     KnownBits Known2;
8856     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8857     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8858     // We only care about the lower 32 bits.
8859     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
8860     // Restore the original width by sign extending.
8861     Known = Known.sext(BitWidth);
8862     break;
8863   }
8864   case RISCVISD::DIVUW: {
8865     KnownBits Known2;
8866     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8867     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8868     // We only care about the lower 32 bits.
8869     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
8870     // Restore the original width by sign extending.
8871     Known = Known.sext(BitWidth);
8872     break;
8873   }
8874   case RISCVISD::CTZW: {
8875     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8876     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
8877     unsigned LowBits = Log2_32(PossibleTZ) + 1;
8878     Known.Zero.setBitsFrom(LowBits);
8879     break;
8880   }
8881   case RISCVISD::CLZW: {
8882     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8883     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
8884     unsigned LowBits = Log2_32(PossibleLZ) + 1;
8885     Known.Zero.setBitsFrom(LowBits);
8886     break;
8887   }
8888   case RISCVISD::GREV: {
8889     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
8890       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8891       unsigned ShAmt = C->getZExtValue();
8892       computeGREV(Known.Zero, ShAmt);
8893       computeGREV(Known.One, ShAmt);
8894     }
8895     break;
8896   }
8897   case RISCVISD::READ_VLENB: {
8898     // If we know the minimum VLen from Zvl extensions, we can use that to
8899     // determine the trailing zeros of VLENB.
8900     // FIXME: Limit to 128 bit vectors until we have more testing.
8901     unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8;
8902     if (MinVLenB > 0)
8903       Known.Zero.setLowBits(Log2_32(MinVLenB));
8904     // We assume VLENB is no more than 65536 / 8 bytes.
8905     Known.Zero.setBitsFrom(14);
8906     break;
8907   }
8908   case ISD::INTRINSIC_W_CHAIN:
8909   case ISD::INTRINSIC_WO_CHAIN: {
8910     unsigned IntNo =
8911         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
8912     switch (IntNo) {
8913     default:
8914       // We can't do anything for most intrinsics.
8915       break;
8916     case Intrinsic::riscv_vsetvli:
8917     case Intrinsic::riscv_vsetvlimax:
8918     case Intrinsic::riscv_vsetvli_opt:
8919     case Intrinsic::riscv_vsetvlimax_opt:
8920       // Assume that VL output is positive and would fit in an int32_t.
8921       // TODO: VLEN might be capped at 16 bits in a future V spec update.
8922       if (BitWidth >= 32)
8923         Known.Zero.setBitsFrom(31);
8924       break;
8925     }
8926     break;
8927   }
8928   }
8929 }
8930 
8931 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
8932     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
8933     unsigned Depth) const {
8934   switch (Op.getOpcode()) {
8935   default:
8936     break;
8937   case RISCVISD::SELECT_CC: {
8938     unsigned Tmp =
8939         DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
8940     if (Tmp == 1) return 1;  // Early out.
8941     unsigned Tmp2 =
8942         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
8943     return std::min(Tmp, Tmp2);
8944   }
8945   case RISCVISD::SLLW:
8946   case RISCVISD::SRAW:
8947   case RISCVISD::SRLW:
8948   case RISCVISD::DIVW:
8949   case RISCVISD::DIVUW:
8950   case RISCVISD::REMUW:
8951   case RISCVISD::ROLW:
8952   case RISCVISD::RORW:
8953   case RISCVISD::GREVW:
8954   case RISCVISD::GORCW:
8955   case RISCVISD::FSLW:
8956   case RISCVISD::FSRW:
8957   case RISCVISD::SHFLW:
8958   case RISCVISD::UNSHFLW:
8959   case RISCVISD::BCOMPRESSW:
8960   case RISCVISD::BDECOMPRESSW:
8961   case RISCVISD::BFPW:
8962   case RISCVISD::FCVT_W_RV64:
8963   case RISCVISD::FCVT_WU_RV64:
8964   case RISCVISD::STRICT_FCVT_W_RV64:
8965   case RISCVISD::STRICT_FCVT_WU_RV64:
8966     // TODO: As the result is sign-extended, this is conservatively correct. A
8967     // more precise answer could be calculated for SRAW depending on known
8968     // bits in the shift amount.
8969     return 33;
8970   case RISCVISD::SHFL:
8971   case RISCVISD::UNSHFL: {
8972     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
8973     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
8974     // will stay within the upper 32 bits. If there were more than 32 sign bits
8975     // before there will be at least 33 sign bits after.
8976     if (Op.getValueType() == MVT::i64 &&
8977         isa<ConstantSDNode>(Op.getOperand(1)) &&
8978         (Op.getConstantOperandVal(1) & 0x10) == 0) {
8979       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
8980       if (Tmp > 32)
8981         return 33;
8982     }
8983     break;
8984   }
8985   case RISCVISD::VMV_X_S: {
8986     // The number of sign bits of the scalar result is computed by obtaining the
8987     // element type of the input vector operand, subtracting its width from the
8988     // XLEN, and then adding one (sign bit within the element type). If the
8989     // element type is wider than XLen, the least-significant XLEN bits are
8990     // taken.
8991     unsigned XLen = Subtarget.getXLen();
8992     unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
8993     if (EltBits <= XLen)
8994       return XLen - EltBits + 1;
8995     break;
8996   }
8997   }
8998 
8999   return 1;
9000 }
9001 
9002 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
9003                                                   MachineBasicBlock *BB) {
9004   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
9005 
9006   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
9007   // Should the count have wrapped while it was being read, we need to try
9008   // again.
9009   // ...
9010   // read:
9011   // rdcycleh x3 # load high word of cycle
9012   // rdcycle  x2 # load low word of cycle
9013   // rdcycleh x4 # load high word of cycle
9014   // bne x3, x4, read # check if high word reads match, otherwise try again
9015   // ...
9016 
9017   MachineFunction &MF = *BB->getParent();
9018   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9019   MachineFunction::iterator It = ++BB->getIterator();
9020 
9021   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9022   MF.insert(It, LoopMBB);
9023 
9024   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9025   MF.insert(It, DoneMBB);
9026 
9027   // Transfer the remainder of BB and its successor edges to DoneMBB.
9028   DoneMBB->splice(DoneMBB->begin(), BB,
9029                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9030   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
9031 
9032   BB->addSuccessor(LoopMBB);
9033 
9034   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9035   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9036   Register LoReg = MI.getOperand(0).getReg();
9037   Register HiReg = MI.getOperand(1).getReg();
9038   DebugLoc DL = MI.getDebugLoc();
9039 
9040   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
9041   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
9042       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9043       .addReg(RISCV::X0);
9044   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
9045       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
9046       .addReg(RISCV::X0);
9047   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
9048       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9049       .addReg(RISCV::X0);
9050 
9051   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
9052       .addReg(HiReg)
9053       .addReg(ReadAgainReg)
9054       .addMBB(LoopMBB);
9055 
9056   LoopMBB->addSuccessor(LoopMBB);
9057   LoopMBB->addSuccessor(DoneMBB);
9058 
9059   MI.eraseFromParent();
9060 
9061   return DoneMBB;
9062 }
9063 
9064 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
9065                                              MachineBasicBlock *BB) {
9066   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
9067 
9068   MachineFunction &MF = *BB->getParent();
9069   DebugLoc DL = MI.getDebugLoc();
9070   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9071   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9072   Register LoReg = MI.getOperand(0).getReg();
9073   Register HiReg = MI.getOperand(1).getReg();
9074   Register SrcReg = MI.getOperand(2).getReg();
9075   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
9076   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9077 
9078   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
9079                           RI);
9080   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9081   MachineMemOperand *MMOLo =
9082       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
9083   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9084       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
9085   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
9086       .addFrameIndex(FI)
9087       .addImm(0)
9088       .addMemOperand(MMOLo);
9089   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
9090       .addFrameIndex(FI)
9091       .addImm(4)
9092       .addMemOperand(MMOHi);
9093   MI.eraseFromParent(); // The pseudo instruction is gone now.
9094   return BB;
9095 }
9096 
9097 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
9098                                                  MachineBasicBlock *BB) {
9099   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
9100          "Unexpected instruction");
9101 
9102   MachineFunction &MF = *BB->getParent();
9103   DebugLoc DL = MI.getDebugLoc();
9104   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9105   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9106   Register DstReg = MI.getOperand(0).getReg();
9107   Register LoReg = MI.getOperand(1).getReg();
9108   Register HiReg = MI.getOperand(2).getReg();
9109   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
9110   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9111 
9112   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9113   MachineMemOperand *MMOLo =
9114       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
9115   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9116       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
9117   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9118       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
9119       .addFrameIndex(FI)
9120       .addImm(0)
9121       .addMemOperand(MMOLo);
9122   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9123       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
9124       .addFrameIndex(FI)
9125       .addImm(4)
9126       .addMemOperand(MMOHi);
9127   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
9128   MI.eraseFromParent(); // The pseudo instruction is gone now.
9129   return BB;
9130 }
9131 
9132 static bool isSelectPseudo(MachineInstr &MI) {
9133   switch (MI.getOpcode()) {
9134   default:
9135     return false;
9136   case RISCV::Select_GPR_Using_CC_GPR:
9137   case RISCV::Select_FPR16_Using_CC_GPR:
9138   case RISCV::Select_FPR32_Using_CC_GPR:
9139   case RISCV::Select_FPR64_Using_CC_GPR:
9140     return true;
9141   }
9142 }
9143 
9144 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
9145                                         unsigned RelOpcode, unsigned EqOpcode,
9146                                         const RISCVSubtarget &Subtarget) {
9147   DebugLoc DL = MI.getDebugLoc();
9148   Register DstReg = MI.getOperand(0).getReg();
9149   Register Src1Reg = MI.getOperand(1).getReg();
9150   Register Src2Reg = MI.getOperand(2).getReg();
9151   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
9152   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
9153   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
9154 
9155   // Save the current FFLAGS.
9156   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
9157 
9158   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
9159                  .addReg(Src1Reg)
9160                  .addReg(Src2Reg);
9161   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9162     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
9163 
9164   // Restore the FFLAGS.
9165   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
9166       .addReg(SavedFFlags, RegState::Kill);
9167 
9168   // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
9169   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
9170                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
9171                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
9172   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9173     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
9174 
9175   // Erase the pseudoinstruction.
9176   MI.eraseFromParent();
9177   return BB;
9178 }
9179 
9180 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
9181                                            MachineBasicBlock *BB,
9182                                            const RISCVSubtarget &Subtarget) {
9183   // To "insert" Select_* instructions, we actually have to insert the triangle
9184   // control-flow pattern.  The incoming instructions know the destination vreg
9185   // to set, the condition code register to branch on, the true/false values to
9186   // select between, and the condcode to use to select the appropriate branch.
9187   //
9188   // We produce the following control flow:
9189   //     HeadMBB
9190   //     |  \
9191   //     |  IfFalseMBB
9192   //     | /
9193   //    TailMBB
9194   //
9195   // When we find a sequence of selects we attempt to optimize their emission
9196   // by sharing the control flow. Currently we only handle cases where we have
9197   // multiple selects with the exact same condition (same LHS, RHS and CC).
9198   // The selects may be interleaved with other instructions if the other
9199   // instructions meet some requirements we deem safe:
9200   // - They are debug instructions. Otherwise,
9201   // - They do not have side-effects, do not access memory and their inputs do
9202   //   not depend on the results of the select pseudo-instructions.
9203   // The TrueV/FalseV operands of the selects cannot depend on the result of
9204   // previous selects in the sequence.
9205   // These conditions could be further relaxed. See the X86 target for a
9206   // related approach and more information.
9207   Register LHS = MI.getOperand(1).getReg();
9208   Register RHS = MI.getOperand(2).getReg();
9209   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
9210 
9211   SmallVector<MachineInstr *, 4> SelectDebugValues;
9212   SmallSet<Register, 4> SelectDests;
9213   SelectDests.insert(MI.getOperand(0).getReg());
9214 
9215   MachineInstr *LastSelectPseudo = &MI;
9216 
9217   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
9218        SequenceMBBI != E; ++SequenceMBBI) {
9219     if (SequenceMBBI->isDebugInstr())
9220       continue;
9221     else if (isSelectPseudo(*SequenceMBBI)) {
9222       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
9223           SequenceMBBI->getOperand(2).getReg() != RHS ||
9224           SequenceMBBI->getOperand(3).getImm() != CC ||
9225           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
9226           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
9227         break;
9228       LastSelectPseudo = &*SequenceMBBI;
9229       SequenceMBBI->collectDebugValues(SelectDebugValues);
9230       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
9231     } else {
9232       if (SequenceMBBI->hasUnmodeledSideEffects() ||
9233           SequenceMBBI->mayLoadOrStore())
9234         break;
9235       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
9236             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
9237           }))
9238         break;
9239     }
9240   }
9241 
9242   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
9243   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9244   DebugLoc DL = MI.getDebugLoc();
9245   MachineFunction::iterator I = ++BB->getIterator();
9246 
9247   MachineBasicBlock *HeadMBB = BB;
9248   MachineFunction *F = BB->getParent();
9249   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
9250   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
9251 
9252   F->insert(I, IfFalseMBB);
9253   F->insert(I, TailMBB);
9254 
9255   // Transfer debug instructions associated with the selects to TailMBB.
9256   for (MachineInstr *DebugInstr : SelectDebugValues) {
9257     TailMBB->push_back(DebugInstr->removeFromParent());
9258   }
9259 
9260   // Move all instructions after the sequence to TailMBB.
9261   TailMBB->splice(TailMBB->end(), HeadMBB,
9262                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
9263   // Update machine-CFG edges by transferring all successors of the current
9264   // block to the new block which will contain the Phi nodes for the selects.
9265   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
9266   // Set the successors for HeadMBB.
9267   HeadMBB->addSuccessor(IfFalseMBB);
9268   HeadMBB->addSuccessor(TailMBB);
9269 
9270   // Insert appropriate branch.
9271   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
9272     .addReg(LHS)
9273     .addReg(RHS)
9274     .addMBB(TailMBB);
9275 
9276   // IfFalseMBB just falls through to TailMBB.
9277   IfFalseMBB->addSuccessor(TailMBB);
9278 
9279   // Create PHIs for all of the select pseudo-instructions.
9280   auto SelectMBBI = MI.getIterator();
9281   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
9282   auto InsertionPoint = TailMBB->begin();
9283   while (SelectMBBI != SelectEnd) {
9284     auto Next = std::next(SelectMBBI);
9285     if (isSelectPseudo(*SelectMBBI)) {
9286       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
9287       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
9288               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
9289           .addReg(SelectMBBI->getOperand(4).getReg())
9290           .addMBB(HeadMBB)
9291           .addReg(SelectMBBI->getOperand(5).getReg())
9292           .addMBB(IfFalseMBB);
9293       SelectMBBI->eraseFromParent();
9294     }
9295     SelectMBBI = Next;
9296   }
9297 
9298   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
9299   return TailMBB;
9300 }
9301 
9302 MachineBasicBlock *
9303 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
9304                                                  MachineBasicBlock *BB) const {
9305   switch (MI.getOpcode()) {
9306   default:
9307     llvm_unreachable("Unexpected instr type to insert");
9308   case RISCV::ReadCycleWide:
9309     assert(!Subtarget.is64Bit() &&
9310            "ReadCycleWrite is only to be used on riscv32");
9311     return emitReadCycleWidePseudo(MI, BB);
9312   case RISCV::Select_GPR_Using_CC_GPR:
9313   case RISCV::Select_FPR16_Using_CC_GPR:
9314   case RISCV::Select_FPR32_Using_CC_GPR:
9315   case RISCV::Select_FPR64_Using_CC_GPR:
9316     return emitSelectPseudo(MI, BB, Subtarget);
9317   case RISCV::BuildPairF64Pseudo:
9318     return emitBuildPairF64Pseudo(MI, BB);
9319   case RISCV::SplitF64Pseudo:
9320     return emitSplitF64Pseudo(MI, BB);
9321   case RISCV::PseudoQuietFLE_H:
9322     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
9323   case RISCV::PseudoQuietFLT_H:
9324     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
9325   case RISCV::PseudoQuietFLE_S:
9326     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
9327   case RISCV::PseudoQuietFLT_S:
9328     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
9329   case RISCV::PseudoQuietFLE_D:
9330     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
9331   case RISCV::PseudoQuietFLT_D:
9332     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
9333   }
9334 }
9335 
9336 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9337                                                         SDNode *Node) const {
9338   // Add FRM dependency to any instructions with dynamic rounding mode.
9339   unsigned Opc = MI.getOpcode();
9340   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
9341   if (Idx < 0)
9342     return;
9343   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
9344     return;
9345   // If the instruction already reads FRM, don't add another read.
9346   if (MI.readsRegister(RISCV::FRM))
9347     return;
9348   MI.addOperand(
9349       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
9350 }
9351 
9352 // Calling Convention Implementation.
9353 // The expectations for frontend ABI lowering vary from target to target.
9354 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
9355 // details, but this is a longer term goal. For now, we simply try to keep the
9356 // role of the frontend as simple and well-defined as possible. The rules can
9357 // be summarised as:
9358 // * Never split up large scalar arguments. We handle them here.
9359 // * If a hardfloat calling convention is being used, and the struct may be
9360 // passed in a pair of registers (fp+fp, int+fp), and both registers are
9361 // available, then pass as two separate arguments. If either the GPRs or FPRs
9362 // are exhausted, then pass according to the rule below.
9363 // * If a struct could never be passed in registers or directly in a stack
9364 // slot (as it is larger than 2*XLEN and the floating point rules don't
9365 // apply), then pass it using a pointer with the byval attribute.
9366 // * If a struct is less than 2*XLEN, then coerce to either a two-element
9367 // word-sized array or a 2*XLEN scalar (depending on alignment).
9368 // * The frontend can determine whether a struct is returned by reference or
9369 // not based on its size and fields. If it will be returned by reference, the
9370 // frontend must modify the prototype so a pointer with the sret annotation is
9371 // passed as the first argument. This is not necessary for large scalar
9372 // returns.
9373 // * Struct return values and varargs should be coerced to structs containing
9374 // register-size fields in the same situations they would be for fixed
9375 // arguments.
9376 
9377 static const MCPhysReg ArgGPRs[] = {
9378   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
9379   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
9380 };
9381 static const MCPhysReg ArgFPR16s[] = {
9382   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
9383   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
9384 };
9385 static const MCPhysReg ArgFPR32s[] = {
9386   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
9387   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
9388 };
9389 static const MCPhysReg ArgFPR64s[] = {
9390   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
9391   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
9392 };
9393 // This is an interim calling convention and it may be changed in the future.
9394 static const MCPhysReg ArgVRs[] = {
9395     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
9396     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
9397     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
9398 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
9399                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
9400                                      RISCV::V20M2, RISCV::V22M2};
9401 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
9402                                      RISCV::V20M4};
9403 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
9404 
9405 // Pass a 2*XLEN argument that has been split into two XLEN values through
9406 // registers or the stack as necessary.
9407 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
9408                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
9409                                 MVT ValVT2, MVT LocVT2,
9410                                 ISD::ArgFlagsTy ArgFlags2) {
9411   unsigned XLenInBytes = XLen / 8;
9412   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9413     // At least one half can be passed via register.
9414     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
9415                                      VA1.getLocVT(), CCValAssign::Full));
9416   } else {
9417     // Both halves must be passed on the stack, with proper alignment.
9418     Align StackAlign =
9419         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
9420     State.addLoc(
9421         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
9422                             State.AllocateStack(XLenInBytes, StackAlign),
9423                             VA1.getLocVT(), CCValAssign::Full));
9424     State.addLoc(CCValAssign::getMem(
9425         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9426         LocVT2, CCValAssign::Full));
9427     return false;
9428   }
9429 
9430   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9431     // The second half can also be passed via register.
9432     State.addLoc(
9433         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
9434   } else {
9435     // The second half is passed via the stack, without additional alignment.
9436     State.addLoc(CCValAssign::getMem(
9437         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9438         LocVT2, CCValAssign::Full));
9439   }
9440 
9441   return false;
9442 }
9443 
9444 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
9445                                Optional<unsigned> FirstMaskArgument,
9446                                CCState &State, const RISCVTargetLowering &TLI) {
9447   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
9448   if (RC == &RISCV::VRRegClass) {
9449     // Assign the first mask argument to V0.
9450     // This is an interim calling convention and it may be changed in the
9451     // future.
9452     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
9453       return State.AllocateReg(RISCV::V0);
9454     return State.AllocateReg(ArgVRs);
9455   }
9456   if (RC == &RISCV::VRM2RegClass)
9457     return State.AllocateReg(ArgVRM2s);
9458   if (RC == &RISCV::VRM4RegClass)
9459     return State.AllocateReg(ArgVRM4s);
9460   if (RC == &RISCV::VRM8RegClass)
9461     return State.AllocateReg(ArgVRM8s);
9462   llvm_unreachable("Unhandled register class for ValueType");
9463 }
9464 
9465 // Implements the RISC-V calling convention. Returns true upon failure.
9466 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
9467                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
9468                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
9469                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
9470                      Optional<unsigned> FirstMaskArgument) {
9471   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
9472   assert(XLen == 32 || XLen == 64);
9473   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
9474 
9475   // Any return value split in to more than two values can't be returned
9476   // directly. Vectors are returned via the available vector registers.
9477   if (!LocVT.isVector() && IsRet && ValNo > 1)
9478     return true;
9479 
9480   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
9481   // variadic argument, or if no F16/F32 argument registers are available.
9482   bool UseGPRForF16_F32 = true;
9483   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
9484   // variadic argument, or if no F64 argument registers are available.
9485   bool UseGPRForF64 = true;
9486 
9487   switch (ABI) {
9488   default:
9489     llvm_unreachable("Unexpected ABI");
9490   case RISCVABI::ABI_ILP32:
9491   case RISCVABI::ABI_LP64:
9492     break;
9493   case RISCVABI::ABI_ILP32F:
9494   case RISCVABI::ABI_LP64F:
9495     UseGPRForF16_F32 = !IsFixed;
9496     break;
9497   case RISCVABI::ABI_ILP32D:
9498   case RISCVABI::ABI_LP64D:
9499     UseGPRForF16_F32 = !IsFixed;
9500     UseGPRForF64 = !IsFixed;
9501     break;
9502   }
9503 
9504   // FPR16, FPR32, and FPR64 alias each other.
9505   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
9506     UseGPRForF16_F32 = true;
9507     UseGPRForF64 = true;
9508   }
9509 
9510   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
9511   // similar local variables rather than directly checking against the target
9512   // ABI.
9513 
9514   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
9515     LocVT = XLenVT;
9516     LocInfo = CCValAssign::BCvt;
9517   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
9518     LocVT = MVT::i64;
9519     LocInfo = CCValAssign::BCvt;
9520   }
9521 
9522   // If this is a variadic argument, the RISC-V calling convention requires
9523   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
9524   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
9525   // be used regardless of whether the original argument was split during
9526   // legalisation or not. The argument will not be passed by registers if the
9527   // original type is larger than 2*XLEN, so the register alignment rule does
9528   // not apply.
9529   unsigned TwoXLenInBytes = (2 * XLen) / 8;
9530   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
9531       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
9532     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
9533     // Skip 'odd' register if necessary.
9534     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
9535       State.AllocateReg(ArgGPRs);
9536   }
9537 
9538   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
9539   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
9540       State.getPendingArgFlags();
9541 
9542   assert(PendingLocs.size() == PendingArgFlags.size() &&
9543          "PendingLocs and PendingArgFlags out of sync");
9544 
9545   // Handle passing f64 on RV32D with a soft float ABI or when floating point
9546   // registers are exhausted.
9547   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
9548     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
9549            "Can't lower f64 if it is split");
9550     // Depending on available argument GPRS, f64 may be passed in a pair of
9551     // GPRs, split between a GPR and the stack, or passed completely on the
9552     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
9553     // cases.
9554     Register Reg = State.AllocateReg(ArgGPRs);
9555     LocVT = MVT::i32;
9556     if (!Reg) {
9557       unsigned StackOffset = State.AllocateStack(8, Align(8));
9558       State.addLoc(
9559           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9560       return false;
9561     }
9562     if (!State.AllocateReg(ArgGPRs))
9563       State.AllocateStack(4, Align(4));
9564     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9565     return false;
9566   }
9567 
9568   // Fixed-length vectors are located in the corresponding scalable-vector
9569   // container types.
9570   if (ValVT.isFixedLengthVector())
9571     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9572 
9573   // Split arguments might be passed indirectly, so keep track of the pending
9574   // values. Split vectors are passed via a mix of registers and indirectly, so
9575   // treat them as we would any other argument.
9576   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
9577     LocVT = XLenVT;
9578     LocInfo = CCValAssign::Indirect;
9579     PendingLocs.push_back(
9580         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
9581     PendingArgFlags.push_back(ArgFlags);
9582     if (!ArgFlags.isSplitEnd()) {
9583       return false;
9584     }
9585   }
9586 
9587   // If the split argument only had two elements, it should be passed directly
9588   // in registers or on the stack.
9589   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
9590       PendingLocs.size() <= 2) {
9591     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
9592     // Apply the normal calling convention rules to the first half of the
9593     // split argument.
9594     CCValAssign VA = PendingLocs[0];
9595     ISD::ArgFlagsTy AF = PendingArgFlags[0];
9596     PendingLocs.clear();
9597     PendingArgFlags.clear();
9598     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
9599                                ArgFlags);
9600   }
9601 
9602   // Allocate to a register if possible, or else a stack slot.
9603   Register Reg;
9604   unsigned StoreSizeBytes = XLen / 8;
9605   Align StackAlign = Align(XLen / 8);
9606 
9607   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
9608     Reg = State.AllocateReg(ArgFPR16s);
9609   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
9610     Reg = State.AllocateReg(ArgFPR32s);
9611   else if (ValVT == MVT::f64 && !UseGPRForF64)
9612     Reg = State.AllocateReg(ArgFPR64s);
9613   else if (ValVT.isVector()) {
9614     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
9615     if (!Reg) {
9616       // For return values, the vector must be passed fully via registers or
9617       // via the stack.
9618       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
9619       // but we're using all of them.
9620       if (IsRet)
9621         return true;
9622       // Try using a GPR to pass the address
9623       if ((Reg = State.AllocateReg(ArgGPRs))) {
9624         LocVT = XLenVT;
9625         LocInfo = CCValAssign::Indirect;
9626       } else if (ValVT.isScalableVector()) {
9627         LocVT = XLenVT;
9628         LocInfo = CCValAssign::Indirect;
9629       } else {
9630         // Pass fixed-length vectors on the stack.
9631         LocVT = ValVT;
9632         StoreSizeBytes = ValVT.getStoreSize();
9633         // Align vectors to their element sizes, being careful for vXi1
9634         // vectors.
9635         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9636       }
9637     }
9638   } else {
9639     Reg = State.AllocateReg(ArgGPRs);
9640   }
9641 
9642   unsigned StackOffset =
9643       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
9644 
9645   // If we reach this point and PendingLocs is non-empty, we must be at the
9646   // end of a split argument that must be passed indirectly.
9647   if (!PendingLocs.empty()) {
9648     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
9649     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
9650 
9651     for (auto &It : PendingLocs) {
9652       if (Reg)
9653         It.convertToReg(Reg);
9654       else
9655         It.convertToMem(StackOffset);
9656       State.addLoc(It);
9657     }
9658     PendingLocs.clear();
9659     PendingArgFlags.clear();
9660     return false;
9661   }
9662 
9663   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
9664           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
9665          "Expected an XLenVT or vector types at this stage");
9666 
9667   if (Reg) {
9668     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9669     return false;
9670   }
9671 
9672   // When a floating-point value is passed on the stack, no bit-conversion is
9673   // needed.
9674   if (ValVT.isFloatingPoint()) {
9675     LocVT = ValVT;
9676     LocInfo = CCValAssign::Full;
9677   }
9678   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9679   return false;
9680 }
9681 
9682 template <typename ArgTy>
9683 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
9684   for (const auto &ArgIdx : enumerate(Args)) {
9685     MVT ArgVT = ArgIdx.value().VT;
9686     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
9687       return ArgIdx.index();
9688   }
9689   return None;
9690 }
9691 
9692 void RISCVTargetLowering::analyzeInputArgs(
9693     MachineFunction &MF, CCState &CCInfo,
9694     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
9695     RISCVCCAssignFn Fn) const {
9696   unsigned NumArgs = Ins.size();
9697   FunctionType *FType = MF.getFunction().getFunctionType();
9698 
9699   Optional<unsigned> FirstMaskArgument;
9700   if (Subtarget.hasVInstructions())
9701     FirstMaskArgument = preAssignMask(Ins);
9702 
9703   for (unsigned i = 0; i != NumArgs; ++i) {
9704     MVT ArgVT = Ins[i].VT;
9705     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
9706 
9707     Type *ArgTy = nullptr;
9708     if (IsRet)
9709       ArgTy = FType->getReturnType();
9710     else if (Ins[i].isOrigArg())
9711       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
9712 
9713     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9714     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9715            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
9716            FirstMaskArgument)) {
9717       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
9718                         << EVT(ArgVT).getEVTString() << '\n');
9719       llvm_unreachable(nullptr);
9720     }
9721   }
9722 }
9723 
9724 void RISCVTargetLowering::analyzeOutputArgs(
9725     MachineFunction &MF, CCState &CCInfo,
9726     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
9727     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
9728   unsigned NumArgs = Outs.size();
9729 
9730   Optional<unsigned> FirstMaskArgument;
9731   if (Subtarget.hasVInstructions())
9732     FirstMaskArgument = preAssignMask(Outs);
9733 
9734   for (unsigned i = 0; i != NumArgs; i++) {
9735     MVT ArgVT = Outs[i].VT;
9736     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
9737     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
9738 
9739     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9740     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9741            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
9742            FirstMaskArgument)) {
9743       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
9744                         << EVT(ArgVT).getEVTString() << "\n");
9745       llvm_unreachable(nullptr);
9746     }
9747   }
9748 }
9749 
9750 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
9751 // values.
9752 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
9753                                    const CCValAssign &VA, const SDLoc &DL,
9754                                    const RISCVSubtarget &Subtarget) {
9755   switch (VA.getLocInfo()) {
9756   default:
9757     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9758   case CCValAssign::Full:
9759     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
9760       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
9761     break;
9762   case CCValAssign::BCvt:
9763     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9764       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
9765     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9766       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
9767     else
9768       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
9769     break;
9770   }
9771   return Val;
9772 }
9773 
9774 // The caller is responsible for loading the full value if the argument is
9775 // passed with CCValAssign::Indirect.
9776 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
9777                                 const CCValAssign &VA, const SDLoc &DL,
9778                                 const RISCVTargetLowering &TLI) {
9779   MachineFunction &MF = DAG.getMachineFunction();
9780   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9781   EVT LocVT = VA.getLocVT();
9782   SDValue Val;
9783   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
9784   Register VReg = RegInfo.createVirtualRegister(RC);
9785   RegInfo.addLiveIn(VA.getLocReg(), VReg);
9786   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
9787 
9788   if (VA.getLocInfo() == CCValAssign::Indirect)
9789     return Val;
9790 
9791   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
9792 }
9793 
9794 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
9795                                    const CCValAssign &VA, const SDLoc &DL,
9796                                    const RISCVSubtarget &Subtarget) {
9797   EVT LocVT = VA.getLocVT();
9798 
9799   switch (VA.getLocInfo()) {
9800   default:
9801     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9802   case CCValAssign::Full:
9803     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
9804       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
9805     break;
9806   case CCValAssign::BCvt:
9807     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9808       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
9809     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9810       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
9811     else
9812       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
9813     break;
9814   }
9815   return Val;
9816 }
9817 
9818 // The caller is responsible for loading the full value if the argument is
9819 // passed with CCValAssign::Indirect.
9820 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
9821                                 const CCValAssign &VA, const SDLoc &DL) {
9822   MachineFunction &MF = DAG.getMachineFunction();
9823   MachineFrameInfo &MFI = MF.getFrameInfo();
9824   EVT LocVT = VA.getLocVT();
9825   EVT ValVT = VA.getValVT();
9826   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
9827   if (ValVT.isScalableVector()) {
9828     // When the value is a scalable vector, we save the pointer which points to
9829     // the scalable vector value in the stack. The ValVT will be the pointer
9830     // type, instead of the scalable vector type.
9831     ValVT = LocVT;
9832   }
9833   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
9834                                  /*IsImmutable=*/true);
9835   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
9836   SDValue Val;
9837 
9838   ISD::LoadExtType ExtType;
9839   switch (VA.getLocInfo()) {
9840   default:
9841     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9842   case CCValAssign::Full:
9843   case CCValAssign::Indirect:
9844   case CCValAssign::BCvt:
9845     ExtType = ISD::NON_EXTLOAD;
9846     break;
9847   }
9848   Val = DAG.getExtLoad(
9849       ExtType, DL, LocVT, Chain, FIN,
9850       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
9851   return Val;
9852 }
9853 
9854 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
9855                                        const CCValAssign &VA, const SDLoc &DL) {
9856   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
9857          "Unexpected VA");
9858   MachineFunction &MF = DAG.getMachineFunction();
9859   MachineFrameInfo &MFI = MF.getFrameInfo();
9860   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9861 
9862   if (VA.isMemLoc()) {
9863     // f64 is passed on the stack.
9864     int FI =
9865         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
9866     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9867     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
9868                        MachinePointerInfo::getFixedStack(MF, FI));
9869   }
9870 
9871   assert(VA.isRegLoc() && "Expected register VA assignment");
9872 
9873   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9874   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
9875   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
9876   SDValue Hi;
9877   if (VA.getLocReg() == RISCV::X17) {
9878     // Second half of f64 is passed on the stack.
9879     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
9880     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9881     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
9882                      MachinePointerInfo::getFixedStack(MF, FI));
9883   } else {
9884     // Second half of f64 is passed in another GPR.
9885     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9886     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
9887     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
9888   }
9889   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
9890 }
9891 
9892 // FastCC has less than 1% performance improvement for some particular
9893 // benchmark. But theoretically, it may has benenfit for some cases.
9894 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
9895                             unsigned ValNo, MVT ValVT, MVT LocVT,
9896                             CCValAssign::LocInfo LocInfo,
9897                             ISD::ArgFlagsTy ArgFlags, CCState &State,
9898                             bool IsFixed, bool IsRet, Type *OrigTy,
9899                             const RISCVTargetLowering &TLI,
9900                             Optional<unsigned> FirstMaskArgument) {
9901 
9902   // X5 and X6 might be used for save-restore libcall.
9903   static const MCPhysReg GPRList[] = {
9904       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
9905       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
9906       RISCV::X29, RISCV::X30, RISCV::X31};
9907 
9908   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9909     if (unsigned Reg = State.AllocateReg(GPRList)) {
9910       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9911       return false;
9912     }
9913   }
9914 
9915   if (LocVT == MVT::f16) {
9916     static const MCPhysReg FPR16List[] = {
9917         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
9918         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
9919         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
9920         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
9921     if (unsigned Reg = State.AllocateReg(FPR16List)) {
9922       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9923       return false;
9924     }
9925   }
9926 
9927   if (LocVT == MVT::f32) {
9928     static const MCPhysReg FPR32List[] = {
9929         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
9930         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
9931         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
9932         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
9933     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9934       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9935       return false;
9936     }
9937   }
9938 
9939   if (LocVT == MVT::f64) {
9940     static const MCPhysReg FPR64List[] = {
9941         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
9942         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
9943         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
9944         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
9945     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9946       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9947       return false;
9948     }
9949   }
9950 
9951   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
9952     unsigned Offset4 = State.AllocateStack(4, Align(4));
9953     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
9954     return false;
9955   }
9956 
9957   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
9958     unsigned Offset5 = State.AllocateStack(8, Align(8));
9959     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
9960     return false;
9961   }
9962 
9963   if (LocVT.isVector()) {
9964     if (unsigned Reg =
9965             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
9966       // Fixed-length vectors are located in the corresponding scalable-vector
9967       // container types.
9968       if (ValVT.isFixedLengthVector())
9969         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9970       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9971     } else {
9972       // Try and pass the address via a "fast" GPR.
9973       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
9974         LocInfo = CCValAssign::Indirect;
9975         LocVT = TLI.getSubtarget().getXLenVT();
9976         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
9977       } else if (ValVT.isFixedLengthVector()) {
9978         auto StackAlign =
9979             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9980         unsigned StackOffset =
9981             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
9982         State.addLoc(
9983             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9984       } else {
9985         // Can't pass scalable vectors on the stack.
9986         return true;
9987       }
9988     }
9989 
9990     return false;
9991   }
9992 
9993   return true; // CC didn't match.
9994 }
9995 
9996 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
9997                          CCValAssign::LocInfo LocInfo,
9998                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
9999 
10000   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10001     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
10002     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
10003     static const MCPhysReg GPRList[] = {
10004         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
10005         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
10006     if (unsigned Reg = State.AllocateReg(GPRList)) {
10007       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10008       return false;
10009     }
10010   }
10011 
10012   if (LocVT == MVT::f32) {
10013     // Pass in STG registers: F1, ..., F6
10014     //                        fs0 ... fs5
10015     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
10016                                           RISCV::F18_F, RISCV::F19_F,
10017                                           RISCV::F20_F, RISCV::F21_F};
10018     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10019       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10020       return false;
10021     }
10022   }
10023 
10024   if (LocVT == MVT::f64) {
10025     // Pass in STG registers: D1, ..., D6
10026     //                        fs6 ... fs11
10027     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
10028                                           RISCV::F24_D, RISCV::F25_D,
10029                                           RISCV::F26_D, RISCV::F27_D};
10030     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10031       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10032       return false;
10033     }
10034   }
10035 
10036   report_fatal_error("No registers left in GHC calling convention");
10037   return true;
10038 }
10039 
10040 // Transform physical registers into virtual registers.
10041 SDValue RISCVTargetLowering::LowerFormalArguments(
10042     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
10043     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
10044     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
10045 
10046   MachineFunction &MF = DAG.getMachineFunction();
10047 
10048   switch (CallConv) {
10049   default:
10050     report_fatal_error("Unsupported calling convention");
10051   case CallingConv::C:
10052   case CallingConv::Fast:
10053     break;
10054   case CallingConv::GHC:
10055     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
10056         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
10057       report_fatal_error(
10058         "GHC calling convention requires the F and D instruction set extensions");
10059   }
10060 
10061   const Function &Func = MF.getFunction();
10062   if (Func.hasFnAttribute("interrupt")) {
10063     if (!Func.arg_empty())
10064       report_fatal_error(
10065         "Functions with the interrupt attribute cannot have arguments!");
10066 
10067     StringRef Kind =
10068       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10069 
10070     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
10071       report_fatal_error(
10072         "Function interrupt attribute argument not supported!");
10073   }
10074 
10075   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10076   MVT XLenVT = Subtarget.getXLenVT();
10077   unsigned XLenInBytes = Subtarget.getXLen() / 8;
10078   // Used with vargs to acumulate store chains.
10079   std::vector<SDValue> OutChains;
10080 
10081   // Assign locations to all of the incoming arguments.
10082   SmallVector<CCValAssign, 16> ArgLocs;
10083   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10084 
10085   if (CallConv == CallingConv::GHC)
10086     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
10087   else
10088     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
10089                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10090                                                    : CC_RISCV);
10091 
10092   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
10093     CCValAssign &VA = ArgLocs[i];
10094     SDValue ArgValue;
10095     // Passing f64 on RV32D with a soft float ABI must be handled as a special
10096     // case.
10097     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
10098       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
10099     else if (VA.isRegLoc())
10100       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
10101     else
10102       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
10103 
10104     if (VA.getLocInfo() == CCValAssign::Indirect) {
10105       // If the original argument was split and passed by reference (e.g. i128
10106       // on RV32), we need to load all parts of it here (using the same
10107       // address). Vectors may be partly split to registers and partly to the
10108       // stack, in which case the base address is partly offset and subsequent
10109       // stores are relative to that.
10110       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
10111                                    MachinePointerInfo()));
10112       unsigned ArgIndex = Ins[i].OrigArgIndex;
10113       unsigned ArgPartOffset = Ins[i].PartOffset;
10114       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10115       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
10116         CCValAssign &PartVA = ArgLocs[i + 1];
10117         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
10118         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10119         if (PartVA.getValVT().isScalableVector())
10120           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10121         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
10122         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
10123                                      MachinePointerInfo()));
10124         ++i;
10125       }
10126       continue;
10127     }
10128     InVals.push_back(ArgValue);
10129   }
10130 
10131   if (IsVarArg) {
10132     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
10133     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
10134     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
10135     MachineFrameInfo &MFI = MF.getFrameInfo();
10136     MachineRegisterInfo &RegInfo = MF.getRegInfo();
10137     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
10138 
10139     // Offset of the first variable argument from stack pointer, and size of
10140     // the vararg save area. For now, the varargs save area is either zero or
10141     // large enough to hold a0-a7.
10142     int VaArgOffset, VarArgsSaveSize;
10143 
10144     // If all registers are allocated, then all varargs must be passed on the
10145     // stack and we don't need to save any argregs.
10146     if (ArgRegs.size() == Idx) {
10147       VaArgOffset = CCInfo.getNextStackOffset();
10148       VarArgsSaveSize = 0;
10149     } else {
10150       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
10151       VaArgOffset = -VarArgsSaveSize;
10152     }
10153 
10154     // Record the frame index of the first variable argument
10155     // which is a value necessary to VASTART.
10156     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10157     RVFI->setVarArgsFrameIndex(FI);
10158 
10159     // If saving an odd number of registers then create an extra stack slot to
10160     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
10161     // offsets to even-numbered registered remain 2*XLEN-aligned.
10162     if (Idx % 2) {
10163       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
10164       VarArgsSaveSize += XLenInBytes;
10165     }
10166 
10167     // Copy the integer registers that may have been used for passing varargs
10168     // to the vararg save area.
10169     for (unsigned I = Idx; I < ArgRegs.size();
10170          ++I, VaArgOffset += XLenInBytes) {
10171       const Register Reg = RegInfo.createVirtualRegister(RC);
10172       RegInfo.addLiveIn(ArgRegs[I], Reg);
10173       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
10174       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10175       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10176       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
10177                                    MachinePointerInfo::getFixedStack(MF, FI));
10178       cast<StoreSDNode>(Store.getNode())
10179           ->getMemOperand()
10180           ->setValue((Value *)nullptr);
10181       OutChains.push_back(Store);
10182     }
10183     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
10184   }
10185 
10186   // All stores are grouped in one node to allow the matching between
10187   // the size of Ins and InVals. This only happens for vararg functions.
10188   if (!OutChains.empty()) {
10189     OutChains.push_back(Chain);
10190     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
10191   }
10192 
10193   return Chain;
10194 }
10195 
10196 /// isEligibleForTailCallOptimization - Check whether the call is eligible
10197 /// for tail call optimization.
10198 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
10199 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
10200     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
10201     const SmallVector<CCValAssign, 16> &ArgLocs) const {
10202 
10203   auto &Callee = CLI.Callee;
10204   auto CalleeCC = CLI.CallConv;
10205   auto &Outs = CLI.Outs;
10206   auto &Caller = MF.getFunction();
10207   auto CallerCC = Caller.getCallingConv();
10208 
10209   // Exception-handling functions need a special set of instructions to
10210   // indicate a return to the hardware. Tail-calling another function would
10211   // probably break this.
10212   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
10213   // should be expanded as new function attributes are introduced.
10214   if (Caller.hasFnAttribute("interrupt"))
10215     return false;
10216 
10217   // Do not tail call opt if the stack is used to pass parameters.
10218   if (CCInfo.getNextStackOffset() != 0)
10219     return false;
10220 
10221   // Do not tail call opt if any parameters need to be passed indirectly.
10222   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
10223   // passed indirectly. So the address of the value will be passed in a
10224   // register, or if not available, then the address is put on the stack. In
10225   // order to pass indirectly, space on the stack often needs to be allocated
10226   // in order to store the value. In this case the CCInfo.getNextStackOffset()
10227   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
10228   // are passed CCValAssign::Indirect.
10229   for (auto &VA : ArgLocs)
10230     if (VA.getLocInfo() == CCValAssign::Indirect)
10231       return false;
10232 
10233   // Do not tail call opt if either caller or callee uses struct return
10234   // semantics.
10235   auto IsCallerStructRet = Caller.hasStructRetAttr();
10236   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
10237   if (IsCallerStructRet || IsCalleeStructRet)
10238     return false;
10239 
10240   // Externally-defined functions with weak linkage should not be
10241   // tail-called. The behaviour of branch instructions in this situation (as
10242   // used for tail calls) is implementation-defined, so we cannot rely on the
10243   // linker replacing the tail call with a return.
10244   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
10245     const GlobalValue *GV = G->getGlobal();
10246     if (GV->hasExternalWeakLinkage())
10247       return false;
10248   }
10249 
10250   // The callee has to preserve all registers the caller needs to preserve.
10251   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
10252   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
10253   if (CalleeCC != CallerCC) {
10254     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
10255     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
10256       return false;
10257   }
10258 
10259   // Byval parameters hand the function a pointer directly into the stack area
10260   // we want to reuse during a tail call. Working around this *is* possible
10261   // but less efficient and uglier in LowerCall.
10262   for (auto &Arg : Outs)
10263     if (Arg.Flags.isByVal())
10264       return false;
10265 
10266   return true;
10267 }
10268 
10269 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
10270   return DAG.getDataLayout().getPrefTypeAlign(
10271       VT.getTypeForEVT(*DAG.getContext()));
10272 }
10273 
10274 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
10275 // and output parameter nodes.
10276 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
10277                                        SmallVectorImpl<SDValue> &InVals) const {
10278   SelectionDAG &DAG = CLI.DAG;
10279   SDLoc &DL = CLI.DL;
10280   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
10281   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
10282   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
10283   SDValue Chain = CLI.Chain;
10284   SDValue Callee = CLI.Callee;
10285   bool &IsTailCall = CLI.IsTailCall;
10286   CallingConv::ID CallConv = CLI.CallConv;
10287   bool IsVarArg = CLI.IsVarArg;
10288   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10289   MVT XLenVT = Subtarget.getXLenVT();
10290 
10291   MachineFunction &MF = DAG.getMachineFunction();
10292 
10293   // Analyze the operands of the call, assigning locations to each operand.
10294   SmallVector<CCValAssign, 16> ArgLocs;
10295   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10296 
10297   if (CallConv == CallingConv::GHC)
10298     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
10299   else
10300     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
10301                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10302                                                     : CC_RISCV);
10303 
10304   // Check if it's really possible to do a tail call.
10305   if (IsTailCall)
10306     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
10307 
10308   if (IsTailCall)
10309     ++NumTailCalls;
10310   else if (CLI.CB && CLI.CB->isMustTailCall())
10311     report_fatal_error("failed to perform tail call elimination on a call "
10312                        "site marked musttail");
10313 
10314   // Get a count of how many bytes are to be pushed on the stack.
10315   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
10316 
10317   // Create local copies for byval args
10318   SmallVector<SDValue, 8> ByValArgs;
10319   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10320     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10321     if (!Flags.isByVal())
10322       continue;
10323 
10324     SDValue Arg = OutVals[i];
10325     unsigned Size = Flags.getByValSize();
10326     Align Alignment = Flags.getNonZeroByValAlign();
10327 
10328     int FI =
10329         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
10330     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10331     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
10332 
10333     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
10334                           /*IsVolatile=*/false,
10335                           /*AlwaysInline=*/false, IsTailCall,
10336                           MachinePointerInfo(), MachinePointerInfo());
10337     ByValArgs.push_back(FIPtr);
10338   }
10339 
10340   if (!IsTailCall)
10341     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
10342 
10343   // Copy argument values to their designated locations.
10344   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
10345   SmallVector<SDValue, 8> MemOpChains;
10346   SDValue StackPtr;
10347   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
10348     CCValAssign &VA = ArgLocs[i];
10349     SDValue ArgValue = OutVals[i];
10350     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10351 
10352     // Handle passing f64 on RV32D with a soft float ABI as a special case.
10353     bool IsF64OnRV32DSoftABI =
10354         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
10355     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
10356       SDValue SplitF64 = DAG.getNode(
10357           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
10358       SDValue Lo = SplitF64.getValue(0);
10359       SDValue Hi = SplitF64.getValue(1);
10360 
10361       Register RegLo = VA.getLocReg();
10362       RegsToPass.push_back(std::make_pair(RegLo, Lo));
10363 
10364       if (RegLo == RISCV::X17) {
10365         // Second half of f64 is passed on the stack.
10366         // Work out the address of the stack slot.
10367         if (!StackPtr.getNode())
10368           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10369         // Emit the store.
10370         MemOpChains.push_back(
10371             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
10372       } else {
10373         // Second half of f64 is passed in another GPR.
10374         assert(RegLo < RISCV::X31 && "Invalid register pair");
10375         Register RegHigh = RegLo + 1;
10376         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
10377       }
10378       continue;
10379     }
10380 
10381     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
10382     // as any other MemLoc.
10383 
10384     // Promote the value if needed.
10385     // For now, only handle fully promoted and indirect arguments.
10386     if (VA.getLocInfo() == CCValAssign::Indirect) {
10387       // Store the argument in a stack slot and pass its address.
10388       Align StackAlign =
10389           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
10390                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
10391       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
10392       // If the original argument was split (e.g. i128), we need
10393       // to store the required parts of it here (and pass just one address).
10394       // Vectors may be partly split to registers and partly to the stack, in
10395       // which case the base address is partly offset and subsequent stores are
10396       // relative to that.
10397       unsigned ArgIndex = Outs[i].OrigArgIndex;
10398       unsigned ArgPartOffset = Outs[i].PartOffset;
10399       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10400       // Calculate the total size to store. We don't have access to what we're
10401       // actually storing other than performing the loop and collecting the
10402       // info.
10403       SmallVector<std::pair<SDValue, SDValue>> Parts;
10404       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
10405         SDValue PartValue = OutVals[i + 1];
10406         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
10407         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10408         EVT PartVT = PartValue.getValueType();
10409         if (PartVT.isScalableVector())
10410           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10411         StoredSize += PartVT.getStoreSize();
10412         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
10413         Parts.push_back(std::make_pair(PartValue, Offset));
10414         ++i;
10415       }
10416       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
10417       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
10418       MemOpChains.push_back(
10419           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
10420                        MachinePointerInfo::getFixedStack(MF, FI)));
10421       for (const auto &Part : Parts) {
10422         SDValue PartValue = Part.first;
10423         SDValue PartOffset = Part.second;
10424         SDValue Address =
10425             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
10426         MemOpChains.push_back(
10427             DAG.getStore(Chain, DL, PartValue, Address,
10428                          MachinePointerInfo::getFixedStack(MF, FI)));
10429       }
10430       ArgValue = SpillSlot;
10431     } else {
10432       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
10433     }
10434 
10435     // Use local copy if it is a byval arg.
10436     if (Flags.isByVal())
10437       ArgValue = ByValArgs[j++];
10438 
10439     if (VA.isRegLoc()) {
10440       // Queue up the argument copies and emit them at the end.
10441       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
10442     } else {
10443       assert(VA.isMemLoc() && "Argument not register or memory");
10444       assert(!IsTailCall && "Tail call not allowed if stack is used "
10445                             "for passing parameters");
10446 
10447       // Work out the address of the stack slot.
10448       if (!StackPtr.getNode())
10449         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10450       SDValue Address =
10451           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
10452                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
10453 
10454       // Emit the store.
10455       MemOpChains.push_back(
10456           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
10457     }
10458   }
10459 
10460   // Join the stores, which are independent of one another.
10461   if (!MemOpChains.empty())
10462     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
10463 
10464   SDValue Glue;
10465 
10466   // Build a sequence of copy-to-reg nodes, chained and glued together.
10467   for (auto &Reg : RegsToPass) {
10468     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
10469     Glue = Chain.getValue(1);
10470   }
10471 
10472   // Validate that none of the argument registers have been marked as
10473   // reserved, if so report an error. Do the same for the return address if this
10474   // is not a tailcall.
10475   validateCCReservedRegs(RegsToPass, MF);
10476   if (!IsTailCall &&
10477       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
10478     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10479         MF.getFunction(),
10480         "Return address register required, but has been reserved."});
10481 
10482   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
10483   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
10484   // split it and then direct call can be matched by PseudoCALL.
10485   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
10486     const GlobalValue *GV = S->getGlobal();
10487 
10488     unsigned OpFlags = RISCVII::MO_CALL;
10489     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
10490       OpFlags = RISCVII::MO_PLT;
10491 
10492     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
10493   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
10494     unsigned OpFlags = RISCVII::MO_CALL;
10495 
10496     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
10497                                                  nullptr))
10498       OpFlags = RISCVII::MO_PLT;
10499 
10500     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
10501   }
10502 
10503   // The first call operand is the chain and the second is the target address.
10504   SmallVector<SDValue, 8> Ops;
10505   Ops.push_back(Chain);
10506   Ops.push_back(Callee);
10507 
10508   // Add argument registers to the end of the list so that they are
10509   // known live into the call.
10510   for (auto &Reg : RegsToPass)
10511     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
10512 
10513   if (!IsTailCall) {
10514     // Add a register mask operand representing the call-preserved registers.
10515     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
10516     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
10517     assert(Mask && "Missing call preserved mask for calling convention");
10518     Ops.push_back(DAG.getRegisterMask(Mask));
10519   }
10520 
10521   // Glue the call to the argument copies, if any.
10522   if (Glue.getNode())
10523     Ops.push_back(Glue);
10524 
10525   // Emit the call.
10526   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10527 
10528   if (IsTailCall) {
10529     MF.getFrameInfo().setHasTailCall();
10530     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
10531   }
10532 
10533   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
10534   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
10535   Glue = Chain.getValue(1);
10536 
10537   // Mark the end of the call, which is glued to the call itself.
10538   Chain = DAG.getCALLSEQ_END(Chain,
10539                              DAG.getConstant(NumBytes, DL, PtrVT, true),
10540                              DAG.getConstant(0, DL, PtrVT, true),
10541                              Glue, DL);
10542   Glue = Chain.getValue(1);
10543 
10544   // Assign locations to each value returned by this call.
10545   SmallVector<CCValAssign, 16> RVLocs;
10546   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
10547   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
10548 
10549   // Copy all of the result registers out of their specified physreg.
10550   for (auto &VA : RVLocs) {
10551     // Copy the value out
10552     SDValue RetValue =
10553         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
10554     // Glue the RetValue to the end of the call sequence
10555     Chain = RetValue.getValue(1);
10556     Glue = RetValue.getValue(2);
10557 
10558     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10559       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
10560       SDValue RetValue2 =
10561           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
10562       Chain = RetValue2.getValue(1);
10563       Glue = RetValue2.getValue(2);
10564       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
10565                              RetValue2);
10566     }
10567 
10568     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
10569 
10570     InVals.push_back(RetValue);
10571   }
10572 
10573   return Chain;
10574 }
10575 
10576 bool RISCVTargetLowering::CanLowerReturn(
10577     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
10578     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
10579   SmallVector<CCValAssign, 16> RVLocs;
10580   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
10581 
10582   Optional<unsigned> FirstMaskArgument;
10583   if (Subtarget.hasVInstructions())
10584     FirstMaskArgument = preAssignMask(Outs);
10585 
10586   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10587     MVT VT = Outs[i].VT;
10588     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10589     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10590     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
10591                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
10592                  *this, FirstMaskArgument))
10593       return false;
10594   }
10595   return true;
10596 }
10597 
10598 SDValue
10599 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
10600                                  bool IsVarArg,
10601                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
10602                                  const SmallVectorImpl<SDValue> &OutVals,
10603                                  const SDLoc &DL, SelectionDAG &DAG) const {
10604   const MachineFunction &MF = DAG.getMachineFunction();
10605   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10606 
10607   // Stores the assignment of the return value to a location.
10608   SmallVector<CCValAssign, 16> RVLocs;
10609 
10610   // Info about the registers and stack slot.
10611   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
10612                  *DAG.getContext());
10613 
10614   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
10615                     nullptr, CC_RISCV);
10616 
10617   if (CallConv == CallingConv::GHC && !RVLocs.empty())
10618     report_fatal_error("GHC functions return void only");
10619 
10620   SDValue Glue;
10621   SmallVector<SDValue, 4> RetOps(1, Chain);
10622 
10623   // Copy the result values into the output registers.
10624   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
10625     SDValue Val = OutVals[i];
10626     CCValAssign &VA = RVLocs[i];
10627     assert(VA.isRegLoc() && "Can only return in registers!");
10628 
10629     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10630       // Handle returning f64 on RV32D with a soft float ABI.
10631       assert(VA.isRegLoc() && "Expected return via registers");
10632       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
10633                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
10634       SDValue Lo = SplitF64.getValue(0);
10635       SDValue Hi = SplitF64.getValue(1);
10636       Register RegLo = VA.getLocReg();
10637       assert(RegLo < RISCV::X31 && "Invalid register pair");
10638       Register RegHi = RegLo + 1;
10639 
10640       if (STI.isRegisterReservedByUser(RegLo) ||
10641           STI.isRegisterReservedByUser(RegHi))
10642         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10643             MF.getFunction(),
10644             "Return value register required, but has been reserved."});
10645 
10646       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
10647       Glue = Chain.getValue(1);
10648       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
10649       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
10650       Glue = Chain.getValue(1);
10651       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
10652     } else {
10653       // Handle a 'normal' return.
10654       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
10655       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
10656 
10657       if (STI.isRegisterReservedByUser(VA.getLocReg()))
10658         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10659             MF.getFunction(),
10660             "Return value register required, but has been reserved."});
10661 
10662       // Guarantee that all emitted copies are stuck together.
10663       Glue = Chain.getValue(1);
10664       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
10665     }
10666   }
10667 
10668   RetOps[0] = Chain; // Update chain.
10669 
10670   // Add the glue node if we have it.
10671   if (Glue.getNode()) {
10672     RetOps.push_back(Glue);
10673   }
10674 
10675   unsigned RetOpc = RISCVISD::RET_FLAG;
10676   // Interrupt service routines use different return instructions.
10677   const Function &Func = DAG.getMachineFunction().getFunction();
10678   if (Func.hasFnAttribute("interrupt")) {
10679     if (!Func.getReturnType()->isVoidTy())
10680       report_fatal_error(
10681           "Functions with the interrupt attribute must have void return type!");
10682 
10683     MachineFunction &MF = DAG.getMachineFunction();
10684     StringRef Kind =
10685       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10686 
10687     if (Kind == "user")
10688       RetOpc = RISCVISD::URET_FLAG;
10689     else if (Kind == "supervisor")
10690       RetOpc = RISCVISD::SRET_FLAG;
10691     else
10692       RetOpc = RISCVISD::MRET_FLAG;
10693   }
10694 
10695   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
10696 }
10697 
10698 void RISCVTargetLowering::validateCCReservedRegs(
10699     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
10700     MachineFunction &MF) const {
10701   const Function &F = MF.getFunction();
10702   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10703 
10704   if (llvm::any_of(Regs, [&STI](auto Reg) {
10705         return STI.isRegisterReservedByUser(Reg.first);
10706       }))
10707     F.getContext().diagnose(DiagnosticInfoUnsupported{
10708         F, "Argument register required, but has been reserved."});
10709 }
10710 
10711 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
10712   return CI->isTailCall();
10713 }
10714 
10715 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
10716 #define NODE_NAME_CASE(NODE)                                                   \
10717   case RISCVISD::NODE:                                                         \
10718     return "RISCVISD::" #NODE;
10719   // clang-format off
10720   switch ((RISCVISD::NodeType)Opcode) {
10721   case RISCVISD::FIRST_NUMBER:
10722     break;
10723   NODE_NAME_CASE(RET_FLAG)
10724   NODE_NAME_CASE(URET_FLAG)
10725   NODE_NAME_CASE(SRET_FLAG)
10726   NODE_NAME_CASE(MRET_FLAG)
10727   NODE_NAME_CASE(CALL)
10728   NODE_NAME_CASE(SELECT_CC)
10729   NODE_NAME_CASE(BR_CC)
10730   NODE_NAME_CASE(BuildPairF64)
10731   NODE_NAME_CASE(SplitF64)
10732   NODE_NAME_CASE(TAIL)
10733   NODE_NAME_CASE(MULHSU)
10734   NODE_NAME_CASE(SLLW)
10735   NODE_NAME_CASE(SRAW)
10736   NODE_NAME_CASE(SRLW)
10737   NODE_NAME_CASE(DIVW)
10738   NODE_NAME_CASE(DIVUW)
10739   NODE_NAME_CASE(REMUW)
10740   NODE_NAME_CASE(ROLW)
10741   NODE_NAME_CASE(RORW)
10742   NODE_NAME_CASE(CLZW)
10743   NODE_NAME_CASE(CTZW)
10744   NODE_NAME_CASE(FSLW)
10745   NODE_NAME_CASE(FSRW)
10746   NODE_NAME_CASE(FSL)
10747   NODE_NAME_CASE(FSR)
10748   NODE_NAME_CASE(FMV_H_X)
10749   NODE_NAME_CASE(FMV_X_ANYEXTH)
10750   NODE_NAME_CASE(FMV_X_SIGNEXTH)
10751   NODE_NAME_CASE(FMV_W_X_RV64)
10752   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
10753   NODE_NAME_CASE(FCVT_X)
10754   NODE_NAME_CASE(FCVT_XU)
10755   NODE_NAME_CASE(FCVT_W_RV64)
10756   NODE_NAME_CASE(FCVT_WU_RV64)
10757   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
10758   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
10759   NODE_NAME_CASE(READ_CYCLE_WIDE)
10760   NODE_NAME_CASE(GREV)
10761   NODE_NAME_CASE(GREVW)
10762   NODE_NAME_CASE(GORC)
10763   NODE_NAME_CASE(GORCW)
10764   NODE_NAME_CASE(SHFL)
10765   NODE_NAME_CASE(SHFLW)
10766   NODE_NAME_CASE(UNSHFL)
10767   NODE_NAME_CASE(UNSHFLW)
10768   NODE_NAME_CASE(BFP)
10769   NODE_NAME_CASE(BFPW)
10770   NODE_NAME_CASE(BCOMPRESS)
10771   NODE_NAME_CASE(BCOMPRESSW)
10772   NODE_NAME_CASE(BDECOMPRESS)
10773   NODE_NAME_CASE(BDECOMPRESSW)
10774   NODE_NAME_CASE(VMV_V_X_VL)
10775   NODE_NAME_CASE(VFMV_V_F_VL)
10776   NODE_NAME_CASE(VMV_X_S)
10777   NODE_NAME_CASE(VMV_S_X_VL)
10778   NODE_NAME_CASE(VFMV_S_F_VL)
10779   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
10780   NODE_NAME_CASE(READ_VLENB)
10781   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
10782   NODE_NAME_CASE(VSLIDEUP_VL)
10783   NODE_NAME_CASE(VSLIDE1UP_VL)
10784   NODE_NAME_CASE(VSLIDEDOWN_VL)
10785   NODE_NAME_CASE(VSLIDE1DOWN_VL)
10786   NODE_NAME_CASE(VID_VL)
10787   NODE_NAME_CASE(VFNCVT_ROD_VL)
10788   NODE_NAME_CASE(VECREDUCE_ADD_VL)
10789   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
10790   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
10791   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
10792   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
10793   NODE_NAME_CASE(VECREDUCE_AND_VL)
10794   NODE_NAME_CASE(VECREDUCE_OR_VL)
10795   NODE_NAME_CASE(VECREDUCE_XOR_VL)
10796   NODE_NAME_CASE(VECREDUCE_FADD_VL)
10797   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
10798   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
10799   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
10800   NODE_NAME_CASE(ADD_VL)
10801   NODE_NAME_CASE(AND_VL)
10802   NODE_NAME_CASE(MUL_VL)
10803   NODE_NAME_CASE(OR_VL)
10804   NODE_NAME_CASE(SDIV_VL)
10805   NODE_NAME_CASE(SHL_VL)
10806   NODE_NAME_CASE(SREM_VL)
10807   NODE_NAME_CASE(SRA_VL)
10808   NODE_NAME_CASE(SRL_VL)
10809   NODE_NAME_CASE(SUB_VL)
10810   NODE_NAME_CASE(UDIV_VL)
10811   NODE_NAME_CASE(UREM_VL)
10812   NODE_NAME_CASE(XOR_VL)
10813   NODE_NAME_CASE(SADDSAT_VL)
10814   NODE_NAME_CASE(UADDSAT_VL)
10815   NODE_NAME_CASE(SSUBSAT_VL)
10816   NODE_NAME_CASE(USUBSAT_VL)
10817   NODE_NAME_CASE(FADD_VL)
10818   NODE_NAME_CASE(FSUB_VL)
10819   NODE_NAME_CASE(FMUL_VL)
10820   NODE_NAME_CASE(FDIV_VL)
10821   NODE_NAME_CASE(FNEG_VL)
10822   NODE_NAME_CASE(FABS_VL)
10823   NODE_NAME_CASE(FSQRT_VL)
10824   NODE_NAME_CASE(FMA_VL)
10825   NODE_NAME_CASE(FCOPYSIGN_VL)
10826   NODE_NAME_CASE(SMIN_VL)
10827   NODE_NAME_CASE(SMAX_VL)
10828   NODE_NAME_CASE(UMIN_VL)
10829   NODE_NAME_CASE(UMAX_VL)
10830   NODE_NAME_CASE(FMINNUM_VL)
10831   NODE_NAME_CASE(FMAXNUM_VL)
10832   NODE_NAME_CASE(MULHS_VL)
10833   NODE_NAME_CASE(MULHU_VL)
10834   NODE_NAME_CASE(FP_TO_SINT_VL)
10835   NODE_NAME_CASE(FP_TO_UINT_VL)
10836   NODE_NAME_CASE(SINT_TO_FP_VL)
10837   NODE_NAME_CASE(UINT_TO_FP_VL)
10838   NODE_NAME_CASE(FP_EXTEND_VL)
10839   NODE_NAME_CASE(FP_ROUND_VL)
10840   NODE_NAME_CASE(VWMUL_VL)
10841   NODE_NAME_CASE(VWMULU_VL)
10842   NODE_NAME_CASE(VWMULSU_VL)
10843   NODE_NAME_CASE(VWADD_VL)
10844   NODE_NAME_CASE(VWADDU_VL)
10845   NODE_NAME_CASE(VWSUB_VL)
10846   NODE_NAME_CASE(VWSUBU_VL)
10847   NODE_NAME_CASE(VWADD_W_VL)
10848   NODE_NAME_CASE(VWADDU_W_VL)
10849   NODE_NAME_CASE(VWSUB_W_VL)
10850   NODE_NAME_CASE(VWSUBU_W_VL)
10851   NODE_NAME_CASE(SETCC_VL)
10852   NODE_NAME_CASE(VSELECT_VL)
10853   NODE_NAME_CASE(VP_MERGE_VL)
10854   NODE_NAME_CASE(VMAND_VL)
10855   NODE_NAME_CASE(VMOR_VL)
10856   NODE_NAME_CASE(VMXOR_VL)
10857   NODE_NAME_CASE(VMCLR_VL)
10858   NODE_NAME_CASE(VMSET_VL)
10859   NODE_NAME_CASE(VRGATHER_VX_VL)
10860   NODE_NAME_CASE(VRGATHER_VV_VL)
10861   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
10862   NODE_NAME_CASE(VSEXT_VL)
10863   NODE_NAME_CASE(VZEXT_VL)
10864   NODE_NAME_CASE(VCPOP_VL)
10865   NODE_NAME_CASE(READ_CSR)
10866   NODE_NAME_CASE(WRITE_CSR)
10867   NODE_NAME_CASE(SWAP_CSR)
10868   }
10869   // clang-format on
10870   return nullptr;
10871 #undef NODE_NAME_CASE
10872 }
10873 
10874 /// getConstraintType - Given a constraint letter, return the type of
10875 /// constraint it is for this target.
10876 RISCVTargetLowering::ConstraintType
10877 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
10878   if (Constraint.size() == 1) {
10879     switch (Constraint[0]) {
10880     default:
10881       break;
10882     case 'f':
10883       return C_RegisterClass;
10884     case 'I':
10885     case 'J':
10886     case 'K':
10887       return C_Immediate;
10888     case 'A':
10889       return C_Memory;
10890     case 'S': // A symbolic address
10891       return C_Other;
10892     }
10893   } else {
10894     if (Constraint == "vr" || Constraint == "vm")
10895       return C_RegisterClass;
10896   }
10897   return TargetLowering::getConstraintType(Constraint);
10898 }
10899 
10900 std::pair<unsigned, const TargetRegisterClass *>
10901 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
10902                                                   StringRef Constraint,
10903                                                   MVT VT) const {
10904   // First, see if this is a constraint that directly corresponds to a
10905   // RISCV register class.
10906   if (Constraint.size() == 1) {
10907     switch (Constraint[0]) {
10908     case 'r':
10909       // TODO: Support fixed vectors up to XLen for P extension?
10910       if (VT.isVector())
10911         break;
10912       return std::make_pair(0U, &RISCV::GPRRegClass);
10913     case 'f':
10914       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
10915         return std::make_pair(0U, &RISCV::FPR16RegClass);
10916       if (Subtarget.hasStdExtF() && VT == MVT::f32)
10917         return std::make_pair(0U, &RISCV::FPR32RegClass);
10918       if (Subtarget.hasStdExtD() && VT == MVT::f64)
10919         return std::make_pair(0U, &RISCV::FPR64RegClass);
10920       break;
10921     default:
10922       break;
10923     }
10924   } else if (Constraint == "vr") {
10925     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
10926                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10927       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
10928         return std::make_pair(0U, RC);
10929     }
10930   } else if (Constraint == "vm") {
10931     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
10932       return std::make_pair(0U, &RISCV::VMV0RegClass);
10933   }
10934 
10935   // Clang will correctly decode the usage of register name aliases into their
10936   // official names. However, other frontends like `rustc` do not. This allows
10937   // users of these frontends to use the ABI names for registers in LLVM-style
10938   // register constraints.
10939   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
10940                                .Case("{zero}", RISCV::X0)
10941                                .Case("{ra}", RISCV::X1)
10942                                .Case("{sp}", RISCV::X2)
10943                                .Case("{gp}", RISCV::X3)
10944                                .Case("{tp}", RISCV::X4)
10945                                .Case("{t0}", RISCV::X5)
10946                                .Case("{t1}", RISCV::X6)
10947                                .Case("{t2}", RISCV::X7)
10948                                .Cases("{s0}", "{fp}", RISCV::X8)
10949                                .Case("{s1}", RISCV::X9)
10950                                .Case("{a0}", RISCV::X10)
10951                                .Case("{a1}", RISCV::X11)
10952                                .Case("{a2}", RISCV::X12)
10953                                .Case("{a3}", RISCV::X13)
10954                                .Case("{a4}", RISCV::X14)
10955                                .Case("{a5}", RISCV::X15)
10956                                .Case("{a6}", RISCV::X16)
10957                                .Case("{a7}", RISCV::X17)
10958                                .Case("{s2}", RISCV::X18)
10959                                .Case("{s3}", RISCV::X19)
10960                                .Case("{s4}", RISCV::X20)
10961                                .Case("{s5}", RISCV::X21)
10962                                .Case("{s6}", RISCV::X22)
10963                                .Case("{s7}", RISCV::X23)
10964                                .Case("{s8}", RISCV::X24)
10965                                .Case("{s9}", RISCV::X25)
10966                                .Case("{s10}", RISCV::X26)
10967                                .Case("{s11}", RISCV::X27)
10968                                .Case("{t3}", RISCV::X28)
10969                                .Case("{t4}", RISCV::X29)
10970                                .Case("{t5}", RISCV::X30)
10971                                .Case("{t6}", RISCV::X31)
10972                                .Default(RISCV::NoRegister);
10973   if (XRegFromAlias != RISCV::NoRegister)
10974     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
10975 
10976   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
10977   // TableGen record rather than the AsmName to choose registers for InlineAsm
10978   // constraints, plus we want to match those names to the widest floating point
10979   // register type available, manually select floating point registers here.
10980   //
10981   // The second case is the ABI name of the register, so that frontends can also
10982   // use the ABI names in register constraint lists.
10983   if (Subtarget.hasStdExtF()) {
10984     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
10985                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
10986                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
10987                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
10988                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
10989                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
10990                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
10991                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
10992                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
10993                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
10994                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
10995                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
10996                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
10997                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
10998                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
10999                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
11000                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
11001                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
11002                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
11003                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
11004                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
11005                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
11006                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
11007                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
11008                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
11009                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
11010                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
11011                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
11012                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
11013                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
11014                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
11015                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
11016                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
11017                         .Default(RISCV::NoRegister);
11018     if (FReg != RISCV::NoRegister) {
11019       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
11020       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
11021         unsigned RegNo = FReg - RISCV::F0_F;
11022         unsigned DReg = RISCV::F0_D + RegNo;
11023         return std::make_pair(DReg, &RISCV::FPR64RegClass);
11024       }
11025       if (VT == MVT::f32 || VT == MVT::Other)
11026         return std::make_pair(FReg, &RISCV::FPR32RegClass);
11027       if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
11028         unsigned RegNo = FReg - RISCV::F0_F;
11029         unsigned HReg = RISCV::F0_H + RegNo;
11030         return std::make_pair(HReg, &RISCV::FPR16RegClass);
11031       }
11032     }
11033   }
11034 
11035   if (Subtarget.hasVInstructions()) {
11036     Register VReg = StringSwitch<Register>(Constraint.lower())
11037                         .Case("{v0}", RISCV::V0)
11038                         .Case("{v1}", RISCV::V1)
11039                         .Case("{v2}", RISCV::V2)
11040                         .Case("{v3}", RISCV::V3)
11041                         .Case("{v4}", RISCV::V4)
11042                         .Case("{v5}", RISCV::V5)
11043                         .Case("{v6}", RISCV::V6)
11044                         .Case("{v7}", RISCV::V7)
11045                         .Case("{v8}", RISCV::V8)
11046                         .Case("{v9}", RISCV::V9)
11047                         .Case("{v10}", RISCV::V10)
11048                         .Case("{v11}", RISCV::V11)
11049                         .Case("{v12}", RISCV::V12)
11050                         .Case("{v13}", RISCV::V13)
11051                         .Case("{v14}", RISCV::V14)
11052                         .Case("{v15}", RISCV::V15)
11053                         .Case("{v16}", RISCV::V16)
11054                         .Case("{v17}", RISCV::V17)
11055                         .Case("{v18}", RISCV::V18)
11056                         .Case("{v19}", RISCV::V19)
11057                         .Case("{v20}", RISCV::V20)
11058                         .Case("{v21}", RISCV::V21)
11059                         .Case("{v22}", RISCV::V22)
11060                         .Case("{v23}", RISCV::V23)
11061                         .Case("{v24}", RISCV::V24)
11062                         .Case("{v25}", RISCV::V25)
11063                         .Case("{v26}", RISCV::V26)
11064                         .Case("{v27}", RISCV::V27)
11065                         .Case("{v28}", RISCV::V28)
11066                         .Case("{v29}", RISCV::V29)
11067                         .Case("{v30}", RISCV::V30)
11068                         .Case("{v31}", RISCV::V31)
11069                         .Default(RISCV::NoRegister);
11070     if (VReg != RISCV::NoRegister) {
11071       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
11072         return std::make_pair(VReg, &RISCV::VMRegClass);
11073       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
11074         return std::make_pair(VReg, &RISCV::VRRegClass);
11075       for (const auto *RC :
11076            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11077         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
11078           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
11079           return std::make_pair(VReg, RC);
11080         }
11081       }
11082     }
11083   }
11084 
11085   std::pair<Register, const TargetRegisterClass *> Res =
11086       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
11087 
11088   // If we picked one of the Zfinx register classes, remap it to the GPR class.
11089   // FIXME: When Zfinx is supported in CodeGen this will need to take the
11090   // Subtarget into account.
11091   if (Res.second == &RISCV::GPRF16RegClass ||
11092       Res.second == &RISCV::GPRF32RegClass ||
11093       Res.second == &RISCV::GPRF64RegClass)
11094     return std::make_pair(Res.first, &RISCV::GPRRegClass);
11095 
11096   return Res;
11097 }
11098 
11099 unsigned
11100 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
11101   // Currently only support length 1 constraints.
11102   if (ConstraintCode.size() == 1) {
11103     switch (ConstraintCode[0]) {
11104     case 'A':
11105       return InlineAsm::Constraint_A;
11106     default:
11107       break;
11108     }
11109   }
11110 
11111   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
11112 }
11113 
11114 void RISCVTargetLowering::LowerAsmOperandForConstraint(
11115     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
11116     SelectionDAG &DAG) const {
11117   // Currently only support length 1 constraints.
11118   if (Constraint.length() == 1) {
11119     switch (Constraint[0]) {
11120     case 'I':
11121       // Validate & create a 12-bit signed immediate operand.
11122       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11123         uint64_t CVal = C->getSExtValue();
11124         if (isInt<12>(CVal))
11125           Ops.push_back(
11126               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11127       }
11128       return;
11129     case 'J':
11130       // Validate & create an integer zero operand.
11131       if (auto *C = dyn_cast<ConstantSDNode>(Op))
11132         if (C->getZExtValue() == 0)
11133           Ops.push_back(
11134               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
11135       return;
11136     case 'K':
11137       // Validate & create a 5-bit unsigned immediate operand.
11138       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11139         uint64_t CVal = C->getZExtValue();
11140         if (isUInt<5>(CVal))
11141           Ops.push_back(
11142               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11143       }
11144       return;
11145     case 'S':
11146       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
11147         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
11148                                                  GA->getValueType(0)));
11149       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
11150         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
11151                                                 BA->getValueType(0)));
11152       }
11153       return;
11154     default:
11155       break;
11156     }
11157   }
11158   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
11159 }
11160 
11161 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
11162                                                    Instruction *Inst,
11163                                                    AtomicOrdering Ord) const {
11164   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
11165     return Builder.CreateFence(Ord);
11166   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
11167     return Builder.CreateFence(AtomicOrdering::Release);
11168   return nullptr;
11169 }
11170 
11171 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
11172                                                     Instruction *Inst,
11173                                                     AtomicOrdering Ord) const {
11174   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
11175     return Builder.CreateFence(AtomicOrdering::Acquire);
11176   return nullptr;
11177 }
11178 
11179 TargetLowering::AtomicExpansionKind
11180 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
11181   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
11182   // point operations can't be used in an lr/sc sequence without breaking the
11183   // forward-progress guarantee.
11184   if (AI->isFloatingPointOperation())
11185     return AtomicExpansionKind::CmpXChg;
11186 
11187   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
11188   if (Size == 8 || Size == 16)
11189     return AtomicExpansionKind::MaskedIntrinsic;
11190   return AtomicExpansionKind::None;
11191 }
11192 
11193 static Intrinsic::ID
11194 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
11195   if (XLen == 32) {
11196     switch (BinOp) {
11197     default:
11198       llvm_unreachable("Unexpected AtomicRMW BinOp");
11199     case AtomicRMWInst::Xchg:
11200       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
11201     case AtomicRMWInst::Add:
11202       return Intrinsic::riscv_masked_atomicrmw_add_i32;
11203     case AtomicRMWInst::Sub:
11204       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
11205     case AtomicRMWInst::Nand:
11206       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
11207     case AtomicRMWInst::Max:
11208       return Intrinsic::riscv_masked_atomicrmw_max_i32;
11209     case AtomicRMWInst::Min:
11210       return Intrinsic::riscv_masked_atomicrmw_min_i32;
11211     case AtomicRMWInst::UMax:
11212       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
11213     case AtomicRMWInst::UMin:
11214       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
11215     }
11216   }
11217 
11218   if (XLen == 64) {
11219     switch (BinOp) {
11220     default:
11221       llvm_unreachable("Unexpected AtomicRMW BinOp");
11222     case AtomicRMWInst::Xchg:
11223       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
11224     case AtomicRMWInst::Add:
11225       return Intrinsic::riscv_masked_atomicrmw_add_i64;
11226     case AtomicRMWInst::Sub:
11227       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
11228     case AtomicRMWInst::Nand:
11229       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
11230     case AtomicRMWInst::Max:
11231       return Intrinsic::riscv_masked_atomicrmw_max_i64;
11232     case AtomicRMWInst::Min:
11233       return Intrinsic::riscv_masked_atomicrmw_min_i64;
11234     case AtomicRMWInst::UMax:
11235       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
11236     case AtomicRMWInst::UMin:
11237       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
11238     }
11239   }
11240 
11241   llvm_unreachable("Unexpected XLen\n");
11242 }
11243 
11244 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
11245     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
11246     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
11247   unsigned XLen = Subtarget.getXLen();
11248   Value *Ordering =
11249       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
11250   Type *Tys[] = {AlignedAddr->getType()};
11251   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
11252       AI->getModule(),
11253       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
11254 
11255   if (XLen == 64) {
11256     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
11257     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11258     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
11259   }
11260 
11261   Value *Result;
11262 
11263   // Must pass the shift amount needed to sign extend the loaded value prior
11264   // to performing a signed comparison for min/max. ShiftAmt is the number of
11265   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
11266   // is the number of bits to left+right shift the value in order to
11267   // sign-extend.
11268   if (AI->getOperation() == AtomicRMWInst::Min ||
11269       AI->getOperation() == AtomicRMWInst::Max) {
11270     const DataLayout &DL = AI->getModule()->getDataLayout();
11271     unsigned ValWidth =
11272         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
11273     Value *SextShamt =
11274         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
11275     Result = Builder.CreateCall(LrwOpScwLoop,
11276                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
11277   } else {
11278     Result =
11279         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
11280   }
11281 
11282   if (XLen == 64)
11283     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11284   return Result;
11285 }
11286 
11287 TargetLowering::AtomicExpansionKind
11288 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
11289     AtomicCmpXchgInst *CI) const {
11290   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
11291   if (Size == 8 || Size == 16)
11292     return AtomicExpansionKind::MaskedIntrinsic;
11293   return AtomicExpansionKind::None;
11294 }
11295 
11296 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
11297     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
11298     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
11299   unsigned XLen = Subtarget.getXLen();
11300   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
11301   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
11302   if (XLen == 64) {
11303     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
11304     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
11305     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11306     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
11307   }
11308   Type *Tys[] = {AlignedAddr->getType()};
11309   Function *MaskedCmpXchg =
11310       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
11311   Value *Result = Builder.CreateCall(
11312       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
11313   if (XLen == 64)
11314     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11315   return Result;
11316 }
11317 
11318 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
11319   return false;
11320 }
11321 
11322 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
11323                                                EVT VT) const {
11324   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
11325     return false;
11326 
11327   switch (FPVT.getSimpleVT().SimpleTy) {
11328   case MVT::f16:
11329     return Subtarget.hasStdExtZfh();
11330   case MVT::f32:
11331     return Subtarget.hasStdExtF();
11332   case MVT::f64:
11333     return Subtarget.hasStdExtD();
11334   default:
11335     return false;
11336   }
11337 }
11338 
11339 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
11340   // If we are using the small code model, we can reduce size of jump table
11341   // entry to 4 bytes.
11342   if (Subtarget.is64Bit() && !isPositionIndependent() &&
11343       getTargetMachine().getCodeModel() == CodeModel::Small) {
11344     return MachineJumpTableInfo::EK_Custom32;
11345   }
11346   return TargetLowering::getJumpTableEncoding();
11347 }
11348 
11349 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
11350     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
11351     unsigned uid, MCContext &Ctx) const {
11352   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
11353          getTargetMachine().getCodeModel() == CodeModel::Small);
11354   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
11355 }
11356 
11357 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
11358                                                      EVT VT) const {
11359   VT = VT.getScalarType();
11360 
11361   if (!VT.isSimple())
11362     return false;
11363 
11364   switch (VT.getSimpleVT().SimpleTy) {
11365   case MVT::f16:
11366     return Subtarget.hasStdExtZfh();
11367   case MVT::f32:
11368     return Subtarget.hasStdExtF();
11369   case MVT::f64:
11370     return Subtarget.hasStdExtD();
11371   default:
11372     break;
11373   }
11374 
11375   return false;
11376 }
11377 
11378 Register RISCVTargetLowering::getExceptionPointerRegister(
11379     const Constant *PersonalityFn) const {
11380   return RISCV::X10;
11381 }
11382 
11383 Register RISCVTargetLowering::getExceptionSelectorRegister(
11384     const Constant *PersonalityFn) const {
11385   return RISCV::X11;
11386 }
11387 
11388 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
11389   // Return false to suppress the unnecessary extensions if the LibCall
11390   // arguments or return value is f32 type for LP64 ABI.
11391   RISCVABI::ABI ABI = Subtarget.getTargetABI();
11392   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
11393     return false;
11394 
11395   return true;
11396 }
11397 
11398 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
11399   if (Subtarget.is64Bit() && Type == MVT::i32)
11400     return true;
11401 
11402   return IsSigned;
11403 }
11404 
11405 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
11406                                                  SDValue C) const {
11407   // Check integral scalar types.
11408   if (VT.isScalarInteger()) {
11409     // Omit the optimization if the sub target has the M extension and the data
11410     // size exceeds XLen.
11411     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
11412       return false;
11413     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
11414       // Break the MUL to a SLLI and an ADD/SUB.
11415       const APInt &Imm = ConstNode->getAPIntValue();
11416       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
11417           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
11418         return true;
11419       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
11420       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
11421           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
11422            (Imm - 8).isPowerOf2()))
11423         return true;
11424       // Omit the following optimization if the sub target has the M extension
11425       // and the data size >= XLen.
11426       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
11427         return false;
11428       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
11429       // a pair of LUI/ADDI.
11430       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
11431         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
11432         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
11433             (1 - ImmS).isPowerOf2())
11434         return true;
11435       }
11436     }
11437   }
11438 
11439   return false;
11440 }
11441 
11442 bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
11443                                                       SDValue ConstNode) const {
11444   // Let the DAGCombiner decide for vectors.
11445   EVT VT = AddNode.getValueType();
11446   if (VT.isVector())
11447     return true;
11448 
11449   // Let the DAGCombiner decide for larger types.
11450   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
11451     return true;
11452 
11453   // It is worse if c1 is simm12 while c1*c2 is not.
11454   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
11455   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
11456   const APInt &C1 = C1Node->getAPIntValue();
11457   const APInt &C2 = C2Node->getAPIntValue();
11458   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
11459     return false;
11460 
11461   // Default to true and let the DAGCombiner decide.
11462   return true;
11463 }
11464 
11465 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
11466     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
11467     bool *Fast) const {
11468   if (!VT.isVector())
11469     return false;
11470 
11471   EVT ElemVT = VT.getVectorElementType();
11472   if (Alignment >= ElemVT.getStoreSize()) {
11473     if (Fast)
11474       *Fast = true;
11475     return true;
11476   }
11477 
11478   return false;
11479 }
11480 
11481 bool RISCVTargetLowering::splitValueIntoRegisterParts(
11482     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
11483     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
11484   bool IsABIRegCopy = CC.hasValue();
11485   EVT ValueVT = Val.getValueType();
11486   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11487     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
11488     // and cast to f32.
11489     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
11490     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
11491     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
11492                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
11493     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
11494     Parts[0] = Val;
11495     return true;
11496   }
11497 
11498   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11499     LLVMContext &Context = *DAG.getContext();
11500     EVT ValueEltVT = ValueVT.getVectorElementType();
11501     EVT PartEltVT = PartVT.getVectorElementType();
11502     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11503     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11504     if (PartVTBitSize % ValueVTBitSize == 0) {
11505       assert(PartVTBitSize >= ValueVTBitSize);
11506       // If the element types are different, bitcast to the same element type of
11507       // PartVT first.
11508       // Give an example here, we want copy a <vscale x 1 x i8> value to
11509       // <vscale x 4 x i16>.
11510       // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
11511       // subvector, then we can bitcast to <vscale x 4 x i16>.
11512       if (ValueEltVT != PartEltVT) {
11513         if (PartVTBitSize > ValueVTBitSize) {
11514           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11515           assert(Count != 0 && "The number of element should not be zero.");
11516           EVT SameEltTypeVT =
11517               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11518           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
11519                             DAG.getUNDEF(SameEltTypeVT), Val,
11520                             DAG.getVectorIdxConstant(0, DL));
11521         }
11522         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
11523       } else {
11524         Val =
11525             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
11526                         Val, DAG.getVectorIdxConstant(0, DL));
11527       }
11528       Parts[0] = Val;
11529       return true;
11530     }
11531   }
11532   return false;
11533 }
11534 
11535 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
11536     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
11537     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
11538   bool IsABIRegCopy = CC.hasValue();
11539   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11540     SDValue Val = Parts[0];
11541 
11542     // Cast the f32 to i32, truncate to i16, and cast back to f16.
11543     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
11544     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
11545     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
11546     return Val;
11547   }
11548 
11549   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11550     LLVMContext &Context = *DAG.getContext();
11551     SDValue Val = Parts[0];
11552     EVT ValueEltVT = ValueVT.getVectorElementType();
11553     EVT PartEltVT = PartVT.getVectorElementType();
11554     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11555     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11556     if (PartVTBitSize % ValueVTBitSize == 0) {
11557       assert(PartVTBitSize >= ValueVTBitSize);
11558       EVT SameEltTypeVT = ValueVT;
11559       // If the element types are different, convert it to the same element type
11560       // of PartVT.
11561       // Give an example here, we want copy a <vscale x 1 x i8> value from
11562       // <vscale x 4 x i16>.
11563       // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
11564       // then we can extract <vscale x 1 x i8>.
11565       if (ValueEltVT != PartEltVT) {
11566         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11567         assert(Count != 0 && "The number of element should not be zero.");
11568         SameEltTypeVT =
11569             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11570         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
11571       }
11572       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
11573                         DAG.getVectorIdxConstant(0, DL));
11574       return Val;
11575     }
11576   }
11577   return SDValue();
11578 }
11579 
11580 SDValue
11581 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
11582                                    SelectionDAG &DAG,
11583                                    SmallVectorImpl<SDNode *> &Created) const {
11584   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
11585   if (isIntDivCheap(N->getValueType(0), Attr))
11586     return SDValue(N, 0); // Lower SDIV as SDIV
11587 
11588   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
11589          "Unexpected divisor!");
11590 
11591   // Conditional move is needed, so do the transformation iff Zbt is enabled.
11592   if (!Subtarget.hasStdExtZbt())
11593     return SDValue();
11594 
11595   // When |Divisor| >= 2 ^ 12, it isn't profitable to do such transformation.
11596   // Besides, more critical path instructions will be generated when dividing
11597   // by 2. So we keep using the original DAGs for these cases.
11598   unsigned Lg2 = Divisor.countTrailingZeros();
11599   if (Lg2 == 1 || Lg2 >= 12)
11600     return SDValue();
11601 
11602   // fold (sdiv X, pow2)
11603   EVT VT = N->getValueType(0);
11604   if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
11605     return SDValue();
11606 
11607   SDLoc DL(N);
11608   SDValue N0 = N->getOperand(0);
11609   SDValue Zero = DAG.getConstant(0, DL, VT);
11610   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
11611 
11612   // Add (N0 < 0) ? Pow2 - 1 : 0;
11613   SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
11614   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
11615   SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
11616 
11617   Created.push_back(Cmp.getNode());
11618   Created.push_back(Add.getNode());
11619   Created.push_back(Sel.getNode());
11620 
11621   // Divide by pow2.
11622   SDValue SRA =
11623       DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
11624 
11625   // If we're dividing by a positive value, we're done.  Otherwise, we must
11626   // negate the result.
11627   if (Divisor.isNonNegative())
11628     return SRA;
11629 
11630   Created.push_back(SRA.getNode());
11631   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
11632 }
11633 
11634 #define GET_REGISTER_MATCHER
11635 #include "RISCVGenAsmMatcher.inc"
11636 
11637 Register
11638 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
11639                                        const MachineFunction &MF) const {
11640   Register Reg = MatchRegisterAltName(RegName);
11641   if (Reg == RISCV::NoRegister)
11642     Reg = MatchRegisterName(RegName);
11643   if (Reg == RISCV::NoRegister)
11644     report_fatal_error(
11645         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
11646   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
11647   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
11648     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
11649                              StringRef(RegName) + "\"."));
11650   return Reg;
11651 }
11652 
11653 namespace llvm {
11654 namespace RISCVVIntrinsicsTable {
11655 
11656 #define GET_RISCVVIntrinsicsTable_IMPL
11657 #include "RISCVGenSearchableTables.inc"
11658 
11659 } // namespace RISCVVIntrinsicsTable
11660 
11661 } // namespace llvm
11662