1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineJumpTableInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/CodeGen/ValueTypes.h"
31 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/IR/DiagnosticPrinter.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/IntrinsicsRISCV.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/KnownBits.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/raw_ostream.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "riscv-lower"
45 
46 STATISTIC(NumTailCalls, "Number of tail calls");
47 
48 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
49                                          const RISCVSubtarget &STI)
50     : TargetLowering(TM), Subtarget(STI) {
51 
52   if (Subtarget.isRV32E())
53     report_fatal_error("Codegen not yet implemented for RV32E");
54 
55   RISCVABI::ABI ABI = Subtarget.getTargetABI();
56   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
57 
58   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
59       !Subtarget.hasStdExtF()) {
60     errs() << "Hard-float 'f' ABI can't be used for a target that "
61                 "doesn't support the F instruction set extension (ignoring "
62                           "target-abi)\n";
63     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
64   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
65              !Subtarget.hasStdExtD()) {
66     errs() << "Hard-float 'd' ABI can't be used for a target that "
67               "doesn't support the D instruction set extension (ignoring "
68               "target-abi)\n";
69     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
70   }
71 
72   switch (ABI) {
73   default:
74     report_fatal_error("Don't know how to lower this ABI");
75   case RISCVABI::ABI_ILP32:
76   case RISCVABI::ABI_ILP32F:
77   case RISCVABI::ABI_ILP32D:
78   case RISCVABI::ABI_LP64:
79   case RISCVABI::ABI_LP64F:
80   case RISCVABI::ABI_LP64D:
81     break;
82   }
83 
84   MVT XLenVT = Subtarget.getXLenVT();
85 
86   // Set up the register classes.
87   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
88 
89   if (Subtarget.hasStdExtZfh())
90     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
91   if (Subtarget.hasStdExtF())
92     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
93   if (Subtarget.hasStdExtD())
94     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
95 
96   static const MVT::SimpleValueType BoolVecVTs[] = {
97       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
98       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
99   static const MVT::SimpleValueType IntVecVTs[] = {
100       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
101       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
102       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
103       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
104       MVT::nxv4i64, MVT::nxv8i64};
105   static const MVT::SimpleValueType F16VecVTs[] = {
106       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
107       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
108   static const MVT::SimpleValueType F32VecVTs[] = {
109       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
110   static const MVT::SimpleValueType F64VecVTs[] = {
111       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
112 
113   if (Subtarget.hasVInstructions()) {
114     auto addRegClassForRVV = [this](MVT VT) {
115       unsigned Size = VT.getSizeInBits().getKnownMinValue();
116       assert(Size <= 512 && isPowerOf2_32(Size));
117       const TargetRegisterClass *RC;
118       if (Size <= 64)
119         RC = &RISCV::VRRegClass;
120       else if (Size == 128)
121         RC = &RISCV::VRM2RegClass;
122       else if (Size == 256)
123         RC = &RISCV::VRM4RegClass;
124       else
125         RC = &RISCV::VRM8RegClass;
126 
127       addRegisterClass(VT, RC);
128     };
129 
130     for (MVT VT : BoolVecVTs)
131       addRegClassForRVV(VT);
132     for (MVT VT : IntVecVTs) {
133       if (VT.getVectorElementType() == MVT::i64 &&
134           !Subtarget.hasVInstructionsI64())
135         continue;
136       addRegClassForRVV(VT);
137     }
138 
139     if (Subtarget.hasVInstructionsF16())
140       for (MVT VT : F16VecVTs)
141         addRegClassForRVV(VT);
142 
143     if (Subtarget.hasVInstructionsF32())
144       for (MVT VT : F32VecVTs)
145         addRegClassForRVV(VT);
146 
147     if (Subtarget.hasVInstructionsF64())
148       for (MVT VT : F64VecVTs)
149         addRegClassForRVV(VT);
150 
151     if (Subtarget.useRVVForFixedLengthVectors()) {
152       auto addRegClassForFixedVectors = [this](MVT VT) {
153         MVT ContainerVT = getContainerForFixedLengthVector(VT);
154         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
155         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
156         addRegisterClass(VT, TRI.getRegClass(RCID));
157       };
158       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
159         if (useRVVForFixedLengthVectorVT(VT))
160           addRegClassForFixedVectors(VT);
161 
162       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
163         if (useRVVForFixedLengthVectorVT(VT))
164           addRegClassForFixedVectors(VT);
165     }
166   }
167 
168   // Compute derived properties from the register classes.
169   computeRegisterProperties(STI.getRegisterInfo());
170 
171   setStackPointerRegisterToSaveRestore(RISCV::X2);
172 
173   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
174     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
175 
176   // TODO: add all necessary setOperationAction calls.
177   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
178 
179   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
180   setOperationAction(ISD::BR_CC, XLenVT, Expand);
181   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
182   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
183 
184   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
185   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
186 
187   setOperationAction(ISD::VASTART, MVT::Other, Custom);
188   setOperationAction(ISD::VAARG, MVT::Other, Expand);
189   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
190   setOperationAction(ISD::VAEND, MVT::Other, Expand);
191 
192   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
193   if (!Subtarget.hasStdExtZbb()) {
194     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
195     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
196   }
197 
198   if (Subtarget.is64Bit()) {
199     setOperationAction(ISD::ADD, MVT::i32, Custom);
200     setOperationAction(ISD::SUB, MVT::i32, Custom);
201     setOperationAction(ISD::SHL, MVT::i32, Custom);
202     setOperationAction(ISD::SRA, MVT::i32, Custom);
203     setOperationAction(ISD::SRL, MVT::i32, Custom);
204 
205     setOperationAction(ISD::UADDO, MVT::i32, Custom);
206     setOperationAction(ISD::USUBO, MVT::i32, Custom);
207     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
208     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
209   } else {
210     setLibcallName(RTLIB::SHL_I128, nullptr);
211     setLibcallName(RTLIB::SRL_I128, nullptr);
212     setLibcallName(RTLIB::SRA_I128, nullptr);
213     setLibcallName(RTLIB::MUL_I128, nullptr);
214     setLibcallName(RTLIB::MULO_I64, nullptr);
215   }
216 
217   if (!Subtarget.hasStdExtM()) {
218     setOperationAction(ISD::MUL, XLenVT, Expand);
219     setOperationAction(ISD::MULHS, XLenVT, Expand);
220     setOperationAction(ISD::MULHU, XLenVT, Expand);
221     setOperationAction(ISD::SDIV, XLenVT, Expand);
222     setOperationAction(ISD::UDIV, XLenVT, Expand);
223     setOperationAction(ISD::SREM, XLenVT, Expand);
224     setOperationAction(ISD::UREM, XLenVT, Expand);
225   } else {
226     if (Subtarget.is64Bit()) {
227       setOperationAction(ISD::MUL, MVT::i32, Custom);
228       setOperationAction(ISD::MUL, MVT::i128, Custom);
229 
230       setOperationAction(ISD::SDIV, MVT::i8, Custom);
231       setOperationAction(ISD::UDIV, MVT::i8, Custom);
232       setOperationAction(ISD::UREM, MVT::i8, Custom);
233       setOperationAction(ISD::SDIV, MVT::i16, Custom);
234       setOperationAction(ISD::UDIV, MVT::i16, Custom);
235       setOperationAction(ISD::UREM, MVT::i16, Custom);
236       setOperationAction(ISD::SDIV, MVT::i32, Custom);
237       setOperationAction(ISD::UDIV, MVT::i32, Custom);
238       setOperationAction(ISD::UREM, MVT::i32, Custom);
239     } else {
240       setOperationAction(ISD::MUL, MVT::i64, Custom);
241     }
242   }
243 
244   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
245   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
246   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
247   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
248 
249   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
250   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
251   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
252 
253   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
254       Subtarget.hasStdExtZbkb()) {
255     if (Subtarget.is64Bit()) {
256       setOperationAction(ISD::ROTL, MVT::i32, Custom);
257       setOperationAction(ISD::ROTR, MVT::i32, Custom);
258     }
259   } else {
260     setOperationAction(ISD::ROTL, XLenVT, Expand);
261     setOperationAction(ISD::ROTR, XLenVT, Expand);
262   }
263 
264   if (Subtarget.hasStdExtZbp()) {
265     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
266     // more combining.
267     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
268     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
269     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
270     // BSWAP i8 doesn't exist.
271     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
272     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
273 
274     if (Subtarget.is64Bit()) {
275       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
276       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
277     }
278   } else {
279     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
280     // pattern match it directly in isel.
281     setOperationAction(ISD::BSWAP, XLenVT,
282                        (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
283                            ? Legal
284                            : Expand);
285     // Zbkb can use rev8+brev8 to implement bitreverse.
286     setOperationAction(ISD::BITREVERSE, XLenVT,
287                        Subtarget.hasStdExtZbkb() ? Custom : Expand);
288   }
289 
290   if (Subtarget.hasStdExtZbb()) {
291     setOperationAction(ISD::SMIN, XLenVT, Legal);
292     setOperationAction(ISD::SMAX, XLenVT, Legal);
293     setOperationAction(ISD::UMIN, XLenVT, Legal);
294     setOperationAction(ISD::UMAX, XLenVT, Legal);
295 
296     if (Subtarget.is64Bit()) {
297       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
298       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
299       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
300       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
301     }
302   } else {
303     setOperationAction(ISD::CTTZ, XLenVT, Expand);
304     setOperationAction(ISD::CTLZ, XLenVT, Expand);
305     setOperationAction(ISD::CTPOP, XLenVT, Expand);
306 
307     if (Subtarget.is64Bit())
308       setOperationAction(ISD::ABS, MVT::i32, Custom);
309   }
310 
311   if (Subtarget.hasStdExtZbt()) {
312     setOperationAction(ISD::FSHL, XLenVT, Custom);
313     setOperationAction(ISD::FSHR, XLenVT, Custom);
314     setOperationAction(ISD::SELECT, XLenVT, Legal);
315 
316     if (Subtarget.is64Bit()) {
317       setOperationAction(ISD::FSHL, MVT::i32, Custom);
318       setOperationAction(ISD::FSHR, MVT::i32, Custom);
319     }
320   } else {
321     setOperationAction(ISD::SELECT, XLenVT, Custom);
322   }
323 
324   static constexpr ISD::NodeType FPLegalNodeTypes[] = {
325       ISD::FMINNUM,        ISD::FMAXNUM,       ISD::LRINT,
326       ISD::LLRINT,         ISD::LROUND,        ISD::LLROUND,
327       ISD::STRICT_LRINT,   ISD::STRICT_LLRINT, ISD::STRICT_LROUND,
328       ISD::STRICT_LLROUND, ISD::STRICT_FMA,    ISD::STRICT_FADD,
329       ISD::STRICT_FSUB,    ISD::STRICT_FMUL,   ISD::STRICT_FDIV,
330       ISD::STRICT_FSQRT,   ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS};
331 
332   static const ISD::CondCode FPCCToExpand[] = {
333       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
334       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
335       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
336 
337   static const ISD::NodeType FPOpToExpand[] = {
338       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
339       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
340 
341   if (Subtarget.hasStdExtZfh())
342     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
343 
344   if (Subtarget.hasStdExtZfh()) {
345     for (auto NT : FPLegalNodeTypes)
346       setOperationAction(NT, MVT::f16, Legal);
347     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
348     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
349     for (auto CC : FPCCToExpand)
350       setCondCodeAction(CC, MVT::f16, Expand);
351     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
352     setOperationAction(ISD::SELECT, MVT::f16, Custom);
353     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
354 
355     setOperationAction(ISD::FREM,       MVT::f16, Promote);
356     setOperationAction(ISD::FCEIL,      MVT::f16, Promote);
357     setOperationAction(ISD::FFLOOR,     MVT::f16, Promote);
358     setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
359     setOperationAction(ISD::FRINT,      MVT::f16, Promote);
360     setOperationAction(ISD::FROUND,     MVT::f16, Promote);
361     setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote);
362     setOperationAction(ISD::FTRUNC,     MVT::f16, Promote);
363     setOperationAction(ISD::FPOW,       MVT::f16, Promote);
364     setOperationAction(ISD::FPOWI,      MVT::f16, Promote);
365     setOperationAction(ISD::FCOS,       MVT::f16, Promote);
366     setOperationAction(ISD::FSIN,       MVT::f16, Promote);
367     setOperationAction(ISD::FSINCOS,    MVT::f16, Promote);
368     setOperationAction(ISD::FEXP,       MVT::f16, Promote);
369     setOperationAction(ISD::FEXP2,      MVT::f16, Promote);
370     setOperationAction(ISD::FLOG,       MVT::f16, Promote);
371     setOperationAction(ISD::FLOG2,      MVT::f16, Promote);
372     setOperationAction(ISD::FLOG10,     MVT::f16, Promote);
373 
374     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
375     // complete support for all operations in LegalizeDAG.
376 
377     // We need to custom promote this.
378     if (Subtarget.is64Bit())
379       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
380   }
381 
382   if (Subtarget.hasStdExtF()) {
383     for (auto NT : FPLegalNodeTypes)
384       setOperationAction(NT, MVT::f32, Legal);
385     for (auto CC : FPCCToExpand)
386       setCondCodeAction(CC, MVT::f32, Expand);
387     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
388     setOperationAction(ISD::SELECT, MVT::f32, Custom);
389     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
390     for (auto Op : FPOpToExpand)
391       setOperationAction(Op, MVT::f32, Expand);
392     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
393     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
394   }
395 
396   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
397     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
398 
399   if (Subtarget.hasStdExtD()) {
400     for (auto NT : FPLegalNodeTypes)
401       setOperationAction(NT, MVT::f64, Legal);
402     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
403     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
404     for (auto CC : FPCCToExpand)
405       setCondCodeAction(CC, MVT::f64, Expand);
406     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
407     setOperationAction(ISD::SELECT, MVT::f64, Custom);
408     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
409     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
410     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
411     for (auto Op : FPOpToExpand)
412       setOperationAction(Op, MVT::f64, Expand);
413     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
414     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
415   }
416 
417   if (Subtarget.is64Bit()) {
418     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
419     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
420     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
421     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
422   }
423 
424   if (Subtarget.hasStdExtF()) {
425     setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
426     setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);
427 
428     setOperationAction(ISD::STRICT_FP_TO_UINT, XLenVT, Legal);
429     setOperationAction(ISD::STRICT_FP_TO_SINT, XLenVT, Legal);
430     setOperationAction(ISD::STRICT_UINT_TO_FP, XLenVT, Legal);
431     setOperationAction(ISD::STRICT_SINT_TO_FP, XLenVT, Legal);
432 
433     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
434     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
435   }
436 
437   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
438   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
439   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
440   setOperationAction(ISD::JumpTable, XLenVT, Custom);
441 
442   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
443 
444   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
445   // Unfortunately this can't be determined just from the ISA naming string.
446   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
447                      Subtarget.is64Bit() ? Legal : Custom);
448 
449   setOperationAction(ISD::TRAP, MVT::Other, Legal);
450   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
451   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
452   if (Subtarget.is64Bit())
453     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
454 
455   if (Subtarget.hasStdExtA()) {
456     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
457     setMinCmpXchgSizeInBits(32);
458   } else {
459     setMaxAtomicSizeInBitsSupported(0);
460   }
461 
462   setBooleanContents(ZeroOrOneBooleanContent);
463 
464   if (Subtarget.hasVInstructions()) {
465     setBooleanVectorContents(ZeroOrOneBooleanContent);
466 
467     setOperationAction(ISD::VSCALE, XLenVT, Custom);
468 
469     // RVV intrinsics may have illegal operands.
470     // We also need to custom legalize vmv.x.s.
471     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
472     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
473     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
474     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
475     if (Subtarget.is64Bit()) {
476       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
477     } else {
478       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
479       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
480     }
481 
482     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
483     setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
484 
485     static const unsigned IntegerVPOps[] = {
486         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
487         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
488         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
489         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
490         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
491         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
492         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
493         ISD::VP_MERGE,       ISD::VP_SELECT};
494 
495     static const unsigned FloatingPointVPOps[] = {
496         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
497         ISD::VP_FDIV,        ISD::VP_FNEG,        ISD::VP_FMA,
498         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD, ISD::VP_REDUCE_FMIN,
499         ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,       ISD::VP_SELECT};
500 
501     if (!Subtarget.is64Bit()) {
502       // We must custom-lower certain vXi64 operations on RV32 due to the vector
503       // element type being illegal.
504       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
505       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
506 
507       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
508       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
509       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
510       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
511       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
512       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
513       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
514       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
515 
516       setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom);
517       setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom);
518       setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom);
519       setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom);
520       setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom);
521       setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom);
522       setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom);
523       setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom);
524     }
525 
526     for (MVT VT : BoolVecVTs) {
527       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
528 
529       // Mask VTs are custom-expanded into a series of standard nodes
530       setOperationAction(ISD::TRUNCATE, VT, Custom);
531       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
532       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
533       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
534 
535       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
536       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
537 
538       setOperationAction(ISD::SELECT, VT, Custom);
539       setOperationAction(ISD::SELECT_CC, VT, Expand);
540       setOperationAction(ISD::VSELECT, VT, Expand);
541       setOperationAction(ISD::VP_MERGE, VT, Expand);
542       setOperationAction(ISD::VP_SELECT, VT, Expand);
543 
544       setOperationAction(ISD::VP_AND, VT, Custom);
545       setOperationAction(ISD::VP_OR, VT, Custom);
546       setOperationAction(ISD::VP_XOR, VT, Custom);
547 
548       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
549       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
550       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
551 
552       setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
553       setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
554       setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
555 
556       // RVV has native int->float & float->int conversions where the
557       // element type sizes are within one power-of-two of each other. Any
558       // wider distances between type sizes have to be lowered as sequences
559       // which progressively narrow the gap in stages.
560       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
561       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
562       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
563       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
564 
565       // Expand all extending loads to types larger than this, and truncating
566       // stores from types larger than this.
567       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
568         setTruncStoreAction(OtherVT, VT, Expand);
569         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
570         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
571         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
572       }
573     }
574 
575     for (MVT VT : IntVecVTs) {
576       if (VT.getVectorElementType() == MVT::i64 &&
577           !Subtarget.hasVInstructionsI64())
578         continue;
579 
580       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
581       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
582 
583       // Vectors implement MULHS/MULHU.
584       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
585       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
586 
587       // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
588       if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV()) {
589         setOperationAction(ISD::MULHU, VT, Expand);
590         setOperationAction(ISD::MULHS, VT, Expand);
591       }
592 
593       setOperationAction(ISD::SMIN, VT, Legal);
594       setOperationAction(ISD::SMAX, VT, Legal);
595       setOperationAction(ISD::UMIN, VT, Legal);
596       setOperationAction(ISD::UMAX, VT, Legal);
597 
598       setOperationAction(ISD::ROTL, VT, Expand);
599       setOperationAction(ISD::ROTR, VT, Expand);
600 
601       setOperationAction(ISD::CTTZ, VT, Expand);
602       setOperationAction(ISD::CTLZ, VT, Expand);
603       setOperationAction(ISD::CTPOP, VT, Expand);
604 
605       setOperationAction(ISD::BSWAP, VT, Expand);
606 
607       // Custom-lower extensions and truncations from/to mask types.
608       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
609       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
610       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
611 
612       // RVV has native int->float & float->int conversions where the
613       // element type sizes are within one power-of-two of each other. Any
614       // wider distances between type sizes have to be lowered as sequences
615       // which progressively narrow the gap in stages.
616       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
617       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
618       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
619       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
620 
621       setOperationAction(ISD::SADDSAT, VT, Legal);
622       setOperationAction(ISD::UADDSAT, VT, Legal);
623       setOperationAction(ISD::SSUBSAT, VT, Legal);
624       setOperationAction(ISD::USUBSAT, VT, Legal);
625 
626       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
627       // nodes which truncate by one power of two at a time.
628       setOperationAction(ISD::TRUNCATE, VT, Custom);
629 
630       // Custom-lower insert/extract operations to simplify patterns.
631       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
632       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
633 
634       // Custom-lower reduction operations to set up the corresponding custom
635       // nodes' operands.
636       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
637       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
638       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
639       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
640       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
641       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
642       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
643       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
644 
645       for (unsigned VPOpc : IntegerVPOps)
646         setOperationAction(VPOpc, VT, Custom);
647 
648       setOperationAction(ISD::LOAD, VT, Custom);
649       setOperationAction(ISD::STORE, VT, Custom);
650 
651       setOperationAction(ISD::MLOAD, VT, Custom);
652       setOperationAction(ISD::MSTORE, VT, Custom);
653       setOperationAction(ISD::MGATHER, VT, Custom);
654       setOperationAction(ISD::MSCATTER, VT, Custom);
655 
656       setOperationAction(ISD::VP_LOAD, VT, Custom);
657       setOperationAction(ISD::VP_STORE, VT, Custom);
658       setOperationAction(ISD::VP_GATHER, VT, Custom);
659       setOperationAction(ISD::VP_SCATTER, VT, Custom);
660 
661       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
662       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
663       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
664 
665       setOperationAction(ISD::SELECT, VT, Custom);
666       setOperationAction(ISD::SELECT_CC, VT, Expand);
667 
668       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
669       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
670 
671       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
672         setTruncStoreAction(VT, OtherVT, Expand);
673         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
674         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
675         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
676       }
677 
678       // Splice
679       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
680 
681       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
682       // type that can represent the value exactly.
683       if (VT.getVectorElementType() != MVT::i64) {
684         MVT FloatEltVT =
685             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
686         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
687         if (isTypeLegal(FloatVT)) {
688           setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
689           setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
690         }
691       }
692     }
693 
694     // Expand various CCs to best match the RVV ISA, which natively supports UNE
695     // but no other unordered comparisons, and supports all ordered comparisons
696     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
697     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
698     // and we pattern-match those back to the "original", swapping operands once
699     // more. This way we catch both operations and both "vf" and "fv" forms with
700     // fewer patterns.
701     static const ISD::CondCode VFPCCToExpand[] = {
702         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
703         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
704         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
705     };
706 
707     // Sets common operation actions on RVV floating-point vector types.
708     const auto SetCommonVFPActions = [&](MVT VT) {
709       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
710       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
711       // sizes are within one power-of-two of each other. Therefore conversions
712       // between vXf16 and vXf64 must be lowered as sequences which convert via
713       // vXf32.
714       setOperationAction(ISD::FP_ROUND, VT, Custom);
715       setOperationAction(ISD::FP_EXTEND, VT, Custom);
716       // Custom-lower insert/extract operations to simplify patterns.
717       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
718       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
719       // Expand various condition codes (explained above).
720       for (auto CC : VFPCCToExpand)
721         setCondCodeAction(CC, VT, Expand);
722 
723       setOperationAction(ISD::FMINNUM, VT, Legal);
724       setOperationAction(ISD::FMAXNUM, VT, Legal);
725 
726       setOperationAction(ISD::FTRUNC, VT, Custom);
727       setOperationAction(ISD::FCEIL, VT, Custom);
728       setOperationAction(ISD::FFLOOR, VT, Custom);
729       setOperationAction(ISD::FROUND, VT, Custom);
730 
731       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
732       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
733       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
734       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
735 
736       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
737 
738       setOperationAction(ISD::LOAD, VT, Custom);
739       setOperationAction(ISD::STORE, VT, Custom);
740 
741       setOperationAction(ISD::MLOAD, VT, Custom);
742       setOperationAction(ISD::MSTORE, VT, Custom);
743       setOperationAction(ISD::MGATHER, VT, Custom);
744       setOperationAction(ISD::MSCATTER, VT, Custom);
745 
746       setOperationAction(ISD::VP_LOAD, VT, Custom);
747       setOperationAction(ISD::VP_STORE, VT, Custom);
748       setOperationAction(ISD::VP_GATHER, VT, Custom);
749       setOperationAction(ISD::VP_SCATTER, VT, Custom);
750 
751       setOperationAction(ISD::SELECT, VT, Custom);
752       setOperationAction(ISD::SELECT_CC, VT, Expand);
753 
754       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
755       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
756       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
757 
758       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
759       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
760 
761       for (unsigned VPOpc : FloatingPointVPOps)
762         setOperationAction(VPOpc, VT, Custom);
763     };
764 
765     // Sets common extload/truncstore actions on RVV floating-point vector
766     // types.
767     const auto SetCommonVFPExtLoadTruncStoreActions =
768         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
769           for (auto SmallVT : SmallerVTs) {
770             setTruncStoreAction(VT, SmallVT, Expand);
771             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
772           }
773         };
774 
775     if (Subtarget.hasVInstructionsF16())
776       for (MVT VT : F16VecVTs)
777         SetCommonVFPActions(VT);
778 
779     for (MVT VT : F32VecVTs) {
780       if (Subtarget.hasVInstructionsF32())
781         SetCommonVFPActions(VT);
782       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
783     }
784 
785     for (MVT VT : F64VecVTs) {
786       if (Subtarget.hasVInstructionsF64())
787         SetCommonVFPActions(VT);
788       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
789       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
790     }
791 
792     if (Subtarget.useRVVForFixedLengthVectors()) {
793       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
794         if (!useRVVForFixedLengthVectorVT(VT))
795           continue;
796 
797         // By default everything must be expanded.
798         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
799           setOperationAction(Op, VT, Expand);
800         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
801           setTruncStoreAction(VT, OtherVT, Expand);
802           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
803           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
804           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
805         }
806 
807         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
808         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
809         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
810 
811         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
812         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
813 
814         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
815         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
816 
817         setOperationAction(ISD::LOAD, VT, Custom);
818         setOperationAction(ISD::STORE, VT, Custom);
819 
820         setOperationAction(ISD::SETCC, VT, Custom);
821 
822         setOperationAction(ISD::SELECT, VT, Custom);
823 
824         setOperationAction(ISD::TRUNCATE, VT, Custom);
825 
826         setOperationAction(ISD::BITCAST, VT, Custom);
827 
828         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
829         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
830         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
831 
832         setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
833         setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
834         setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
835 
836         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
837         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
838         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
839         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
840 
841         // Operations below are different for between masks and other vectors.
842         if (VT.getVectorElementType() == MVT::i1) {
843           setOperationAction(ISD::VP_AND, VT, Custom);
844           setOperationAction(ISD::VP_OR, VT, Custom);
845           setOperationAction(ISD::VP_XOR, VT, Custom);
846           setOperationAction(ISD::AND, VT, Custom);
847           setOperationAction(ISD::OR, VT, Custom);
848           setOperationAction(ISD::XOR, VT, Custom);
849           continue;
850         }
851 
852         // Use SPLAT_VECTOR to prevent type legalization from destroying the
853         // splats when type legalizing i64 scalar on RV32.
854         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
855         // improvements first.
856         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
857           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
858           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
859         }
860 
861         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
862         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
863 
864         setOperationAction(ISD::MLOAD, VT, Custom);
865         setOperationAction(ISD::MSTORE, VT, Custom);
866         setOperationAction(ISD::MGATHER, VT, Custom);
867         setOperationAction(ISD::MSCATTER, VT, Custom);
868 
869         setOperationAction(ISD::VP_LOAD, VT, Custom);
870         setOperationAction(ISD::VP_STORE, VT, Custom);
871         setOperationAction(ISD::VP_GATHER, VT, Custom);
872         setOperationAction(ISD::VP_SCATTER, VT, Custom);
873 
874         setOperationAction(ISD::ADD, VT, Custom);
875         setOperationAction(ISD::MUL, VT, Custom);
876         setOperationAction(ISD::SUB, VT, Custom);
877         setOperationAction(ISD::AND, VT, Custom);
878         setOperationAction(ISD::OR, VT, Custom);
879         setOperationAction(ISD::XOR, VT, Custom);
880         setOperationAction(ISD::SDIV, VT, Custom);
881         setOperationAction(ISD::SREM, VT, Custom);
882         setOperationAction(ISD::UDIV, VT, Custom);
883         setOperationAction(ISD::UREM, VT, Custom);
884         setOperationAction(ISD::SHL, VT, Custom);
885         setOperationAction(ISD::SRA, VT, Custom);
886         setOperationAction(ISD::SRL, VT, Custom);
887 
888         setOperationAction(ISD::SMIN, VT, Custom);
889         setOperationAction(ISD::SMAX, VT, Custom);
890         setOperationAction(ISD::UMIN, VT, Custom);
891         setOperationAction(ISD::UMAX, VT, Custom);
892         setOperationAction(ISD::ABS,  VT, Custom);
893 
894         // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
895         if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV()) {
896           setOperationAction(ISD::MULHS, VT, Custom);
897           setOperationAction(ISD::MULHU, VT, Custom);
898         }
899 
900         setOperationAction(ISD::SADDSAT, VT, Custom);
901         setOperationAction(ISD::UADDSAT, VT, Custom);
902         setOperationAction(ISD::SSUBSAT, VT, Custom);
903         setOperationAction(ISD::USUBSAT, VT, Custom);
904 
905         setOperationAction(ISD::VSELECT, VT, Custom);
906         setOperationAction(ISD::SELECT_CC, VT, Expand);
907 
908         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
909         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
910         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
911 
912         // Custom-lower reduction operations to set up the corresponding custom
913         // nodes' operands.
914         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
915         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
916         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
917         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
918         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
919 
920         for (unsigned VPOpc : IntegerVPOps)
921           setOperationAction(VPOpc, VT, Custom);
922 
923         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
924         // type that can represent the value exactly.
925         if (VT.getVectorElementType() != MVT::i64) {
926           MVT FloatEltVT =
927               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
928           EVT FloatVT =
929               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
930           if (isTypeLegal(FloatVT)) {
931             setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
932             setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
933           }
934         }
935       }
936 
937       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
938         if (!useRVVForFixedLengthVectorVT(VT))
939           continue;
940 
941         // By default everything must be expanded.
942         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
943           setOperationAction(Op, VT, Expand);
944         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
945           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
946           setTruncStoreAction(VT, OtherVT, Expand);
947         }
948 
949         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
950         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
951         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
952 
953         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
954         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
955         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
956         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
957         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
958 
959         setOperationAction(ISD::LOAD, VT, Custom);
960         setOperationAction(ISD::STORE, VT, Custom);
961         setOperationAction(ISD::MLOAD, VT, Custom);
962         setOperationAction(ISD::MSTORE, VT, Custom);
963         setOperationAction(ISD::MGATHER, VT, Custom);
964         setOperationAction(ISD::MSCATTER, VT, Custom);
965 
966         setOperationAction(ISD::VP_LOAD, VT, Custom);
967         setOperationAction(ISD::VP_STORE, VT, Custom);
968         setOperationAction(ISD::VP_GATHER, VT, Custom);
969         setOperationAction(ISD::VP_SCATTER, VT, Custom);
970 
971         setOperationAction(ISD::FADD, VT, Custom);
972         setOperationAction(ISD::FSUB, VT, Custom);
973         setOperationAction(ISD::FMUL, VT, Custom);
974         setOperationAction(ISD::FDIV, VT, Custom);
975         setOperationAction(ISD::FNEG, VT, Custom);
976         setOperationAction(ISD::FABS, VT, Custom);
977         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
978         setOperationAction(ISD::FSQRT, VT, Custom);
979         setOperationAction(ISD::FMA, VT, Custom);
980         setOperationAction(ISD::FMINNUM, VT, Custom);
981         setOperationAction(ISD::FMAXNUM, VT, Custom);
982 
983         setOperationAction(ISD::FP_ROUND, VT, Custom);
984         setOperationAction(ISD::FP_EXTEND, VT, Custom);
985 
986         setOperationAction(ISD::FTRUNC, VT, Custom);
987         setOperationAction(ISD::FCEIL, VT, Custom);
988         setOperationAction(ISD::FFLOOR, VT, Custom);
989         setOperationAction(ISD::FROUND, VT, Custom);
990 
991         for (auto CC : VFPCCToExpand)
992           setCondCodeAction(CC, VT, Expand);
993 
994         setOperationAction(ISD::VSELECT, VT, Custom);
995         setOperationAction(ISD::SELECT, VT, Custom);
996         setOperationAction(ISD::SELECT_CC, VT, Expand);
997 
998         setOperationAction(ISD::BITCAST, VT, Custom);
999 
1000         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1001         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1002         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1003         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1004 
1005         for (unsigned VPOpc : FloatingPointVPOps)
1006           setOperationAction(VPOpc, VT, Custom);
1007       }
1008 
1009       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
1010       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
1011       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
1012       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
1013       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1014       if (Subtarget.hasStdExtZfh())
1015         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
1016       if (Subtarget.hasStdExtF())
1017         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
1018       if (Subtarget.hasStdExtD())
1019         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
1020     }
1021   }
1022 
1023   // Function alignments.
1024   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
1025   setMinFunctionAlignment(FunctionAlignment);
1026   setPrefFunctionAlignment(FunctionAlignment);
1027 
1028   setMinimumJumpTableEntries(5);
1029 
1030   // Jumps are expensive, compared to logic
1031   setJumpIsExpensive();
1032 
1033   setTargetDAGCombine(ISD::ADD);
1034   setTargetDAGCombine(ISD::SUB);
1035   setTargetDAGCombine(ISD::AND);
1036   setTargetDAGCombine(ISD::OR);
1037   setTargetDAGCombine(ISD::XOR);
1038   if (Subtarget.hasStdExtZbp()) {
1039     setTargetDAGCombine(ISD::ROTL);
1040     setTargetDAGCombine(ISD::ROTR);
1041   }
1042   setTargetDAGCombine(ISD::ANY_EXTEND);
1043   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1044   if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZbb())
1045     setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1046   if (Subtarget.hasStdExtF()) {
1047     setTargetDAGCombine(ISD::ZERO_EXTEND);
1048     setTargetDAGCombine(ISD::FP_TO_SINT);
1049     setTargetDAGCombine(ISD::FP_TO_UINT);
1050     setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
1051     setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
1052   }
1053   if (Subtarget.hasVInstructions()) {
1054     setTargetDAGCombine(ISD::FCOPYSIGN);
1055     setTargetDAGCombine(ISD::MGATHER);
1056     setTargetDAGCombine(ISD::MSCATTER);
1057     setTargetDAGCombine(ISD::VP_GATHER);
1058     setTargetDAGCombine(ISD::VP_SCATTER);
1059     setTargetDAGCombine(ISD::SRA);
1060     setTargetDAGCombine(ISD::SRL);
1061     setTargetDAGCombine(ISD::SHL);
1062     setTargetDAGCombine(ISD::STORE);
1063     setTargetDAGCombine(ISD::SPLAT_VECTOR);
1064   }
1065 
1066   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
1067   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
1068 }
1069 
1070 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
1071                                             LLVMContext &Context,
1072                                             EVT VT) const {
1073   if (!VT.isVector())
1074     return getPointerTy(DL);
1075   if (Subtarget.hasVInstructions() &&
1076       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
1077     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
1078   return VT.changeVectorElementTypeToInteger();
1079 }
1080 
1081 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
1082   return Subtarget.getXLenVT();
1083 }
1084 
1085 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1086                                              const CallInst &I,
1087                                              MachineFunction &MF,
1088                                              unsigned Intrinsic) const {
1089   auto &DL = I.getModule()->getDataLayout();
1090   switch (Intrinsic) {
1091   default:
1092     return false;
1093   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
1094   case Intrinsic::riscv_masked_atomicrmw_add_i32:
1095   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
1096   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
1097   case Intrinsic::riscv_masked_atomicrmw_max_i32:
1098   case Intrinsic::riscv_masked_atomicrmw_min_i32:
1099   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
1100   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
1101   case Intrinsic::riscv_masked_cmpxchg_i32:
1102     Info.opc = ISD::INTRINSIC_W_CHAIN;
1103     Info.memVT = MVT::i32;
1104     Info.ptrVal = I.getArgOperand(0);
1105     Info.offset = 0;
1106     Info.align = Align(4);
1107     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
1108                  MachineMemOperand::MOVolatile;
1109     return true;
1110   case Intrinsic::riscv_masked_strided_load:
1111     Info.opc = ISD::INTRINSIC_W_CHAIN;
1112     Info.ptrVal = I.getArgOperand(1);
1113     Info.memVT = getValueType(DL, I.getType()->getScalarType());
1114     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
1115     Info.size = MemoryLocation::UnknownSize;
1116     Info.flags |= MachineMemOperand::MOLoad;
1117     return true;
1118   case Intrinsic::riscv_masked_strided_store:
1119     Info.opc = ISD::INTRINSIC_VOID;
1120     Info.ptrVal = I.getArgOperand(1);
1121     Info.memVT =
1122         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
1123     Info.align = Align(
1124         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
1125         8);
1126     Info.size = MemoryLocation::UnknownSize;
1127     Info.flags |= MachineMemOperand::MOStore;
1128     return true;
1129   case Intrinsic::riscv_seg2_load:
1130   case Intrinsic::riscv_seg3_load:
1131   case Intrinsic::riscv_seg4_load:
1132   case Intrinsic::riscv_seg5_load:
1133   case Intrinsic::riscv_seg6_load:
1134   case Intrinsic::riscv_seg7_load:
1135   case Intrinsic::riscv_seg8_load:
1136     Info.opc = ISD::INTRINSIC_W_CHAIN;
1137     Info.ptrVal = I.getArgOperand(0);
1138     Info.memVT =
1139         getValueType(DL, I.getType()->getStructElementType(0)->getScalarType());
1140     Info.align =
1141         Align(DL.getTypeSizeInBits(
1142                   I.getType()->getStructElementType(0)->getScalarType()) /
1143               8);
1144     Info.size = MemoryLocation::UnknownSize;
1145     Info.flags |= MachineMemOperand::MOLoad;
1146     return true;
1147   }
1148 }
1149 
1150 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1151                                                 const AddrMode &AM, Type *Ty,
1152                                                 unsigned AS,
1153                                                 Instruction *I) const {
1154   // No global is ever allowed as a base.
1155   if (AM.BaseGV)
1156     return false;
1157 
1158   // Require a 12-bit signed offset.
1159   if (!isInt<12>(AM.BaseOffs))
1160     return false;
1161 
1162   switch (AM.Scale) {
1163   case 0: // "r+i" or just "i", depending on HasBaseReg.
1164     break;
1165   case 1:
1166     if (!AM.HasBaseReg) // allow "r+i".
1167       break;
1168     return false; // disallow "r+r" or "r+r+i".
1169   default:
1170     return false;
1171   }
1172 
1173   return true;
1174 }
1175 
1176 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1177   return isInt<12>(Imm);
1178 }
1179 
1180 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1181   return isInt<12>(Imm);
1182 }
1183 
1184 // On RV32, 64-bit integers are split into their high and low parts and held
1185 // in two different registers, so the trunc is free since the low register can
1186 // just be used.
1187 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1188   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1189     return false;
1190   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1191   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1192   return (SrcBits == 64 && DestBits == 32);
1193 }
1194 
1195 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1196   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1197       !SrcVT.isInteger() || !DstVT.isInteger())
1198     return false;
1199   unsigned SrcBits = SrcVT.getSizeInBits();
1200   unsigned DestBits = DstVT.getSizeInBits();
1201   return (SrcBits == 64 && DestBits == 32);
1202 }
1203 
1204 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1205   // Zexts are free if they can be combined with a load.
1206   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1207   // poorly with type legalization of compares preferring sext.
1208   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1209     EVT MemVT = LD->getMemoryVT();
1210     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1211         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1212          LD->getExtensionType() == ISD::ZEXTLOAD))
1213       return true;
1214   }
1215 
1216   return TargetLowering::isZExtFree(Val, VT2);
1217 }
1218 
1219 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1220   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1221 }
1222 
1223 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1224   return Subtarget.hasStdExtZbb();
1225 }
1226 
1227 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1228   return Subtarget.hasStdExtZbb();
1229 }
1230 
1231 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1232   EVT VT = Y.getValueType();
1233 
1234   // FIXME: Support vectors once we have tests.
1235   if (VT.isVector())
1236     return false;
1237 
1238   return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
1239           Subtarget.hasStdExtZbkb()) &&
1240          !isa<ConstantSDNode>(Y);
1241 }
1242 
1243 /// Check if sinking \p I's operands to I's basic block is profitable, because
1244 /// the operands can be folded into a target instruction, e.g.
1245 /// splats of scalars can fold into vector instructions.
1246 bool RISCVTargetLowering::shouldSinkOperands(
1247     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1248   using namespace llvm::PatternMatch;
1249 
1250   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1251     return false;
1252 
1253   auto IsSinker = [&](Instruction *I, int Operand) {
1254     switch (I->getOpcode()) {
1255     case Instruction::Add:
1256     case Instruction::Sub:
1257     case Instruction::Mul:
1258     case Instruction::And:
1259     case Instruction::Or:
1260     case Instruction::Xor:
1261     case Instruction::FAdd:
1262     case Instruction::FSub:
1263     case Instruction::FMul:
1264     case Instruction::FDiv:
1265     case Instruction::ICmp:
1266     case Instruction::FCmp:
1267       return true;
1268     case Instruction::Shl:
1269     case Instruction::LShr:
1270     case Instruction::AShr:
1271     case Instruction::UDiv:
1272     case Instruction::SDiv:
1273     case Instruction::URem:
1274     case Instruction::SRem:
1275       return Operand == 1;
1276     case Instruction::Call:
1277       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1278         switch (II->getIntrinsicID()) {
1279         case Intrinsic::fma:
1280         case Intrinsic::vp_fma:
1281           return Operand == 0 || Operand == 1;
1282         // FIXME: Our patterns can only match vx/vf instructions when the splat
1283         // it on the RHS, because TableGen doesn't recognize our VP operations
1284         // as commutative.
1285         case Intrinsic::vp_add:
1286         case Intrinsic::vp_mul:
1287         case Intrinsic::vp_and:
1288         case Intrinsic::vp_or:
1289         case Intrinsic::vp_xor:
1290         case Intrinsic::vp_fadd:
1291         case Intrinsic::vp_fmul:
1292         case Intrinsic::vp_shl:
1293         case Intrinsic::vp_lshr:
1294         case Intrinsic::vp_ashr:
1295         case Intrinsic::vp_udiv:
1296         case Intrinsic::vp_sdiv:
1297         case Intrinsic::vp_urem:
1298         case Intrinsic::vp_srem:
1299           return Operand == 1;
1300         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1301         // explicit patterns for both LHS and RHS (as 'vr' versions).
1302         case Intrinsic::vp_sub:
1303         case Intrinsic::vp_fsub:
1304         case Intrinsic::vp_fdiv:
1305           return Operand == 0 || Operand == 1;
1306         default:
1307           return false;
1308         }
1309       }
1310       return false;
1311     default:
1312       return false;
1313     }
1314   };
1315 
1316   for (auto OpIdx : enumerate(I->operands())) {
1317     if (!IsSinker(I, OpIdx.index()))
1318       continue;
1319 
1320     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1321     // Make sure we are not already sinking this operand
1322     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1323       continue;
1324 
1325     // We are looking for a splat that can be sunk.
1326     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1327                              m_Undef(), m_ZeroMask())))
1328       continue;
1329 
1330     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1331     // and vector registers
1332     for (Use &U : Op->uses()) {
1333       Instruction *Insn = cast<Instruction>(U.getUser());
1334       if (!IsSinker(Insn, U.getOperandNo()))
1335         return false;
1336     }
1337 
1338     Ops.push_back(&Op->getOperandUse(0));
1339     Ops.push_back(&OpIdx.value());
1340   }
1341   return true;
1342 }
1343 
1344 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1345                                        bool ForCodeSize) const {
1346   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1347   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1348     return false;
1349   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1350     return false;
1351   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1352     return false;
1353   return Imm.isZero();
1354 }
1355 
1356 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1357   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1358          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1359          (VT == MVT::f64 && Subtarget.hasStdExtD());
1360 }
1361 
1362 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1363                                                       CallingConv::ID CC,
1364                                                       EVT VT) const {
1365   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1366   // We might still end up using a GPR but that will be decided based on ABI.
1367   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1368   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1369     return MVT::f32;
1370 
1371   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1372 }
1373 
1374 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1375                                                            CallingConv::ID CC,
1376                                                            EVT VT) const {
1377   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1378   // We might still end up using a GPR but that will be decided based on ABI.
1379   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1380   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1381     return 1;
1382 
1383   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1384 }
1385 
1386 // Changes the condition code and swaps operands if necessary, so the SetCC
1387 // operation matches one of the comparisons supported directly by branches
1388 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1389 // with 1/-1.
1390 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1391                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1392   // Convert X > -1 to X >= 0.
1393   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1394     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1395     CC = ISD::SETGE;
1396     return;
1397   }
1398   // Convert X < 1 to 0 >= X.
1399   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1400     RHS = LHS;
1401     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1402     CC = ISD::SETGE;
1403     return;
1404   }
1405 
1406   switch (CC) {
1407   default:
1408     break;
1409   case ISD::SETGT:
1410   case ISD::SETLE:
1411   case ISD::SETUGT:
1412   case ISD::SETULE:
1413     CC = ISD::getSetCCSwappedOperands(CC);
1414     std::swap(LHS, RHS);
1415     break;
1416   }
1417 }
1418 
1419 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1420   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1421   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1422   if (VT.getVectorElementType() == MVT::i1)
1423     KnownSize *= 8;
1424 
1425   switch (KnownSize) {
1426   default:
1427     llvm_unreachable("Invalid LMUL.");
1428   case 8:
1429     return RISCVII::VLMUL::LMUL_F8;
1430   case 16:
1431     return RISCVII::VLMUL::LMUL_F4;
1432   case 32:
1433     return RISCVII::VLMUL::LMUL_F2;
1434   case 64:
1435     return RISCVII::VLMUL::LMUL_1;
1436   case 128:
1437     return RISCVII::VLMUL::LMUL_2;
1438   case 256:
1439     return RISCVII::VLMUL::LMUL_4;
1440   case 512:
1441     return RISCVII::VLMUL::LMUL_8;
1442   }
1443 }
1444 
1445 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1446   switch (LMul) {
1447   default:
1448     llvm_unreachable("Invalid LMUL.");
1449   case RISCVII::VLMUL::LMUL_F8:
1450   case RISCVII::VLMUL::LMUL_F4:
1451   case RISCVII::VLMUL::LMUL_F2:
1452   case RISCVII::VLMUL::LMUL_1:
1453     return RISCV::VRRegClassID;
1454   case RISCVII::VLMUL::LMUL_2:
1455     return RISCV::VRM2RegClassID;
1456   case RISCVII::VLMUL::LMUL_4:
1457     return RISCV::VRM4RegClassID;
1458   case RISCVII::VLMUL::LMUL_8:
1459     return RISCV::VRM8RegClassID;
1460   }
1461 }
1462 
1463 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1464   RISCVII::VLMUL LMUL = getLMUL(VT);
1465   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1466       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1467       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1468       LMUL == RISCVII::VLMUL::LMUL_1) {
1469     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1470                   "Unexpected subreg numbering");
1471     return RISCV::sub_vrm1_0 + Index;
1472   }
1473   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1474     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1475                   "Unexpected subreg numbering");
1476     return RISCV::sub_vrm2_0 + Index;
1477   }
1478   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1479     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1480                   "Unexpected subreg numbering");
1481     return RISCV::sub_vrm4_0 + Index;
1482   }
1483   llvm_unreachable("Invalid vector type.");
1484 }
1485 
1486 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1487   if (VT.getVectorElementType() == MVT::i1)
1488     return RISCV::VRRegClassID;
1489   return getRegClassIDForLMUL(getLMUL(VT));
1490 }
1491 
1492 // Attempt to decompose a subvector insert/extract between VecVT and
1493 // SubVecVT via subregister indices. Returns the subregister index that
1494 // can perform the subvector insert/extract with the given element index, as
1495 // well as the index corresponding to any leftover subvectors that must be
1496 // further inserted/extracted within the register class for SubVecVT.
1497 std::pair<unsigned, unsigned>
1498 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1499     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1500     const RISCVRegisterInfo *TRI) {
1501   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1502                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1503                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1504                 "Register classes not ordered");
1505   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1506   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1507   // Try to compose a subregister index that takes us from the incoming
1508   // LMUL>1 register class down to the outgoing one. At each step we half
1509   // the LMUL:
1510   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1511   // Note that this is not guaranteed to find a subregister index, such as
1512   // when we are extracting from one VR type to another.
1513   unsigned SubRegIdx = RISCV::NoSubRegister;
1514   for (const unsigned RCID :
1515        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1516     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1517       VecVT = VecVT.getHalfNumVectorElementsVT();
1518       bool IsHi =
1519           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1520       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1521                                             getSubregIndexByMVT(VecVT, IsHi));
1522       if (IsHi)
1523         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1524     }
1525   return {SubRegIdx, InsertExtractIdx};
1526 }
1527 
1528 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1529 // stores for those types.
1530 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1531   return !Subtarget.useRVVForFixedLengthVectors() ||
1532          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1533 }
1534 
1535 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1536   if (ScalarTy->isPointerTy())
1537     return true;
1538 
1539   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1540       ScalarTy->isIntegerTy(32))
1541     return true;
1542 
1543   if (ScalarTy->isIntegerTy(64))
1544     return Subtarget.hasVInstructionsI64();
1545 
1546   if (ScalarTy->isHalfTy())
1547     return Subtarget.hasVInstructionsF16();
1548   if (ScalarTy->isFloatTy())
1549     return Subtarget.hasVInstructionsF32();
1550   if (ScalarTy->isDoubleTy())
1551     return Subtarget.hasVInstructionsF64();
1552 
1553   return false;
1554 }
1555 
1556 static SDValue getVLOperand(SDValue Op) {
1557   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1558           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
1559          "Unexpected opcode");
1560   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
1561   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
1562   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1563       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
1564   if (!II)
1565     return SDValue();
1566   return Op.getOperand(II->VLOperand + 1 + HasChain);
1567 }
1568 
1569 static bool useRVVForFixedLengthVectorVT(MVT VT,
1570                                          const RISCVSubtarget &Subtarget) {
1571   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1572   if (!Subtarget.useRVVForFixedLengthVectors())
1573     return false;
1574 
1575   // We only support a set of vector types with a consistent maximum fixed size
1576   // across all supported vector element types to avoid legalization issues.
1577   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1578   // fixed-length vector type we support is 1024 bytes.
1579   if (VT.getFixedSizeInBits() > 1024 * 8)
1580     return false;
1581 
1582   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1583 
1584   MVT EltVT = VT.getVectorElementType();
1585 
1586   // Don't use RVV for vectors we cannot scalarize if required.
1587   switch (EltVT.SimpleTy) {
1588   // i1 is supported but has different rules.
1589   default:
1590     return false;
1591   case MVT::i1:
1592     // Masks can only use a single register.
1593     if (VT.getVectorNumElements() > MinVLen)
1594       return false;
1595     MinVLen /= 8;
1596     break;
1597   case MVT::i8:
1598   case MVT::i16:
1599   case MVT::i32:
1600     break;
1601   case MVT::i64:
1602     if (!Subtarget.hasVInstructionsI64())
1603       return false;
1604     break;
1605   case MVT::f16:
1606     if (!Subtarget.hasVInstructionsF16())
1607       return false;
1608     break;
1609   case MVT::f32:
1610     if (!Subtarget.hasVInstructionsF32())
1611       return false;
1612     break;
1613   case MVT::f64:
1614     if (!Subtarget.hasVInstructionsF64())
1615       return false;
1616     break;
1617   }
1618 
1619   // Reject elements larger than ELEN.
1620   if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
1621     return false;
1622 
1623   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1624   // Don't use RVV for types that don't fit.
1625   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1626     return false;
1627 
1628   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1629   // the base fixed length RVV support in place.
1630   if (!VT.isPow2VectorType())
1631     return false;
1632 
1633   return true;
1634 }
1635 
1636 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1637   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1638 }
1639 
1640 // Return the largest legal scalable vector type that matches VT's element type.
1641 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1642                                             const RISCVSubtarget &Subtarget) {
1643   // This may be called before legal types are setup.
1644   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1645           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1646          "Expected legal fixed length vector!");
1647 
1648   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1649   unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();
1650 
1651   MVT EltVT = VT.getVectorElementType();
1652   switch (EltVT.SimpleTy) {
1653   default:
1654     llvm_unreachable("unexpected element type for RVV container");
1655   case MVT::i1:
1656   case MVT::i8:
1657   case MVT::i16:
1658   case MVT::i32:
1659   case MVT::i64:
1660   case MVT::f16:
1661   case MVT::f32:
1662   case MVT::f64: {
1663     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1664     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1665     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1666     unsigned NumElts =
1667         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1668     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1669     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1670     return MVT::getScalableVectorVT(EltVT, NumElts);
1671   }
1672   }
1673 }
1674 
1675 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1676                                             const RISCVSubtarget &Subtarget) {
1677   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1678                                           Subtarget);
1679 }
1680 
1681 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1682   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1683 }
1684 
1685 // Grow V to consume an entire RVV register.
1686 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1687                                        const RISCVSubtarget &Subtarget) {
1688   assert(VT.isScalableVector() &&
1689          "Expected to convert into a scalable vector!");
1690   assert(V.getValueType().isFixedLengthVector() &&
1691          "Expected a fixed length vector operand!");
1692   SDLoc DL(V);
1693   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1694   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1695 }
1696 
1697 // Shrink V so it's just big enough to maintain a VT's worth of data.
1698 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1699                                          const RISCVSubtarget &Subtarget) {
1700   assert(VT.isFixedLengthVector() &&
1701          "Expected to convert into a fixed length vector!");
1702   assert(V.getValueType().isScalableVector() &&
1703          "Expected a scalable vector operand!");
1704   SDLoc DL(V);
1705   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1706   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1707 }
1708 
1709 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1710 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1711 // the vector type that it is contained in.
1712 static std::pair<SDValue, SDValue>
1713 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1714                 const RISCVSubtarget &Subtarget) {
1715   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1716   MVT XLenVT = Subtarget.getXLenVT();
1717   SDValue VL = VecVT.isFixedLengthVector()
1718                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1719                    : DAG.getRegister(RISCV::X0, XLenVT);
1720   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1721   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1722   return {Mask, VL};
1723 }
1724 
1725 // As above but assuming the given type is a scalable vector type.
1726 static std::pair<SDValue, SDValue>
1727 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1728                         const RISCVSubtarget &Subtarget) {
1729   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1730   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1731 }
1732 
1733 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1734 // of either is (currently) supported. This can get us into an infinite loop
1735 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1736 // as a ..., etc.
1737 // Until either (or both) of these can reliably lower any node, reporting that
1738 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1739 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1740 // which is not desirable.
1741 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1742     EVT VT, unsigned DefinedValues) const {
1743   return false;
1744 }
1745 
1746 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1747                                   const RISCVSubtarget &Subtarget) {
1748   // RISCV FP-to-int conversions saturate to the destination register size, but
1749   // don't produce 0 for nan. We can use a conversion instruction and fix the
1750   // nan case with a compare and a select.
1751   SDValue Src = Op.getOperand(0);
1752 
1753   EVT DstVT = Op.getValueType();
1754   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1755 
1756   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1757   unsigned Opc;
1758   if (SatVT == DstVT)
1759     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1760   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1761     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1762   else
1763     return SDValue();
1764   // FIXME: Support other SatVTs by clamping before or after the conversion.
1765 
1766   SDLoc DL(Op);
1767   SDValue FpToInt = DAG.getNode(
1768       Opc, DL, DstVT, Src,
1769       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1770 
1771   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1772   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1773 }
1774 
1775 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1776 // and back. Taking care to avoid converting values that are nan or already
1777 // correct.
1778 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1779 // have FRM dependencies modeled yet.
1780 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1781   MVT VT = Op.getSimpleValueType();
1782   assert(VT.isVector() && "Unexpected type");
1783 
1784   SDLoc DL(Op);
1785 
1786   // Freeze the source since we are increasing the number of uses.
1787   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1788 
1789   // Truncate to integer and convert back to FP.
1790   MVT IntVT = VT.changeVectorElementTypeToInteger();
1791   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1792   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1793 
1794   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1795 
1796   if (Op.getOpcode() == ISD::FCEIL) {
1797     // If the truncated value is the greater than or equal to the original
1798     // value, we've computed the ceil. Otherwise, we went the wrong way and
1799     // need to increase by 1.
1800     // FIXME: This should use a masked operation. Handle here or in isel?
1801     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1802                                  DAG.getConstantFP(1.0, DL, VT));
1803     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1804     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1805   } else if (Op.getOpcode() == ISD::FFLOOR) {
1806     // If the truncated value is the less than or equal to the original value,
1807     // we've computed the floor. Otherwise, we went the wrong way and need to
1808     // decrease by 1.
1809     // FIXME: This should use a masked operation. Handle here or in isel?
1810     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1811                                  DAG.getConstantFP(1.0, DL, VT));
1812     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1813     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1814   }
1815 
1816   // Restore the original sign so that -0.0 is preserved.
1817   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1818 
1819   // Determine the largest integer that can be represented exactly. This and
1820   // values larger than it don't have any fractional bits so don't need to
1821   // be converted.
1822   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1823   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1824   APFloat MaxVal = APFloat(FltSem);
1825   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1826                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1827   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1828 
1829   // If abs(Src) was larger than MaxVal or nan, keep it.
1830   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1831   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1832   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1833 }
1834 
1835 // ISD::FROUND is defined to round to nearest with ties rounding away from 0.
1836 // This mode isn't supported in vector hardware on RISCV. But as long as we
1837 // aren't compiling with trapping math, we can emulate this with
1838 // floor(X + copysign(nextafter(0.5, 0.0), X)).
1839 // FIXME: Could be shorter by changing rounding mode, but we don't have FRM
1840 // dependencies modeled yet.
1841 // FIXME: Use masked operations to avoid final merge.
1842 static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
1843   MVT VT = Op.getSimpleValueType();
1844   assert(VT.isVector() && "Unexpected type");
1845 
1846   SDLoc DL(Op);
1847 
1848   // Freeze the source since we are increasing the number of uses.
1849   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1850 
1851   // We do the conversion on the absolute value and fix the sign at the end.
1852   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1853 
1854   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1855   bool Ignored;
1856   APFloat Point5Pred = APFloat(0.5f);
1857   Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
1858   Point5Pred.next(/*nextDown*/ true);
1859 
1860   // Add the adjustment.
1861   SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
1862                                DAG.getConstantFP(Point5Pred, DL, VT));
1863 
1864   // Truncate to integer and convert back to fp.
1865   MVT IntVT = VT.changeVectorElementTypeToInteger();
1866   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
1867   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1868 
1869   // Restore the original sign.
1870   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1871 
1872   // Determine the largest integer that can be represented exactly. This and
1873   // values larger than it don't have any fractional bits so don't need to
1874   // be converted.
1875   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1876   APFloat MaxVal = APFloat(FltSem);
1877   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1878                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1879   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1880 
1881   // If abs(Src) was larger than MaxVal or nan, keep it.
1882   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1883   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1884   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1885 }
1886 
1887 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1888                                  const RISCVSubtarget &Subtarget) {
1889   MVT VT = Op.getSimpleValueType();
1890   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1891 
1892   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1893 
1894   SDLoc DL(Op);
1895   SDValue Mask, VL;
1896   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1897 
1898   unsigned Opc =
1899       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1900   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
1901                               Op.getOperand(0), VL);
1902   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1903 }
1904 
1905 struct VIDSequence {
1906   int64_t StepNumerator;
1907   unsigned StepDenominator;
1908   int64_t Addend;
1909 };
1910 
1911 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1912 // to the (non-zero) step S and start value X. This can be then lowered as the
1913 // RVV sequence (VID * S) + X, for example.
1914 // The step S is represented as an integer numerator divided by a positive
1915 // denominator. Note that the implementation currently only identifies
1916 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1917 // cannot detect 2/3, for example.
1918 // Note that this method will also match potentially unappealing index
1919 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1920 // determine whether this is worth generating code for.
1921 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1922   unsigned NumElts = Op.getNumOperands();
1923   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1924   if (!Op.getValueType().isInteger())
1925     return None;
1926 
1927   Optional<unsigned> SeqStepDenom;
1928   Optional<int64_t> SeqStepNum, SeqAddend;
1929   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1930   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1931   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1932     // Assume undef elements match the sequence; we just have to be careful
1933     // when interpolating across them.
1934     if (Op.getOperand(Idx).isUndef())
1935       continue;
1936     // The BUILD_VECTOR must be all constants.
1937     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1938       return None;
1939 
1940     uint64_t Val = Op.getConstantOperandVal(Idx) &
1941                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1942 
1943     if (PrevElt) {
1944       // Calculate the step since the last non-undef element, and ensure
1945       // it's consistent across the entire sequence.
1946       unsigned IdxDiff = Idx - PrevElt->second;
1947       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1948 
1949       // A zero-value value difference means that we're somewhere in the middle
1950       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1951       // step change before evaluating the sequence.
1952       if (ValDiff != 0) {
1953         int64_t Remainder = ValDiff % IdxDiff;
1954         // Normalize the step if it's greater than 1.
1955         if (Remainder != ValDiff) {
1956           // The difference must cleanly divide the element span.
1957           if (Remainder != 0)
1958             return None;
1959           ValDiff /= IdxDiff;
1960           IdxDiff = 1;
1961         }
1962 
1963         if (!SeqStepNum)
1964           SeqStepNum = ValDiff;
1965         else if (ValDiff != SeqStepNum)
1966           return None;
1967 
1968         if (!SeqStepDenom)
1969           SeqStepDenom = IdxDiff;
1970         else if (IdxDiff != *SeqStepDenom)
1971           return None;
1972       }
1973     }
1974 
1975     // Record and/or check any addend.
1976     if (SeqStepNum && SeqStepDenom) {
1977       uint64_t ExpectedVal =
1978           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1979       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1980       if (!SeqAddend)
1981         SeqAddend = Addend;
1982       else if (SeqAddend != Addend)
1983         return None;
1984     }
1985 
1986     // Record this non-undef element for later.
1987     if (!PrevElt || PrevElt->first != Val)
1988       PrevElt = std::make_pair(Val, Idx);
1989   }
1990   // We need to have logged both a step and an addend for this to count as
1991   // a legal index sequence.
1992   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
1993     return None;
1994 
1995   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1996 }
1997 
1998 // Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
1999 // and lower it as a VRGATHER_VX_VL from the source vector.
2000 static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
2001                                   SelectionDAG &DAG,
2002                                   const RISCVSubtarget &Subtarget) {
2003   if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2004     return SDValue();
2005   SDValue Vec = SplatVal.getOperand(0);
2006   // Only perform this optimization on vectors of the same size for simplicity.
2007   if (Vec.getValueType() != VT)
2008     return SDValue();
2009   SDValue Idx = SplatVal.getOperand(1);
2010   // The index must be a legal type.
2011   if (Idx.getValueType() != Subtarget.getXLenVT())
2012     return SDValue();
2013 
2014   MVT ContainerVT = VT;
2015   if (VT.isFixedLengthVector()) {
2016     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2017     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2018   }
2019 
2020   SDValue Mask, VL;
2021   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2022 
2023   SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
2024                                Idx, Mask, VL);
2025 
2026   if (!VT.isFixedLengthVector())
2027     return Gather;
2028 
2029   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2030 }
2031 
2032 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
2033                                  const RISCVSubtarget &Subtarget) {
2034   MVT VT = Op.getSimpleValueType();
2035   assert(VT.isFixedLengthVector() && "Unexpected vector!");
2036 
2037   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2038 
2039   SDLoc DL(Op);
2040   SDValue Mask, VL;
2041   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2042 
2043   MVT XLenVT = Subtarget.getXLenVT();
2044   unsigned NumElts = Op.getNumOperands();
2045 
2046   if (VT.getVectorElementType() == MVT::i1) {
2047     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
2048       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
2049       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
2050     }
2051 
2052     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
2053       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
2054       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
2055     }
2056 
2057     // Lower constant mask BUILD_VECTORs via an integer vector type, in
2058     // scalar integer chunks whose bit-width depends on the number of mask
2059     // bits and XLEN.
2060     // First, determine the most appropriate scalar integer type to use. This
2061     // is at most XLenVT, but may be shrunk to a smaller vector element type
2062     // according to the size of the final vector - use i8 chunks rather than
2063     // XLenVT if we're producing a v8i1. This results in more consistent
2064     // codegen across RV32 and RV64.
2065     unsigned NumViaIntegerBits =
2066         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
2067     NumViaIntegerBits = std::min(NumViaIntegerBits,
2068                                  Subtarget.getMaxELENForFixedLengthVectors());
2069     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
2070       // If we have to use more than one INSERT_VECTOR_ELT then this
2071       // optimization is likely to increase code size; avoid peforming it in
2072       // such a case. We can use a load from a constant pool in this case.
2073       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
2074         return SDValue();
2075       // Now we can create our integer vector type. Note that it may be larger
2076       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
2077       MVT IntegerViaVecVT =
2078           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
2079                            divideCeil(NumElts, NumViaIntegerBits));
2080 
2081       uint64_t Bits = 0;
2082       unsigned BitPos = 0, IntegerEltIdx = 0;
2083       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
2084 
2085       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
2086         // Once we accumulate enough bits to fill our scalar type, insert into
2087         // our vector and clear our accumulated data.
2088         if (I != 0 && I % NumViaIntegerBits == 0) {
2089           if (NumViaIntegerBits <= 32)
2090             Bits = SignExtend64(Bits, 32);
2091           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2092           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
2093                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2094           Bits = 0;
2095           BitPos = 0;
2096           IntegerEltIdx++;
2097         }
2098         SDValue V = Op.getOperand(I);
2099         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
2100         Bits |= ((uint64_t)BitValue << BitPos);
2101       }
2102 
2103       // Insert the (remaining) scalar value into position in our integer
2104       // vector type.
2105       if (NumViaIntegerBits <= 32)
2106         Bits = SignExtend64(Bits, 32);
2107       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2108       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
2109                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2110 
2111       if (NumElts < NumViaIntegerBits) {
2112         // If we're producing a smaller vector than our minimum legal integer
2113         // type, bitcast to the equivalent (known-legal) mask type, and extract
2114         // our final mask.
2115         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
2116         Vec = DAG.getBitcast(MVT::v8i1, Vec);
2117         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
2118                           DAG.getConstant(0, DL, XLenVT));
2119       } else {
2120         // Else we must have produced an integer type with the same size as the
2121         // mask type; bitcast for the final result.
2122         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
2123         Vec = DAG.getBitcast(VT, Vec);
2124       }
2125 
2126       return Vec;
2127     }
2128 
2129     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
2130     // vector type, we have a legal equivalently-sized i8 type, so we can use
2131     // that.
2132     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
2133     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
2134 
2135     SDValue WideVec;
2136     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2137       // For a splat, perform a scalar truncate before creating the wider
2138       // vector.
2139       assert(Splat.getValueType() == XLenVT &&
2140              "Unexpected type for i1 splat value");
2141       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2142                           DAG.getConstant(1, DL, XLenVT));
2143       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2144     } else {
2145       SmallVector<SDValue, 8> Ops(Op->op_values());
2146       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2147       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2148       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2149     }
2150 
2151     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2152   }
2153 
2154   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2155     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
2156       return Gather;
2157     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2158                                         : RISCVISD::VMV_V_X_VL;
2159     Splat =
2160         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
2161     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2162   }
2163 
2164   // Try and match index sequences, which we can lower to the vid instruction
2165   // with optional modifications. An all-undef vector is matched by
2166   // getSplatValue, above.
2167   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2168     int64_t StepNumerator = SimpleVID->StepNumerator;
2169     unsigned StepDenominator = SimpleVID->StepDenominator;
2170     int64_t Addend = SimpleVID->Addend;
2171 
2172     assert(StepNumerator != 0 && "Invalid step");
2173     bool Negate = false;
2174     int64_t SplatStepVal = StepNumerator;
2175     unsigned StepOpcode = ISD::MUL;
2176     if (StepNumerator != 1) {
2177       if (isPowerOf2_64(std::abs(StepNumerator))) {
2178         Negate = StepNumerator < 0;
2179         StepOpcode = ISD::SHL;
2180         SplatStepVal = Log2_64(std::abs(StepNumerator));
2181       }
2182     }
2183 
2184     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
2185     // threshold since it's the immediate value many RVV instructions accept.
2186     // There is no vmul.vi instruction so ensure multiply constant can fit in
2187     // a single addi instruction.
2188     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2189          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2190         isPowerOf2_32(StepDenominator) && isInt<5>(Addend)) {
2191       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2192       // Convert right out of the scalable type so we can use standard ISD
2193       // nodes for the rest of the computation. If we used scalable types with
2194       // these, we'd lose the fixed-length vector info and generate worse
2195       // vsetvli code.
2196       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2197       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2198           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2199         SDValue SplatStep = DAG.getSplatVector(
2200             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2201         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2202       }
2203       if (StepDenominator != 1) {
2204         SDValue SplatStep = DAG.getSplatVector(
2205             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2206         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2207       }
2208       if (Addend != 0 || Negate) {
2209         SDValue SplatAddend =
2210             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
2211         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
2212       }
2213       return VID;
2214     }
2215   }
2216 
2217   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2218   // when re-interpreted as a vector with a larger element type. For example,
2219   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2220   // could be instead splat as
2221   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2222   // TODO: This optimization could also work on non-constant splats, but it
2223   // would require bit-manipulation instructions to construct the splat value.
2224   SmallVector<SDValue> Sequence;
2225   unsigned EltBitSize = VT.getScalarSizeInBits();
2226   const auto *BV = cast<BuildVectorSDNode>(Op);
2227   if (VT.isInteger() && EltBitSize < 64 &&
2228       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2229       BV->getRepeatedSequence(Sequence) &&
2230       (Sequence.size() * EltBitSize) <= 64) {
2231     unsigned SeqLen = Sequence.size();
2232     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2233     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2234     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2235             ViaIntVT == MVT::i64) &&
2236            "Unexpected sequence type");
2237 
2238     unsigned EltIdx = 0;
2239     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2240     uint64_t SplatValue = 0;
2241     // Construct the amalgamated value which can be splatted as this larger
2242     // vector type.
2243     for (const auto &SeqV : Sequence) {
2244       if (!SeqV.isUndef())
2245         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2246                        << (EltIdx * EltBitSize));
2247       EltIdx++;
2248     }
2249 
2250     // On RV64, sign-extend from 32 to 64 bits where possible in order to
2251     // achieve better constant materializion.
2252     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2253       SplatValue = SignExtend64(SplatValue, 32);
2254 
2255     // Since we can't introduce illegal i64 types at this stage, we can only
2256     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2257     // way we can use RVV instructions to splat.
2258     assert((ViaIntVT.bitsLE(XLenVT) ||
2259             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2260            "Unexpected bitcast sequence");
2261     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2262       SDValue ViaVL =
2263           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2264       MVT ViaContainerVT =
2265           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2266       SDValue Splat =
2267           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2268                       DAG.getUNDEF(ViaContainerVT),
2269                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2270       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2271       return DAG.getBitcast(VT, Splat);
2272     }
2273   }
2274 
2275   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2276   // which constitute a large proportion of the elements. In such cases we can
2277   // splat a vector with the dominant element and make up the shortfall with
2278   // INSERT_VECTOR_ELTs.
2279   // Note that this includes vectors of 2 elements by association. The
2280   // upper-most element is the "dominant" one, allowing us to use a splat to
2281   // "insert" the upper element, and an insert of the lower element at position
2282   // 0, which improves codegen.
2283   SDValue DominantValue;
2284   unsigned MostCommonCount = 0;
2285   DenseMap<SDValue, unsigned> ValueCounts;
2286   unsigned NumUndefElts =
2287       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2288 
2289   // Track the number of scalar loads we know we'd be inserting, estimated as
2290   // any non-zero floating-point constant. Other kinds of element are either
2291   // already in registers or are materialized on demand. The threshold at which
2292   // a vector load is more desirable than several scalar materializion and
2293   // vector-insertion instructions is not known.
2294   unsigned NumScalarLoads = 0;
2295 
2296   for (SDValue V : Op->op_values()) {
2297     if (V.isUndef())
2298       continue;
2299 
2300     ValueCounts.insert(std::make_pair(V, 0));
2301     unsigned &Count = ValueCounts[V];
2302 
2303     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2304       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2305 
2306     // Is this value dominant? In case of a tie, prefer the highest element as
2307     // it's cheaper to insert near the beginning of a vector than it is at the
2308     // end.
2309     if (++Count >= MostCommonCount) {
2310       DominantValue = V;
2311       MostCommonCount = Count;
2312     }
2313   }
2314 
2315   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2316   unsigned NumDefElts = NumElts - NumUndefElts;
2317   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2318 
2319   // Don't perform this optimization when optimizing for size, since
2320   // materializing elements and inserting them tends to cause code bloat.
2321   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2322       ((MostCommonCount > DominantValueCountThreshold) ||
2323        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2324     // Start by splatting the most common element.
2325     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2326 
2327     DenseSet<SDValue> Processed{DominantValue};
2328     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2329     for (const auto &OpIdx : enumerate(Op->ops())) {
2330       const SDValue &V = OpIdx.value();
2331       if (V.isUndef() || !Processed.insert(V).second)
2332         continue;
2333       if (ValueCounts[V] == 1) {
2334         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2335                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2336       } else {
2337         // Blend in all instances of this value using a VSELECT, using a
2338         // mask where each bit signals whether that element is the one
2339         // we're after.
2340         SmallVector<SDValue> Ops;
2341         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2342           return DAG.getConstant(V == V1, DL, XLenVT);
2343         });
2344         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2345                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2346                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2347       }
2348     }
2349 
2350     return Vec;
2351   }
2352 
2353   return SDValue();
2354 }
2355 
2356 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2357                                    SDValue Lo, SDValue Hi, SDValue VL,
2358                                    SelectionDAG &DAG) {
2359   bool HasPassthru = Passthru && !Passthru.isUndef();
2360   if (!HasPassthru && !Passthru)
2361     Passthru = DAG.getUNDEF(VT);
2362   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2363     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2364     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2365     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2366     // node in order to try and match RVV vector/scalar instructions.
2367     if ((LoC >> 31) == HiC)
2368       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
2369 
2370     // If vl is equal to XLEN_MAX and Hi constant is equal to Lo, we could use
2371     // vmv.v.x whose EEW = 32 to lower it.
2372     auto *Const = dyn_cast<ConstantSDNode>(VL);
2373     if (LoC == HiC && Const && Const->isAllOnesValue()) {
2374       MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
2375       // TODO: if vl <= min(VLMAX), we can also do this. But we could not
2376       // access the subtarget here now.
2377       auto InterVec = DAG.getNode(
2378           RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT), Lo,
2379                                   DAG.getRegister(RISCV::X0, MVT::i32));
2380       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2381     }
2382   }
2383 
2384   // Fall back to a stack store and stride x0 vector load.
2385   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
2386                      Hi, VL);
2387 }
2388 
2389 // Called by type legalization to handle splat of i64 on RV32.
2390 // FIXME: We can optimize this when the type has sign or zero bits in one
2391 // of the halves.
2392 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2393                                    SDValue Scalar, SDValue VL,
2394                                    SelectionDAG &DAG) {
2395   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2396   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2397                            DAG.getConstant(0, DL, MVT::i32));
2398   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2399                            DAG.getConstant(1, DL, MVT::i32));
2400   return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
2401 }
2402 
2403 // This function lowers a splat of a scalar operand Splat with the vector
2404 // length VL. It ensures the final sequence is type legal, which is useful when
2405 // lowering a splat after type legalization.
2406 static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
2407                                 MVT VT, SDLoc DL, SelectionDAG &DAG,
2408                                 const RISCVSubtarget &Subtarget) {
2409   bool HasPassthru = Passthru && !Passthru.isUndef();
2410   if (!HasPassthru && !Passthru)
2411     Passthru = DAG.getUNDEF(VT);
2412   if (VT.isFloatingPoint()) {
2413     // If VL is 1, we could use vfmv.s.f.
2414     if (isOneConstant(VL))
2415       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
2416     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
2417   }
2418 
2419   MVT XLenVT = Subtarget.getXLenVT();
2420 
2421   // Simplest case is that the operand needs to be promoted to XLenVT.
2422   if (Scalar.getValueType().bitsLE(XLenVT)) {
2423     // If the operand is a constant, sign extend to increase our chances
2424     // of being able to use a .vi instruction. ANY_EXTEND would become a
2425     // a zero extend and the simm5 check in isel would fail.
2426     // FIXME: Should we ignore the upper bits in isel instead?
2427     unsigned ExtOpc =
2428         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2429     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2430     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2431     // If VL is 1 and the scalar value won't benefit from immediate, we could
2432     // use vmv.s.x.
2433     if (isOneConstant(VL) &&
2434         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2435       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
2436     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
2437   }
2438 
2439   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2440          "Unexpected scalar for splat lowering!");
2441 
2442   if (isOneConstant(VL) && isNullConstant(Scalar))
2443     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
2444                        DAG.getConstant(0, DL, XLenVT), VL);
2445 
2446   // Otherwise use the more complicated splatting algorithm.
2447   return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
2448 }
2449 
2450 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
2451                                 const RISCVSubtarget &Subtarget) {
2452   // We need to be able to widen elements to the next larger integer type.
2453   if (VT.getScalarSizeInBits() >= Subtarget.getMaxELENForFixedLengthVectors())
2454     return false;
2455 
2456   int Size = Mask.size();
2457   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
2458 
2459   int Srcs[] = {-1, -1};
2460   for (int i = 0; i != Size; ++i) {
2461     // Ignore undef elements.
2462     if (Mask[i] < 0)
2463       continue;
2464 
2465     // Is this an even or odd element.
2466     int Pol = i % 2;
2467 
2468     // Ensure we consistently use the same source for this element polarity.
2469     int Src = Mask[i] / Size;
2470     if (Srcs[Pol] < 0)
2471       Srcs[Pol] = Src;
2472     if (Srcs[Pol] != Src)
2473       return false;
2474 
2475     // Make sure the element within the source is appropriate for this element
2476     // in the destination.
2477     int Elt = Mask[i] % Size;
2478     if (Elt != i / 2)
2479       return false;
2480   }
2481 
2482   // We need to find a source for each polarity and they can't be the same.
2483   if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
2484     return false;
2485 
2486   // Swap the sources if the second source was in the even polarity.
2487   SwapSources = Srcs[0] > Srcs[1];
2488 
2489   return true;
2490 }
2491 
2492 /// Match shuffles that concatenate two vectors, rotate the concatenation,
2493 /// and then extract the original number of elements from the rotated result.
2494 /// This is equivalent to vector.splice or X86's PALIGNR instruction. The
2495 /// returned rotation amount is for a rotate right, where elements move from
2496 /// higher elements to lower elements. \p LoSrc indicates the first source
2497 /// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
2498 /// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
2499 /// 0 or 1 if a rotation is found.
2500 ///
2501 /// NOTE: We talk about rotate to the right which matches how bit shift and
2502 /// rotate instructions are described where LSBs are on the right, but LLVM IR
2503 /// and the table below write vectors with the lowest elements on the left.
2504 static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
2505   int Size = Mask.size();
2506 
2507   // We need to detect various ways of spelling a rotation:
2508   //   [11, 12, 13, 14, 15,  0,  1,  2]
2509   //   [-1, 12, 13, 14, -1, -1,  1, -1]
2510   //   [-1, -1, -1, -1, -1, -1,  1,  2]
2511   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
2512   //   [-1,  4,  5,  6, -1, -1,  9, -1]
2513   //   [-1,  4,  5,  6, -1, -1, -1, -1]
2514   int Rotation = 0;
2515   LoSrc = -1;
2516   HiSrc = -1;
2517   for (int i = 0; i != Size; ++i) {
2518     int M = Mask[i];
2519     if (M < 0)
2520       continue;
2521 
2522     // Determine where a rotate vector would have started.
2523     int StartIdx = i - (M % Size);
2524     // The identity rotation isn't interesting, stop.
2525     if (StartIdx == 0)
2526       return -1;
2527 
2528     // If we found the tail of a vector the rotation must be the missing
2529     // front. If we found the head of a vector, it must be how much of the
2530     // head.
2531     int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
2532 
2533     if (Rotation == 0)
2534       Rotation = CandidateRotation;
2535     else if (Rotation != CandidateRotation)
2536       // The rotations don't match, so we can't match this mask.
2537       return -1;
2538 
2539     // Compute which value this mask is pointing at.
2540     int MaskSrc = M < Size ? 0 : 1;
2541 
2542     // Compute which of the two target values this index should be assigned to.
2543     // This reflects whether the high elements are remaining or the low elemnts
2544     // are remaining.
2545     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
2546 
2547     // Either set up this value if we've not encountered it before, or check
2548     // that it remains consistent.
2549     if (TargetSrc < 0)
2550       TargetSrc = MaskSrc;
2551     else if (TargetSrc != MaskSrc)
2552       // This may be a rotation, but it pulls from the inputs in some
2553       // unsupported interleaving.
2554       return -1;
2555   }
2556 
2557   // Check that we successfully analyzed the mask, and normalize the results.
2558   assert(Rotation != 0 && "Failed to locate a viable rotation!");
2559   assert((LoSrc >= 0 || HiSrc >= 0) &&
2560          "Failed to find a rotated input vector!");
2561 
2562   return Rotation;
2563 }
2564 
2565 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2566                                    const RISCVSubtarget &Subtarget) {
2567   SDValue V1 = Op.getOperand(0);
2568   SDValue V2 = Op.getOperand(1);
2569   SDLoc DL(Op);
2570   MVT XLenVT = Subtarget.getXLenVT();
2571   MVT VT = Op.getSimpleValueType();
2572   unsigned NumElts = VT.getVectorNumElements();
2573   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2574 
2575   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2576 
2577   SDValue TrueMask, VL;
2578   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2579 
2580   if (SVN->isSplat()) {
2581     const int Lane = SVN->getSplatIndex();
2582     if (Lane >= 0) {
2583       MVT SVT = VT.getVectorElementType();
2584 
2585       // Turn splatted vector load into a strided load with an X0 stride.
2586       SDValue V = V1;
2587       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2588       // with undef.
2589       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2590       int Offset = Lane;
2591       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2592         int OpElements =
2593             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2594         V = V.getOperand(Offset / OpElements);
2595         Offset %= OpElements;
2596       }
2597 
2598       // We need to ensure the load isn't atomic or volatile.
2599       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2600         auto *Ld = cast<LoadSDNode>(V);
2601         Offset *= SVT.getStoreSize();
2602         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2603                                                    TypeSize::Fixed(Offset), DL);
2604 
2605         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2606         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2607           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2608           SDValue IntID =
2609               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2610           SDValue Ops[] = {Ld->getChain(),
2611                            IntID,
2612                            DAG.getUNDEF(ContainerVT),
2613                            NewAddr,
2614                            DAG.getRegister(RISCV::X0, XLenVT),
2615                            VL};
2616           SDValue NewLoad = DAG.getMemIntrinsicNode(
2617               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2618               DAG.getMachineFunction().getMachineMemOperand(
2619                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2620           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2621           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2622         }
2623 
2624         // Otherwise use a scalar load and splat. This will give the best
2625         // opportunity to fold a splat into the operation. ISel can turn it into
2626         // the x0 strided load if we aren't able to fold away the select.
2627         if (SVT.isFloatingPoint())
2628           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2629                           Ld->getPointerInfo().getWithOffset(Offset),
2630                           Ld->getOriginalAlign(),
2631                           Ld->getMemOperand()->getFlags());
2632         else
2633           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2634                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2635                              Ld->getOriginalAlign(),
2636                              Ld->getMemOperand()->getFlags());
2637         DAG.makeEquivalentMemoryOrdering(Ld, V);
2638 
2639         unsigned Opc =
2640             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2641         SDValue Splat =
2642             DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
2643         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2644       }
2645 
2646       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2647       assert(Lane < (int)NumElts && "Unexpected lane!");
2648       SDValue Gather =
2649           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2650                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2651       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2652     }
2653   }
2654 
2655   ArrayRef<int> Mask = SVN->getMask();
2656 
2657   // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors may
2658   // be undef which can be handled with a single SLIDEDOWN/UP.
2659   int LoSrc, HiSrc;
2660   int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
2661   if (Rotation > 0) {
2662     SDValue LoV, HiV;
2663     if (LoSrc >= 0) {
2664       LoV = LoSrc == 0 ? V1 : V2;
2665       LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
2666     }
2667     if (HiSrc >= 0) {
2668       HiV = HiSrc == 0 ? V1 : V2;
2669       HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
2670     }
2671 
2672     // We found a rotation. We need to slide HiV down by Rotation. Then we need
2673     // to slide LoV up by (NumElts - Rotation).
2674     unsigned InvRotate = NumElts - Rotation;
2675 
2676     SDValue Res = DAG.getUNDEF(ContainerVT);
2677     if (HiV) {
2678       // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
2679       // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
2680       // causes multiple vsetvlis in some test cases such as lowering
2681       // reduce.mul
2682       SDValue DownVL = VL;
2683       if (LoV)
2684         DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
2685       Res =
2686           DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, Res, HiV,
2687                       DAG.getConstant(Rotation, DL, XLenVT), TrueMask, DownVL);
2688     }
2689     if (LoV)
2690       Res = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Res, LoV,
2691                         DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL);
2692 
2693     return convertFromScalableVector(VT, Res, DAG, Subtarget);
2694   }
2695 
2696   // Detect an interleave shuffle and lower to
2697   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
2698   bool SwapSources;
2699   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2700     // Swap sources if needed.
2701     if (SwapSources)
2702       std::swap(V1, V2);
2703 
2704     // Extract the lower half of the vectors.
2705     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2706     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2707                      DAG.getConstant(0, DL, XLenVT));
2708     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2709                      DAG.getConstant(0, DL, XLenVT));
2710 
2711     // Double the element width and halve the number of elements in an int type.
2712     unsigned EltBits = VT.getScalarSizeInBits();
2713     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2714     MVT WideIntVT =
2715         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2716     // Convert this to a scalable vector. We need to base this on the
2717     // destination size to ensure there's always a type with a smaller LMUL.
2718     MVT WideIntContainerVT =
2719         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2720 
2721     // Convert sources to scalable vectors with the same element count as the
2722     // larger type.
2723     MVT HalfContainerVT = MVT::getVectorVT(
2724         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2725     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2726     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2727 
2728     // Cast sources to integer.
2729     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2730     MVT IntHalfVT =
2731         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2732     V1 = DAG.getBitcast(IntHalfVT, V1);
2733     V2 = DAG.getBitcast(IntHalfVT, V2);
2734 
2735     // Freeze V2 since we use it twice and we need to be sure that the add and
2736     // multiply see the same value.
2737     V2 = DAG.getFreeze(V2);
2738 
2739     // Recreate TrueMask using the widened type's element count.
2740     MVT MaskVT =
2741         MVT::getVectorVT(MVT::i1, HalfContainerVT.getVectorElementCount());
2742     TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2743 
2744     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2745     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2746                               V2, TrueMask, VL);
2747     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2748     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2749                                      DAG.getUNDEF(IntHalfVT),
2750                                      DAG.getAllOnesConstant(DL, XLenVT));
2751     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2752                                    V2, Multiplier, TrueMask, VL);
2753     // Add the new copies to our previous addition giving us 2^eltbits copies of
2754     // V2. This is equivalent to shifting V2 left by eltbits. This should
2755     // combine with the vwmulu.vv above to form vwmaccu.vv.
2756     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2757                       TrueMask, VL);
2758     // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
2759     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2760     // vector VT.
2761     ContainerVT =
2762         MVT::getVectorVT(VT.getVectorElementType(),
2763                          WideIntContainerVT.getVectorElementCount() * 2);
2764     Add = DAG.getBitcast(ContainerVT, Add);
2765     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2766   }
2767 
2768   // Detect shuffles which can be re-expressed as vector selects; these are
2769   // shuffles in which each element in the destination is taken from an element
2770   // at the corresponding index in either source vectors.
2771   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2772     int MaskIndex = MaskIdx.value();
2773     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2774   });
2775 
2776   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2777 
2778   SmallVector<SDValue> MaskVals;
2779   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2780   // merged with a second vrgather.
2781   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2782 
2783   // By default we preserve the original operand order, and use a mask to
2784   // select LHS as true and RHS as false. However, since RVV vector selects may
2785   // feature splats but only on the LHS, we may choose to invert our mask and
2786   // instead select between RHS and LHS.
2787   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2788   bool InvertMask = IsSelect == SwapOps;
2789 
2790   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2791   // half.
2792   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2793 
2794   // Now construct the mask that will be used by the vselect or blended
2795   // vrgather operation. For vrgathers, construct the appropriate indices into
2796   // each vector.
2797   for (int MaskIndex : Mask) {
2798     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2799     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2800     if (!IsSelect) {
2801       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2802       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2803                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2804                                      : DAG.getUNDEF(XLenVT));
2805       GatherIndicesRHS.push_back(
2806           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2807                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2808       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2809         ++LHSIndexCounts[MaskIndex];
2810       if (!IsLHSOrUndefIndex)
2811         ++RHSIndexCounts[MaskIndex - NumElts];
2812     }
2813   }
2814 
2815   if (SwapOps) {
2816     std::swap(V1, V2);
2817     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2818   }
2819 
2820   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2821   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2822   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2823 
2824   if (IsSelect)
2825     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2826 
2827   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2828     // On such a large vector we're unable to use i8 as the index type.
2829     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2830     // may involve vector splitting if we're already at LMUL=8, or our
2831     // user-supplied maximum fixed-length LMUL.
2832     return SDValue();
2833   }
2834 
2835   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2836   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2837   MVT IndexVT = VT.changeTypeToInteger();
2838   // Since we can't introduce illegal index types at this stage, use i16 and
2839   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2840   // than XLenVT.
2841   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2842     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2843     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2844   }
2845 
2846   MVT IndexContainerVT =
2847       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2848 
2849   SDValue Gather;
2850   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2851   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2852   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2853     Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
2854                               Subtarget);
2855   } else {
2856     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2857     // If only one index is used, we can use a "splat" vrgather.
2858     // TODO: We can splat the most-common index and fix-up any stragglers, if
2859     // that's beneficial.
2860     if (LHSIndexCounts.size() == 1) {
2861       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2862       Gather =
2863           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2864                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2865     } else {
2866       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2867       LHSIndices =
2868           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2869 
2870       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2871                            TrueMask, VL);
2872     }
2873   }
2874 
2875   // If a second vector operand is used by this shuffle, blend it in with an
2876   // additional vrgather.
2877   if (!V2.isUndef()) {
2878     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2879     // If only one index is used, we can use a "splat" vrgather.
2880     // TODO: We can splat the most-common index and fix-up any stragglers, if
2881     // that's beneficial.
2882     if (RHSIndexCounts.size() == 1) {
2883       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2884       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2885                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2886     } else {
2887       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2888       RHSIndices =
2889           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2890       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2891                        VL);
2892     }
2893 
2894     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2895     SelectMask =
2896         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2897 
2898     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2899                          Gather, VL);
2900   }
2901 
2902   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2903 }
2904 
2905 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
2906   // Support splats for any type. These should type legalize well.
2907   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
2908     return true;
2909 
2910   // Only support legal VTs for other shuffles for now.
2911   if (!isTypeLegal(VT))
2912     return false;
2913 
2914   MVT SVT = VT.getSimpleVT();
2915 
2916   bool SwapSources;
2917   int LoSrc, HiSrc;
2918   return (isElementRotate(LoSrc, HiSrc, M) > 0) ||
2919          isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
2920 }
2921 
2922 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2923                                      SDLoc DL, SelectionDAG &DAG,
2924                                      const RISCVSubtarget &Subtarget) {
2925   if (VT.isScalableVector())
2926     return DAG.getFPExtendOrRound(Op, DL, VT);
2927   assert(VT.isFixedLengthVector() &&
2928          "Unexpected value type for RVV FP extend/round lowering");
2929   SDValue Mask, VL;
2930   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2931   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2932                         ? RISCVISD::FP_EXTEND_VL
2933                         : RISCVISD::FP_ROUND_VL;
2934   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2935 }
2936 
2937 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2938 // the exponent.
2939 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2940   MVT VT = Op.getSimpleValueType();
2941   unsigned EltSize = VT.getScalarSizeInBits();
2942   SDValue Src = Op.getOperand(0);
2943   SDLoc DL(Op);
2944 
2945   // We need a FP type that can represent the value.
2946   // TODO: Use f16 for i8 when possible?
2947   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2948   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2949 
2950   // Legal types should have been checked in the RISCVTargetLowering
2951   // constructor.
2952   // TODO: Splitting may make sense in some cases.
2953   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2954          "Expected legal float type!");
2955 
2956   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2957   // The trailing zero count is equal to log2 of this single bit value.
2958   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2959     SDValue Neg =
2960         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2961     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2962   }
2963 
2964   // We have a legal FP type, convert to it.
2965   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2966   // Bitcast to integer and shift the exponent to the LSB.
2967   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2968   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2969   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2970   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2971                               DAG.getConstant(ShiftAmt, DL, IntVT));
2972   // Truncate back to original type to allow vnsrl.
2973   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2974   // The exponent contains log2 of the value in biased form.
2975   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2976 
2977   // For trailing zeros, we just need to subtract the bias.
2978   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2979     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2980                        DAG.getConstant(ExponentBias, DL, VT));
2981 
2982   // For leading zeros, we need to remove the bias and convert from log2 to
2983   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2984   unsigned Adjust = ExponentBias + (EltSize - 1);
2985   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2986 }
2987 
2988 // While RVV has alignment restrictions, we should always be able to load as a
2989 // legal equivalently-sized byte-typed vector instead. This method is
2990 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
2991 // the load is already correctly-aligned, it returns SDValue().
2992 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2993                                                     SelectionDAG &DAG) const {
2994   auto *Load = cast<LoadSDNode>(Op);
2995   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2996 
2997   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2998                                      Load->getMemoryVT(),
2999                                      *Load->getMemOperand()))
3000     return SDValue();
3001 
3002   SDLoc DL(Op);
3003   MVT VT = Op.getSimpleValueType();
3004   unsigned EltSizeBits = VT.getScalarSizeInBits();
3005   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
3006          "Unexpected unaligned RVV load type");
3007   MVT NewVT =
3008       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
3009   assert(NewVT.isValid() &&
3010          "Expecting equally-sized RVV vector types to be legal");
3011   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
3012                           Load->getPointerInfo(), Load->getOriginalAlign(),
3013                           Load->getMemOperand()->getFlags());
3014   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
3015 }
3016 
3017 // While RVV has alignment restrictions, we should always be able to store as a
3018 // legal equivalently-sized byte-typed vector instead. This method is
3019 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
3020 // returns SDValue() if the store is already correctly aligned.
3021 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
3022                                                      SelectionDAG &DAG) const {
3023   auto *Store = cast<StoreSDNode>(Op);
3024   assert(Store && Store->getValue().getValueType().isVector() &&
3025          "Expected vector store");
3026 
3027   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
3028                                      Store->getMemoryVT(),
3029                                      *Store->getMemOperand()))
3030     return SDValue();
3031 
3032   SDLoc DL(Op);
3033   SDValue StoredVal = Store->getValue();
3034   MVT VT = StoredVal.getSimpleValueType();
3035   unsigned EltSizeBits = VT.getScalarSizeInBits();
3036   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
3037          "Unexpected unaligned RVV store type");
3038   MVT NewVT =
3039       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
3040   assert(NewVT.isValid() &&
3041          "Expecting equally-sized RVV vector types to be legal");
3042   StoredVal = DAG.getBitcast(NewVT, StoredVal);
3043   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
3044                       Store->getPointerInfo(), Store->getOriginalAlign(),
3045                       Store->getMemOperand()->getFlags());
3046 }
3047 
3048 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
3049                                             SelectionDAG &DAG) const {
3050   switch (Op.getOpcode()) {
3051   default:
3052     report_fatal_error("unimplemented operand");
3053   case ISD::GlobalAddress:
3054     return lowerGlobalAddress(Op, DAG);
3055   case ISD::BlockAddress:
3056     return lowerBlockAddress(Op, DAG);
3057   case ISD::ConstantPool:
3058     return lowerConstantPool(Op, DAG);
3059   case ISD::JumpTable:
3060     return lowerJumpTable(Op, DAG);
3061   case ISD::GlobalTLSAddress:
3062     return lowerGlobalTLSAddress(Op, DAG);
3063   case ISD::SELECT:
3064     return lowerSELECT(Op, DAG);
3065   case ISD::BRCOND:
3066     return lowerBRCOND(Op, DAG);
3067   case ISD::VASTART:
3068     return lowerVASTART(Op, DAG);
3069   case ISD::FRAMEADDR:
3070     return lowerFRAMEADDR(Op, DAG);
3071   case ISD::RETURNADDR:
3072     return lowerRETURNADDR(Op, DAG);
3073   case ISD::SHL_PARTS:
3074     return lowerShiftLeftParts(Op, DAG);
3075   case ISD::SRA_PARTS:
3076     return lowerShiftRightParts(Op, DAG, true);
3077   case ISD::SRL_PARTS:
3078     return lowerShiftRightParts(Op, DAG, false);
3079   case ISD::BITCAST: {
3080     SDLoc DL(Op);
3081     EVT VT = Op.getValueType();
3082     SDValue Op0 = Op.getOperand(0);
3083     EVT Op0VT = Op0.getValueType();
3084     MVT XLenVT = Subtarget.getXLenVT();
3085     if (VT.isFixedLengthVector()) {
3086       // We can handle fixed length vector bitcasts with a simple replacement
3087       // in isel.
3088       if (Op0VT.isFixedLengthVector())
3089         return Op;
3090       // When bitcasting from scalar to fixed-length vector, insert the scalar
3091       // into a one-element vector of the result type, and perform a vector
3092       // bitcast.
3093       if (!Op0VT.isVector()) {
3094         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
3095         if (!isTypeLegal(BVT))
3096           return SDValue();
3097         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
3098                                               DAG.getUNDEF(BVT), Op0,
3099                                               DAG.getConstant(0, DL, XLenVT)));
3100       }
3101       return SDValue();
3102     }
3103     // Custom-legalize bitcasts from fixed-length vector types to scalar types
3104     // thus: bitcast the vector to a one-element vector type whose element type
3105     // is the same as the result type, and extract the first element.
3106     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
3107       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
3108       if (!isTypeLegal(BVT))
3109         return SDValue();
3110       SDValue BVec = DAG.getBitcast(BVT, Op0);
3111       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
3112                          DAG.getConstant(0, DL, XLenVT));
3113     }
3114     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
3115       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
3116       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
3117       return FPConv;
3118     }
3119     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
3120         Subtarget.hasStdExtF()) {
3121       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
3122       SDValue FPConv =
3123           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
3124       return FPConv;
3125     }
3126     return SDValue();
3127   }
3128   case ISD::INTRINSIC_WO_CHAIN:
3129     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3130   case ISD::INTRINSIC_W_CHAIN:
3131     return LowerINTRINSIC_W_CHAIN(Op, DAG);
3132   case ISD::INTRINSIC_VOID:
3133     return LowerINTRINSIC_VOID(Op, DAG);
3134   case ISD::BSWAP:
3135   case ISD::BITREVERSE: {
3136     MVT VT = Op.getSimpleValueType();
3137     SDLoc DL(Op);
3138     if (Subtarget.hasStdExtZbp()) {
3139       // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
3140       // Start with the maximum immediate value which is the bitwidth - 1.
3141       unsigned Imm = VT.getSizeInBits() - 1;
3142       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
3143       if (Op.getOpcode() == ISD::BSWAP)
3144         Imm &= ~0x7U;
3145       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
3146                          DAG.getConstant(Imm, DL, VT));
3147     }
3148     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
3149     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
3150     // Expand bitreverse to a bswap(rev8) followed by brev8.
3151     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
3152     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
3153     // as brev8 by an isel pattern.
3154     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
3155                        DAG.getConstant(7, DL, VT));
3156   }
3157   case ISD::FSHL:
3158   case ISD::FSHR: {
3159     MVT VT = Op.getSimpleValueType();
3160     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
3161     SDLoc DL(Op);
3162     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
3163     // use log(XLen) bits. Mask the shift amount accordingly to prevent
3164     // accidentally setting the extra bit.
3165     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
3166     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
3167                                 DAG.getConstant(ShAmtWidth, DL, VT));
3168     // fshl and fshr concatenate their operands in the same order. fsr and fsl
3169     // instruction use different orders. fshl will return its first operand for
3170     // shift of zero, fshr will return its second operand. fsl and fsr both
3171     // return rs1 so the ISD nodes need to have different operand orders.
3172     // Shift amount is in rs2.
3173     SDValue Op0 = Op.getOperand(0);
3174     SDValue Op1 = Op.getOperand(1);
3175     unsigned Opc = RISCVISD::FSL;
3176     if (Op.getOpcode() == ISD::FSHR) {
3177       std::swap(Op0, Op1);
3178       Opc = RISCVISD::FSR;
3179     }
3180     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3181   }
3182   case ISD::TRUNCATE: {
3183     SDLoc DL(Op);
3184     MVT VT = Op.getSimpleValueType();
3185     // Only custom-lower vector truncates
3186     if (!VT.isVector())
3187       return Op;
3188 
3189     // Truncates to mask types are handled differently
3190     if (VT.getVectorElementType() == MVT::i1)
3191       return lowerVectorMaskTrunc(Op, DAG);
3192 
3193     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
3194     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
3195     // truncate by one power of two at a time.
3196     MVT DstEltVT = VT.getVectorElementType();
3197 
3198     SDValue Src = Op.getOperand(0);
3199     MVT SrcVT = Src.getSimpleValueType();
3200     MVT SrcEltVT = SrcVT.getVectorElementType();
3201 
3202     assert(DstEltVT.bitsLT(SrcEltVT) &&
3203            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
3204            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
3205            "Unexpected vector truncate lowering");
3206 
3207     MVT ContainerVT = SrcVT;
3208     if (SrcVT.isFixedLengthVector()) {
3209       ContainerVT = getContainerForFixedLengthVector(SrcVT);
3210       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3211     }
3212 
3213     SDValue Result = Src;
3214     SDValue Mask, VL;
3215     std::tie(Mask, VL) =
3216         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
3217     LLVMContext &Context = *DAG.getContext();
3218     const ElementCount Count = ContainerVT.getVectorElementCount();
3219     do {
3220       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
3221       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
3222       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
3223                            Mask, VL);
3224     } while (SrcEltVT != DstEltVT);
3225 
3226     if (SrcVT.isFixedLengthVector())
3227       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
3228 
3229     return Result;
3230   }
3231   case ISD::ANY_EXTEND:
3232   case ISD::ZERO_EXTEND:
3233     if (Op.getOperand(0).getValueType().isVector() &&
3234         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3235       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3236     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3237   case ISD::SIGN_EXTEND:
3238     if (Op.getOperand(0).getValueType().isVector() &&
3239         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3240       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3241     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3242   case ISD::SPLAT_VECTOR_PARTS:
3243     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3244   case ISD::INSERT_VECTOR_ELT:
3245     return lowerINSERT_VECTOR_ELT(Op, DAG);
3246   case ISD::EXTRACT_VECTOR_ELT:
3247     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3248   case ISD::VSCALE: {
3249     MVT VT = Op.getSimpleValueType();
3250     SDLoc DL(Op);
3251     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
3252     // We define our scalable vector types for lmul=1 to use a 64 bit known
3253     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
3254     // vscale as VLENB / 8.
3255     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3256     if (Subtarget.getMinVLen() < RISCV::RVVBitsPerBlock)
3257       report_fatal_error("Support for VLEN==32 is incomplete.");
3258     if (isa<ConstantSDNode>(Op.getOperand(0))) {
3259       // We assume VLENB is a multiple of 8. We manually choose the best shift
3260       // here because SimplifyDemandedBits isn't always able to simplify it.
3261       uint64_t Val = Op.getConstantOperandVal(0);
3262       if (isPowerOf2_64(Val)) {
3263         uint64_t Log2 = Log2_64(Val);
3264         if (Log2 < 3)
3265           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3266                              DAG.getConstant(3 - Log2, DL, VT));
3267         if (Log2 > 3)
3268           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3269                              DAG.getConstant(Log2 - 3, DL, VT));
3270         return VLENB;
3271       }
3272       // If the multiplier is a multiple of 8, scale it down to avoid needing
3273       // to shift the VLENB value.
3274       if ((Val % 8) == 0)
3275         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3276                            DAG.getConstant(Val / 8, DL, VT));
3277     }
3278 
3279     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3280                                  DAG.getConstant(3, DL, VT));
3281     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3282   }
3283   case ISD::FPOWI: {
3284     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
3285     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
3286     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3287         Op.getOperand(1).getValueType() == MVT::i32) {
3288       SDLoc DL(Op);
3289       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3290       SDValue Powi =
3291           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3292       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3293                          DAG.getIntPtrConstant(0, DL));
3294     }
3295     return SDValue();
3296   }
3297   case ISD::FP_EXTEND: {
3298     // RVV can only do fp_extend to types double the size as the source. We
3299     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
3300     // via f32.
3301     SDLoc DL(Op);
3302     MVT VT = Op.getSimpleValueType();
3303     SDValue Src = Op.getOperand(0);
3304     MVT SrcVT = Src.getSimpleValueType();
3305 
3306     // Prepare any fixed-length vector operands.
3307     MVT ContainerVT = VT;
3308     if (SrcVT.isFixedLengthVector()) {
3309       ContainerVT = getContainerForFixedLengthVector(VT);
3310       MVT SrcContainerVT =
3311           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
3312       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3313     }
3314 
3315     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
3316         SrcVT.getVectorElementType() != MVT::f16) {
3317       // For scalable vectors, we only need to close the gap between
3318       // vXf16->vXf64.
3319       if (!VT.isFixedLengthVector())
3320         return Op;
3321       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
3322       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3323       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3324     }
3325 
3326     MVT InterVT = VT.changeVectorElementType(MVT::f32);
3327     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
3328     SDValue IntermediateExtend = getRVVFPExtendOrRound(
3329         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
3330 
3331     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
3332                                            DL, DAG, Subtarget);
3333     if (VT.isFixedLengthVector())
3334       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
3335     return Extend;
3336   }
3337   case ISD::FP_ROUND: {
3338     // RVV can only do fp_round to types half the size as the source. We
3339     // custom-lower f64->f16 rounds via RVV's round-to-odd float
3340     // conversion instruction.
3341     SDLoc DL(Op);
3342     MVT VT = Op.getSimpleValueType();
3343     SDValue Src = Op.getOperand(0);
3344     MVT SrcVT = Src.getSimpleValueType();
3345 
3346     // Prepare any fixed-length vector operands.
3347     MVT ContainerVT = VT;
3348     if (VT.isFixedLengthVector()) {
3349       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3350       ContainerVT =
3351           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3352       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3353     }
3354 
3355     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
3356         SrcVT.getVectorElementType() != MVT::f64) {
3357       // For scalable vectors, we only need to close the gap between
3358       // vXf64<->vXf16.
3359       if (!VT.isFixedLengthVector())
3360         return Op;
3361       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
3362       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3363       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3364     }
3365 
3366     SDValue Mask, VL;
3367     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3368 
3369     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
3370     SDValue IntermediateRound =
3371         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
3372     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
3373                                           DL, DAG, Subtarget);
3374 
3375     if (VT.isFixedLengthVector())
3376       return convertFromScalableVector(VT, Round, DAG, Subtarget);
3377     return Round;
3378   }
3379   case ISD::FP_TO_SINT:
3380   case ISD::FP_TO_UINT:
3381   case ISD::SINT_TO_FP:
3382   case ISD::UINT_TO_FP: {
3383     // RVV can only do fp<->int conversions to types half/double the size as
3384     // the source. We custom-lower any conversions that do two hops into
3385     // sequences.
3386     MVT VT = Op.getSimpleValueType();
3387     if (!VT.isVector())
3388       return Op;
3389     SDLoc DL(Op);
3390     SDValue Src = Op.getOperand(0);
3391     MVT EltVT = VT.getVectorElementType();
3392     MVT SrcVT = Src.getSimpleValueType();
3393     MVT SrcEltVT = SrcVT.getVectorElementType();
3394     unsigned EltSize = EltVT.getSizeInBits();
3395     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3396     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3397            "Unexpected vector element types");
3398 
3399     bool IsInt2FP = SrcEltVT.isInteger();
3400     // Widening conversions
3401     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
3402       if (IsInt2FP) {
3403         // Do a regular integer sign/zero extension then convert to float.
3404         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
3405                                       VT.getVectorElementCount());
3406         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3407                                  ? ISD::ZERO_EXTEND
3408                                  : ISD::SIGN_EXTEND;
3409         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3410         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3411       }
3412       // FP2Int
3413       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3414       // Do one doubling fp_extend then complete the operation by converting
3415       // to int.
3416       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3417       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3418       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3419     }
3420 
3421     // Narrowing conversions
3422     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
3423       if (IsInt2FP) {
3424         // One narrowing int_to_fp, then an fp_round.
3425         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3426         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3427         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3428         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3429       }
3430       // FP2Int
3431       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3432       // representable by the integer, the result is poison.
3433       MVT IVecVT =
3434           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
3435                            VT.getVectorElementCount());
3436       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3437       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3438     }
3439 
3440     // Scalable vectors can exit here. Patterns will handle equally-sized
3441     // conversions halving/doubling ones.
3442     if (!VT.isFixedLengthVector())
3443       return Op;
3444 
3445     // For fixed-length vectors we lower to a custom "VL" node.
3446     unsigned RVVOpc = 0;
3447     switch (Op.getOpcode()) {
3448     default:
3449       llvm_unreachable("Impossible opcode");
3450     case ISD::FP_TO_SINT:
3451       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3452       break;
3453     case ISD::FP_TO_UINT:
3454       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3455       break;
3456     case ISD::SINT_TO_FP:
3457       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3458       break;
3459     case ISD::UINT_TO_FP:
3460       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3461       break;
3462     }
3463 
3464     MVT ContainerVT, SrcContainerVT;
3465     // Derive the reference container type from the larger vector type.
3466     if (SrcEltSize > EltSize) {
3467       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3468       ContainerVT =
3469           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3470     } else {
3471       ContainerVT = getContainerForFixedLengthVector(VT);
3472       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3473     }
3474 
3475     SDValue Mask, VL;
3476     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3477 
3478     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3479     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3480     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3481   }
3482   case ISD::FP_TO_SINT_SAT:
3483   case ISD::FP_TO_UINT_SAT:
3484     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3485   case ISD::FTRUNC:
3486   case ISD::FCEIL:
3487   case ISD::FFLOOR:
3488     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3489   case ISD::FROUND:
3490     return lowerFROUND(Op, DAG);
3491   case ISD::VECREDUCE_ADD:
3492   case ISD::VECREDUCE_UMAX:
3493   case ISD::VECREDUCE_SMAX:
3494   case ISD::VECREDUCE_UMIN:
3495   case ISD::VECREDUCE_SMIN:
3496     return lowerVECREDUCE(Op, DAG);
3497   case ISD::VECREDUCE_AND:
3498   case ISD::VECREDUCE_OR:
3499   case ISD::VECREDUCE_XOR:
3500     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3501       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3502     return lowerVECREDUCE(Op, DAG);
3503   case ISD::VECREDUCE_FADD:
3504   case ISD::VECREDUCE_SEQ_FADD:
3505   case ISD::VECREDUCE_FMIN:
3506   case ISD::VECREDUCE_FMAX:
3507     return lowerFPVECREDUCE(Op, DAG);
3508   case ISD::VP_REDUCE_ADD:
3509   case ISD::VP_REDUCE_UMAX:
3510   case ISD::VP_REDUCE_SMAX:
3511   case ISD::VP_REDUCE_UMIN:
3512   case ISD::VP_REDUCE_SMIN:
3513   case ISD::VP_REDUCE_FADD:
3514   case ISD::VP_REDUCE_SEQ_FADD:
3515   case ISD::VP_REDUCE_FMIN:
3516   case ISD::VP_REDUCE_FMAX:
3517     return lowerVPREDUCE(Op, DAG);
3518   case ISD::VP_REDUCE_AND:
3519   case ISD::VP_REDUCE_OR:
3520   case ISD::VP_REDUCE_XOR:
3521     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3522       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3523     return lowerVPREDUCE(Op, DAG);
3524   case ISD::INSERT_SUBVECTOR:
3525     return lowerINSERT_SUBVECTOR(Op, DAG);
3526   case ISD::EXTRACT_SUBVECTOR:
3527     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3528   case ISD::STEP_VECTOR:
3529     return lowerSTEP_VECTOR(Op, DAG);
3530   case ISD::VECTOR_REVERSE:
3531     return lowerVECTOR_REVERSE(Op, DAG);
3532   case ISD::VECTOR_SPLICE:
3533     return lowerVECTOR_SPLICE(Op, DAG);
3534   case ISD::BUILD_VECTOR:
3535     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3536   case ISD::SPLAT_VECTOR:
3537     if (Op.getValueType().getVectorElementType() == MVT::i1)
3538       return lowerVectorMaskSplat(Op, DAG);
3539     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
3540   case ISD::VECTOR_SHUFFLE:
3541     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3542   case ISD::CONCAT_VECTORS: {
3543     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3544     // better than going through the stack, as the default expansion does.
3545     SDLoc DL(Op);
3546     MVT VT = Op.getSimpleValueType();
3547     unsigned NumOpElts =
3548         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3549     SDValue Vec = DAG.getUNDEF(VT);
3550     for (const auto &OpIdx : enumerate(Op->ops())) {
3551       SDValue SubVec = OpIdx.value();
3552       // Don't insert undef subvectors.
3553       if (SubVec.isUndef())
3554         continue;
3555       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3556                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3557     }
3558     return Vec;
3559   }
3560   case ISD::LOAD:
3561     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3562       return V;
3563     if (Op.getValueType().isFixedLengthVector())
3564       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3565     return Op;
3566   case ISD::STORE:
3567     if (auto V = expandUnalignedRVVStore(Op, DAG))
3568       return V;
3569     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3570       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3571     return Op;
3572   case ISD::MLOAD:
3573   case ISD::VP_LOAD:
3574     return lowerMaskedLoad(Op, DAG);
3575   case ISD::MSTORE:
3576   case ISD::VP_STORE:
3577     return lowerMaskedStore(Op, DAG);
3578   case ISD::SETCC:
3579     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3580   case ISD::ADD:
3581     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3582   case ISD::SUB:
3583     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3584   case ISD::MUL:
3585     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3586   case ISD::MULHS:
3587     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3588   case ISD::MULHU:
3589     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3590   case ISD::AND:
3591     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3592                                               RISCVISD::AND_VL);
3593   case ISD::OR:
3594     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3595                                               RISCVISD::OR_VL);
3596   case ISD::XOR:
3597     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3598                                               RISCVISD::XOR_VL);
3599   case ISD::SDIV:
3600     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3601   case ISD::SREM:
3602     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3603   case ISD::UDIV:
3604     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3605   case ISD::UREM:
3606     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3607   case ISD::SHL:
3608   case ISD::SRA:
3609   case ISD::SRL:
3610     if (Op.getSimpleValueType().isFixedLengthVector())
3611       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3612     // This can be called for an i32 shift amount that needs to be promoted.
3613     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3614            "Unexpected custom legalisation");
3615     return SDValue();
3616   case ISD::SADDSAT:
3617     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3618   case ISD::UADDSAT:
3619     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3620   case ISD::SSUBSAT:
3621     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3622   case ISD::USUBSAT:
3623     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3624   case ISD::FADD:
3625     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3626   case ISD::FSUB:
3627     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3628   case ISD::FMUL:
3629     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3630   case ISD::FDIV:
3631     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3632   case ISD::FNEG:
3633     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3634   case ISD::FABS:
3635     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3636   case ISD::FSQRT:
3637     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3638   case ISD::FMA:
3639     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3640   case ISD::SMIN:
3641     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3642   case ISD::SMAX:
3643     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3644   case ISD::UMIN:
3645     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3646   case ISD::UMAX:
3647     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3648   case ISD::FMINNUM:
3649     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3650   case ISD::FMAXNUM:
3651     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3652   case ISD::ABS:
3653     return lowerABS(Op, DAG);
3654   case ISD::CTLZ_ZERO_UNDEF:
3655   case ISD::CTTZ_ZERO_UNDEF:
3656     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3657   case ISD::VSELECT:
3658     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3659   case ISD::FCOPYSIGN:
3660     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3661   case ISD::MGATHER:
3662   case ISD::VP_GATHER:
3663     return lowerMaskedGather(Op, DAG);
3664   case ISD::MSCATTER:
3665   case ISD::VP_SCATTER:
3666     return lowerMaskedScatter(Op, DAG);
3667   case ISD::FLT_ROUNDS_:
3668     return lowerGET_ROUNDING(Op, DAG);
3669   case ISD::SET_ROUNDING:
3670     return lowerSET_ROUNDING(Op, DAG);
3671   case ISD::VP_SELECT:
3672     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3673   case ISD::VP_MERGE:
3674     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3675   case ISD::VP_ADD:
3676     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3677   case ISD::VP_SUB:
3678     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3679   case ISD::VP_MUL:
3680     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3681   case ISD::VP_SDIV:
3682     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3683   case ISD::VP_UDIV:
3684     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3685   case ISD::VP_SREM:
3686     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3687   case ISD::VP_UREM:
3688     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3689   case ISD::VP_AND:
3690     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3691   case ISD::VP_OR:
3692     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3693   case ISD::VP_XOR:
3694     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3695   case ISD::VP_ASHR:
3696     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3697   case ISD::VP_LSHR:
3698     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3699   case ISD::VP_SHL:
3700     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3701   case ISD::VP_FADD:
3702     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3703   case ISD::VP_FSUB:
3704     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3705   case ISD::VP_FMUL:
3706     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3707   case ISD::VP_FDIV:
3708     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3709   case ISD::VP_FNEG:
3710     return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
3711   case ISD::VP_FMA:
3712     return lowerVPOp(Op, DAG, RISCVISD::FMA_VL);
3713   }
3714 }
3715 
3716 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3717                              SelectionDAG &DAG, unsigned Flags) {
3718   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3719 }
3720 
3721 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3722                              SelectionDAG &DAG, unsigned Flags) {
3723   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3724                                    Flags);
3725 }
3726 
3727 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3728                              SelectionDAG &DAG, unsigned Flags) {
3729   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3730                                    N->getOffset(), Flags);
3731 }
3732 
3733 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3734                              SelectionDAG &DAG, unsigned Flags) {
3735   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3736 }
3737 
3738 template <class NodeTy>
3739 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3740                                      bool IsLocal) const {
3741   SDLoc DL(N);
3742   EVT Ty = getPointerTy(DAG.getDataLayout());
3743 
3744   if (isPositionIndependent()) {
3745     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3746     if (IsLocal)
3747       // Use PC-relative addressing to access the symbol. This generates the
3748       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3749       // %pcrel_lo(auipc)).
3750       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3751 
3752     // Use PC-relative addressing to access the GOT for this symbol, then load
3753     // the address from the GOT. This generates the pattern (PseudoLA sym),
3754     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3755     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3756   }
3757 
3758   switch (getTargetMachine().getCodeModel()) {
3759   default:
3760     report_fatal_error("Unsupported code model for lowering");
3761   case CodeModel::Small: {
3762     // Generate a sequence for accessing addresses within the first 2 GiB of
3763     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3764     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3765     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3766     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3767     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3768   }
3769   case CodeModel::Medium: {
3770     // Generate a sequence for accessing addresses within any 2GiB range within
3771     // the address space. This generates the pattern (PseudoLLA sym), which
3772     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3773     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3774     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3775   }
3776   }
3777 }
3778 
3779 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3780                                                 SelectionDAG &DAG) const {
3781   SDLoc DL(Op);
3782   EVT Ty = Op.getValueType();
3783   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3784   int64_t Offset = N->getOffset();
3785   MVT XLenVT = Subtarget.getXLenVT();
3786 
3787   const GlobalValue *GV = N->getGlobal();
3788   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3789   SDValue Addr = getAddr(N, DAG, IsLocal);
3790 
3791   // In order to maximise the opportunity for common subexpression elimination,
3792   // emit a separate ADD node for the global address offset instead of folding
3793   // it in the global address node. Later peephole optimisations may choose to
3794   // fold it back in when profitable.
3795   if (Offset != 0)
3796     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3797                        DAG.getConstant(Offset, DL, XLenVT));
3798   return Addr;
3799 }
3800 
3801 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3802                                                SelectionDAG &DAG) const {
3803   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3804 
3805   return getAddr(N, DAG);
3806 }
3807 
3808 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3809                                                SelectionDAG &DAG) const {
3810   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3811 
3812   return getAddr(N, DAG);
3813 }
3814 
3815 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3816                                             SelectionDAG &DAG) const {
3817   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3818 
3819   return getAddr(N, DAG);
3820 }
3821 
3822 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3823                                               SelectionDAG &DAG,
3824                                               bool UseGOT) const {
3825   SDLoc DL(N);
3826   EVT Ty = getPointerTy(DAG.getDataLayout());
3827   const GlobalValue *GV = N->getGlobal();
3828   MVT XLenVT = Subtarget.getXLenVT();
3829 
3830   if (UseGOT) {
3831     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3832     // load the address from the GOT and add the thread pointer. This generates
3833     // the pattern (PseudoLA_TLS_IE sym), which expands to
3834     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3835     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3836     SDValue Load =
3837         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3838 
3839     // Add the thread pointer.
3840     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3841     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3842   }
3843 
3844   // Generate a sequence for accessing the address relative to the thread
3845   // pointer, with the appropriate adjustment for the thread pointer offset.
3846   // This generates the pattern
3847   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3848   SDValue AddrHi =
3849       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3850   SDValue AddrAdd =
3851       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3852   SDValue AddrLo =
3853       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3854 
3855   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3856   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3857   SDValue MNAdd = SDValue(
3858       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3859       0);
3860   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3861 }
3862 
3863 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3864                                                SelectionDAG &DAG) const {
3865   SDLoc DL(N);
3866   EVT Ty = getPointerTy(DAG.getDataLayout());
3867   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3868   const GlobalValue *GV = N->getGlobal();
3869 
3870   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3871   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3872   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3873   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3874   SDValue Load =
3875       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3876 
3877   // Prepare argument list to generate call.
3878   ArgListTy Args;
3879   ArgListEntry Entry;
3880   Entry.Node = Load;
3881   Entry.Ty = CallTy;
3882   Args.push_back(Entry);
3883 
3884   // Setup call to __tls_get_addr.
3885   TargetLowering::CallLoweringInfo CLI(DAG);
3886   CLI.setDebugLoc(DL)
3887       .setChain(DAG.getEntryNode())
3888       .setLibCallee(CallingConv::C, CallTy,
3889                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3890                     std::move(Args));
3891 
3892   return LowerCallTo(CLI).first;
3893 }
3894 
3895 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3896                                                    SelectionDAG &DAG) const {
3897   SDLoc DL(Op);
3898   EVT Ty = Op.getValueType();
3899   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3900   int64_t Offset = N->getOffset();
3901   MVT XLenVT = Subtarget.getXLenVT();
3902 
3903   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3904 
3905   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3906       CallingConv::GHC)
3907     report_fatal_error("In GHC calling convention TLS is not supported");
3908 
3909   SDValue Addr;
3910   switch (Model) {
3911   case TLSModel::LocalExec:
3912     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3913     break;
3914   case TLSModel::InitialExec:
3915     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3916     break;
3917   case TLSModel::LocalDynamic:
3918   case TLSModel::GeneralDynamic:
3919     Addr = getDynamicTLSAddr(N, DAG);
3920     break;
3921   }
3922 
3923   // In order to maximise the opportunity for common subexpression elimination,
3924   // emit a separate ADD node for the global address offset instead of folding
3925   // it in the global address node. Later peephole optimisations may choose to
3926   // fold it back in when profitable.
3927   if (Offset != 0)
3928     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3929                        DAG.getConstant(Offset, DL, XLenVT));
3930   return Addr;
3931 }
3932 
3933 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3934   SDValue CondV = Op.getOperand(0);
3935   SDValue TrueV = Op.getOperand(1);
3936   SDValue FalseV = Op.getOperand(2);
3937   SDLoc DL(Op);
3938   MVT VT = Op.getSimpleValueType();
3939   MVT XLenVT = Subtarget.getXLenVT();
3940 
3941   // Lower vector SELECTs to VSELECTs by splatting the condition.
3942   if (VT.isVector()) {
3943     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3944     SDValue CondSplat = VT.isScalableVector()
3945                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3946                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3947     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3948   }
3949 
3950   // If the result type is XLenVT and CondV is the output of a SETCC node
3951   // which also operated on XLenVT inputs, then merge the SETCC node into the
3952   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3953   // compare+branch instructions. i.e.:
3954   // (select (setcc lhs, rhs, cc), truev, falsev)
3955   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3956   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3957       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3958     SDValue LHS = CondV.getOperand(0);
3959     SDValue RHS = CondV.getOperand(1);
3960     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3961     ISD::CondCode CCVal = CC->get();
3962 
3963     // Special case for a select of 2 constants that have a diffence of 1.
3964     // Normally this is done by DAGCombine, but if the select is introduced by
3965     // type legalization or op legalization, we miss it. Restricting to SETLT
3966     // case for now because that is what signed saturating add/sub need.
3967     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3968     // but we would probably want to swap the true/false values if the condition
3969     // is SETGE/SETLE to avoid an XORI.
3970     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3971         CCVal == ISD::SETLT) {
3972       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3973       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3974       if (TrueVal - 1 == FalseVal)
3975         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3976       if (TrueVal + 1 == FalseVal)
3977         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3978     }
3979 
3980     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3981 
3982     SDValue TargetCC = DAG.getCondCode(CCVal);
3983     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3984     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3985   }
3986 
3987   // Otherwise:
3988   // (select condv, truev, falsev)
3989   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3990   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3991   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3992 
3993   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3994 
3995   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3996 }
3997 
3998 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3999   SDValue CondV = Op.getOperand(1);
4000   SDLoc DL(Op);
4001   MVT XLenVT = Subtarget.getXLenVT();
4002 
4003   if (CondV.getOpcode() == ISD::SETCC &&
4004       CondV.getOperand(0).getValueType() == XLenVT) {
4005     SDValue LHS = CondV.getOperand(0);
4006     SDValue RHS = CondV.getOperand(1);
4007     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
4008 
4009     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
4010 
4011     SDValue TargetCC = DAG.getCondCode(CCVal);
4012     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
4013                        LHS, RHS, TargetCC, Op.getOperand(2));
4014   }
4015 
4016   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
4017                      CondV, DAG.getConstant(0, DL, XLenVT),
4018                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
4019 }
4020 
4021 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
4022   MachineFunction &MF = DAG.getMachineFunction();
4023   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
4024 
4025   SDLoc DL(Op);
4026   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
4027                                  getPointerTy(MF.getDataLayout()));
4028 
4029   // vastart just stores the address of the VarArgsFrameIndex slot into the
4030   // memory location argument.
4031   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
4032   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
4033                       MachinePointerInfo(SV));
4034 }
4035 
4036 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
4037                                             SelectionDAG &DAG) const {
4038   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
4039   MachineFunction &MF = DAG.getMachineFunction();
4040   MachineFrameInfo &MFI = MF.getFrameInfo();
4041   MFI.setFrameAddressIsTaken(true);
4042   Register FrameReg = RI.getFrameRegister(MF);
4043   int XLenInBytes = Subtarget.getXLen() / 8;
4044 
4045   EVT VT = Op.getValueType();
4046   SDLoc DL(Op);
4047   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
4048   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4049   while (Depth--) {
4050     int Offset = -(XLenInBytes * 2);
4051     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
4052                               DAG.getIntPtrConstant(Offset, DL));
4053     FrameAddr =
4054         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
4055   }
4056   return FrameAddr;
4057 }
4058 
4059 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
4060                                              SelectionDAG &DAG) const {
4061   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
4062   MachineFunction &MF = DAG.getMachineFunction();
4063   MachineFrameInfo &MFI = MF.getFrameInfo();
4064   MFI.setReturnAddressIsTaken(true);
4065   MVT XLenVT = Subtarget.getXLenVT();
4066   int XLenInBytes = Subtarget.getXLen() / 8;
4067 
4068   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
4069     return SDValue();
4070 
4071   EVT VT = Op.getValueType();
4072   SDLoc DL(Op);
4073   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4074   if (Depth) {
4075     int Off = -XLenInBytes;
4076     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
4077     SDValue Offset = DAG.getConstant(Off, DL, VT);
4078     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
4079                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
4080                        MachinePointerInfo());
4081   }
4082 
4083   // Return the value of the return address register, marking it an implicit
4084   // live-in.
4085   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
4086   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
4087 }
4088 
4089 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
4090                                                  SelectionDAG &DAG) const {
4091   SDLoc DL(Op);
4092   SDValue Lo = Op.getOperand(0);
4093   SDValue Hi = Op.getOperand(1);
4094   SDValue Shamt = Op.getOperand(2);
4095   EVT VT = Lo.getValueType();
4096 
4097   // if Shamt-XLEN < 0: // Shamt < XLEN
4098   //   Lo = Lo << Shamt
4099   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 ^ Shamt))
4100   // else:
4101   //   Lo = 0
4102   //   Hi = Lo << (Shamt-XLEN)
4103 
4104   SDValue Zero = DAG.getConstant(0, DL, VT);
4105   SDValue One = DAG.getConstant(1, DL, VT);
4106   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4107   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4108   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4109   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
4110 
4111   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
4112   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
4113   SDValue ShiftRightLo =
4114       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
4115   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
4116   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
4117   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
4118 
4119   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4120 
4121   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
4122   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4123 
4124   SDValue Parts[2] = {Lo, Hi};
4125   return DAG.getMergeValues(Parts, DL);
4126 }
4127 
4128 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
4129                                                   bool IsSRA) const {
4130   SDLoc DL(Op);
4131   SDValue Lo = Op.getOperand(0);
4132   SDValue Hi = Op.getOperand(1);
4133   SDValue Shamt = Op.getOperand(2);
4134   EVT VT = Lo.getValueType();
4135 
4136   // SRA expansion:
4137   //   if Shamt-XLEN < 0: // Shamt < XLEN
4138   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
4139   //     Hi = Hi >>s Shamt
4140   //   else:
4141   //     Lo = Hi >>s (Shamt-XLEN);
4142   //     Hi = Hi >>s (XLEN-1)
4143   //
4144   // SRL expansion:
4145   //   if Shamt-XLEN < 0: // Shamt < XLEN
4146   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
4147   //     Hi = Hi >>u Shamt
4148   //   else:
4149   //     Lo = Hi >>u (Shamt-XLEN);
4150   //     Hi = 0;
4151 
4152   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
4153 
4154   SDValue Zero = DAG.getConstant(0, DL, VT);
4155   SDValue One = DAG.getConstant(1, DL, VT);
4156   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4157   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4158   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4159   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
4160 
4161   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
4162   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
4163   SDValue ShiftLeftHi =
4164       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
4165   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
4166   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
4167   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
4168   SDValue HiFalse =
4169       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
4170 
4171   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4172 
4173   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
4174   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4175 
4176   SDValue Parts[2] = {Lo, Hi};
4177   return DAG.getMergeValues(Parts, DL);
4178 }
4179 
4180 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
4181 // legal equivalently-sized i8 type, so we can use that as a go-between.
4182 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
4183                                                   SelectionDAG &DAG) const {
4184   SDLoc DL(Op);
4185   MVT VT = Op.getSimpleValueType();
4186   SDValue SplatVal = Op.getOperand(0);
4187   // All-zeros or all-ones splats are handled specially.
4188   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
4189     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4190     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
4191   }
4192   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
4193     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4194     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
4195   }
4196   MVT XLenVT = Subtarget.getXLenVT();
4197   assert(SplatVal.getValueType() == XLenVT &&
4198          "Unexpected type for i1 splat value");
4199   MVT InterVT = VT.changeVectorElementType(MVT::i8);
4200   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
4201                          DAG.getConstant(1, DL, XLenVT));
4202   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
4203   SDValue Zero = DAG.getConstant(0, DL, InterVT);
4204   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
4205 }
4206 
4207 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
4208 // illegal (currently only vXi64 RV32).
4209 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4210 // them to VMV_V_X_VL.
4211 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4212                                                      SelectionDAG &DAG) const {
4213   SDLoc DL(Op);
4214   MVT VecVT = Op.getSimpleValueType();
4215   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4216          "Unexpected SPLAT_VECTOR_PARTS lowering");
4217 
4218   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4219   SDValue Lo = Op.getOperand(0);
4220   SDValue Hi = Op.getOperand(1);
4221 
4222   if (VecVT.isFixedLengthVector()) {
4223     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4224     SDLoc DL(Op);
4225     SDValue Mask, VL;
4226     std::tie(Mask, VL) =
4227         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4228 
4229     SDValue Res =
4230         splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
4231     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4232   }
4233 
4234   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4235     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4236     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
4237     // If Hi constant is all the same sign bit as Lo, lower this as a custom
4238     // node in order to try and match RVV vector/scalar instructions.
4239     if ((LoC >> 31) == HiC)
4240       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4241                          Lo, DAG.getRegister(RISCV::X0, MVT::i32));
4242   }
4243 
4244   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
4245   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4246       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4247       Hi.getConstantOperandVal(1) == 31)
4248     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
4249                        DAG.getRegister(RISCV::X0, MVT::i32));
4250 
4251   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
4252   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
4253                      DAG.getUNDEF(VecVT), Lo, Hi,
4254                      DAG.getRegister(RISCV::X0, MVT::i32));
4255 }
4256 
4257 // Custom-lower extensions from mask vectors by using a vselect either with 1
4258 // for zero/any-extension or -1 for sign-extension:
4259 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4260 // Note that any-extension is lowered identically to zero-extension.
4261 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4262                                                 int64_t ExtTrueVal) const {
4263   SDLoc DL(Op);
4264   MVT VecVT = Op.getSimpleValueType();
4265   SDValue Src = Op.getOperand(0);
4266   // Only custom-lower extensions from mask types
4267   assert(Src.getValueType().isVector() &&
4268          Src.getValueType().getVectorElementType() == MVT::i1);
4269 
4270   MVT XLenVT = Subtarget.getXLenVT();
4271   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4272   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4273 
4274   if (VecVT.isScalableVector()) {
4275     // Be careful not to introduce illegal scalar types at this stage, and be
4276     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
4277     // illegal and must be expanded. Since we know that the constants are
4278     // sign-extended 32-bit values, we use VMV_V_X_VL directly.
4279     bool IsRV32E64 =
4280         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
4281 
4282     if (!IsRV32E64) {
4283       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
4284       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
4285     } else {
4286       SplatZero =
4287           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4288                       SplatZero, DAG.getRegister(RISCV::X0, XLenVT));
4289       SplatTrueVal =
4290           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4291                       SplatTrueVal, DAG.getRegister(RISCV::X0, XLenVT));
4292     }
4293 
4294     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4295   }
4296 
4297   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4298   MVT I1ContainerVT =
4299       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4300 
4301   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4302 
4303   SDValue Mask, VL;
4304   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4305 
4306   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4307                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4308   SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4309                              DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
4310   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4311                                SplatTrueVal, SplatZero, VL);
4312 
4313   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4314 }
4315 
4316 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4317     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4318   MVT ExtVT = Op.getSimpleValueType();
4319   // Only custom-lower extensions from fixed-length vector types.
4320   if (!ExtVT.isFixedLengthVector())
4321     return Op;
4322   MVT VT = Op.getOperand(0).getSimpleValueType();
4323   // Grab the canonical container type for the extended type. Infer the smaller
4324   // type from that to ensure the same number of vector elements, as we know
4325   // the LMUL will be sufficient to hold the smaller type.
4326   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
4327   // Get the extended container type manually to ensure the same number of
4328   // vector elements between source and dest.
4329   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4330                                      ContainerExtVT.getVectorElementCount());
4331 
4332   SDValue Op1 =
4333       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4334 
4335   SDLoc DL(Op);
4336   SDValue Mask, VL;
4337   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4338 
4339   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4340 
4341   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4342 }
4343 
4344 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4345 // setcc operation:
4346 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
4347 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
4348                                                   SelectionDAG &DAG) const {
4349   SDLoc DL(Op);
4350   EVT MaskVT = Op.getValueType();
4351   // Only expect to custom-lower truncations to mask types
4352   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4353          "Unexpected type for vector mask lowering");
4354   SDValue Src = Op.getOperand(0);
4355   MVT VecVT = Src.getSimpleValueType();
4356 
4357   // If this is a fixed vector, we need to convert it to a scalable vector.
4358   MVT ContainerVT = VecVT;
4359   if (VecVT.isFixedLengthVector()) {
4360     ContainerVT = getContainerForFixedLengthVector(VecVT);
4361     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4362   }
4363 
4364   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4365   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4366 
4367   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4368                          DAG.getUNDEF(ContainerVT), SplatOne);
4369   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4370                           DAG.getUNDEF(ContainerVT), SplatZero);
4371 
4372   if (VecVT.isScalableVector()) {
4373     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
4374     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
4375   }
4376 
4377   SDValue Mask, VL;
4378   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4379 
4380   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4381   SDValue Trunc =
4382       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4383   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4384                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4385   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4386 }
4387 
4388 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4389 // first position of a vector, and that vector is slid up to the insert index.
4390 // By limiting the active vector length to index+1 and merging with the
4391 // original vector (with an undisturbed tail policy for elements >= VL), we
4392 // achieve the desired result of leaving all elements untouched except the one
4393 // at VL-1, which is replaced with the desired value.
4394 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4395                                                     SelectionDAG &DAG) const {
4396   SDLoc DL(Op);
4397   MVT VecVT = Op.getSimpleValueType();
4398   SDValue Vec = Op.getOperand(0);
4399   SDValue Val = Op.getOperand(1);
4400   SDValue Idx = Op.getOperand(2);
4401 
4402   if (VecVT.getVectorElementType() == MVT::i1) {
4403     // FIXME: For now we just promote to an i8 vector and insert into that,
4404     // but this is probably not optimal.
4405     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4406     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4407     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4408     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4409   }
4410 
4411   MVT ContainerVT = VecVT;
4412   // If the operand is a fixed-length vector, convert to a scalable one.
4413   if (VecVT.isFixedLengthVector()) {
4414     ContainerVT = getContainerForFixedLengthVector(VecVT);
4415     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4416   }
4417 
4418   MVT XLenVT = Subtarget.getXLenVT();
4419 
4420   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4421   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4422   // Even i64-element vectors on RV32 can be lowered without scalar
4423   // legalization if the most-significant 32 bits of the value are not affected
4424   // by the sign-extension of the lower 32 bits.
4425   // TODO: We could also catch sign extensions of a 32-bit value.
4426   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4427     const auto *CVal = cast<ConstantSDNode>(Val);
4428     if (isInt<32>(CVal->getSExtValue())) {
4429       IsLegalInsert = true;
4430       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4431     }
4432   }
4433 
4434   SDValue Mask, VL;
4435   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4436 
4437   SDValue ValInVec;
4438 
4439   if (IsLegalInsert) {
4440     unsigned Opc =
4441         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4442     if (isNullConstant(Idx)) {
4443       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4444       if (!VecVT.isFixedLengthVector())
4445         return Vec;
4446       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4447     }
4448     ValInVec =
4449         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4450   } else {
4451     // On RV32, i64-element vectors must be specially handled to place the
4452     // value at element 0, by using two vslide1up instructions in sequence on
4453     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4454     // this.
4455     SDValue One = DAG.getConstant(1, DL, XLenVT);
4456     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4457     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4458     MVT I32ContainerVT =
4459         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4460     SDValue I32Mask =
4461         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4462     // Limit the active VL to two.
4463     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
4464     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
4465     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4466     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT,
4467                            DAG.getUNDEF(I32ContainerVT), Zero, InsertI64VL);
4468     // First slide in the hi value, then the lo in underneath it.
4469     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4470                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
4471                            I32Mask, InsertI64VL);
4472     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4473                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo,
4474                            I32Mask, InsertI64VL);
4475     // Bitcast back to the right container type.
4476     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4477   }
4478 
4479   // Now that the value is in a vector, slide it into position.
4480   SDValue InsertVL =
4481       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4482   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4483                                 ValInVec, Idx, Mask, InsertVL);
4484   if (!VecVT.isFixedLengthVector())
4485     return Slideup;
4486   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4487 }
4488 
4489 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4490 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4491 // types this is done using VMV_X_S to allow us to glean information about the
4492 // sign bits of the result.
4493 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4494                                                      SelectionDAG &DAG) const {
4495   SDLoc DL(Op);
4496   SDValue Idx = Op.getOperand(1);
4497   SDValue Vec = Op.getOperand(0);
4498   EVT EltVT = Op.getValueType();
4499   MVT VecVT = Vec.getSimpleValueType();
4500   MVT XLenVT = Subtarget.getXLenVT();
4501 
4502   if (VecVT.getVectorElementType() == MVT::i1) {
4503     if (VecVT.isFixedLengthVector()) {
4504       unsigned NumElts = VecVT.getVectorNumElements();
4505       if (NumElts >= 8) {
4506         MVT WideEltVT;
4507         unsigned WidenVecLen;
4508         SDValue ExtractElementIdx;
4509         SDValue ExtractBitIdx;
4510         unsigned MaxEEW = Subtarget.getMaxELENForFixedLengthVectors();
4511         MVT LargestEltVT = MVT::getIntegerVT(
4512             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4513         if (NumElts <= LargestEltVT.getSizeInBits()) {
4514           assert(isPowerOf2_32(NumElts) &&
4515                  "the number of elements should be power of 2");
4516           WideEltVT = MVT::getIntegerVT(NumElts);
4517           WidenVecLen = 1;
4518           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4519           ExtractBitIdx = Idx;
4520         } else {
4521           WideEltVT = LargestEltVT;
4522           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4523           // extract element index = index / element width
4524           ExtractElementIdx = DAG.getNode(
4525               ISD::SRL, DL, XLenVT, Idx,
4526               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4527           // mask bit index = index % element width
4528           ExtractBitIdx = DAG.getNode(
4529               ISD::AND, DL, XLenVT, Idx,
4530               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4531         }
4532         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4533         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4534         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4535                                          Vec, ExtractElementIdx);
4536         // Extract the bit from GPR.
4537         SDValue ShiftRight =
4538             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4539         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4540                            DAG.getConstant(1, DL, XLenVT));
4541       }
4542     }
4543     // Otherwise, promote to an i8 vector and extract from that.
4544     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4545     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4546     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4547   }
4548 
4549   // If this is a fixed vector, we need to convert it to a scalable vector.
4550   MVT ContainerVT = VecVT;
4551   if (VecVT.isFixedLengthVector()) {
4552     ContainerVT = getContainerForFixedLengthVector(VecVT);
4553     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4554   }
4555 
4556   // If the index is 0, the vector is already in the right position.
4557   if (!isNullConstant(Idx)) {
4558     // Use a VL of 1 to avoid processing more elements than we need.
4559     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4560     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4561     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4562     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4563                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4564   }
4565 
4566   if (!EltVT.isInteger()) {
4567     // Floating-point extracts are handled in TableGen.
4568     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4569                        DAG.getConstant(0, DL, XLenVT));
4570   }
4571 
4572   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4573   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4574 }
4575 
4576 // Some RVV intrinsics may claim that they want an integer operand to be
4577 // promoted or expanded.
4578 static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
4579                                            const RISCVSubtarget &Subtarget) {
4580   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4581           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4582          "Unexpected opcode");
4583 
4584   if (!Subtarget.hasVInstructions())
4585     return SDValue();
4586 
4587   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4588   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4589   SDLoc DL(Op);
4590 
4591   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4592       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4593   if (!II || !II->hasScalarOperand())
4594     return SDValue();
4595 
4596   unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
4597   assert(SplatOp < Op.getNumOperands());
4598 
4599   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4600   SDValue &ScalarOp = Operands[SplatOp];
4601   MVT OpVT = ScalarOp.getSimpleValueType();
4602   MVT XLenVT = Subtarget.getXLenVT();
4603 
4604   // If this isn't a scalar, or its type is XLenVT we're done.
4605   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4606     return SDValue();
4607 
4608   // Simplest case is that the operand needs to be promoted to XLenVT.
4609   if (OpVT.bitsLT(XLenVT)) {
4610     // If the operand is a constant, sign extend to increase our chances
4611     // of being able to use a .vi instruction. ANY_EXTEND would become a
4612     // a zero extend and the simm5 check in isel would fail.
4613     // FIXME: Should we ignore the upper bits in isel instead?
4614     unsigned ExtOpc =
4615         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4616     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4617     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4618   }
4619 
4620   // Use the previous operand to get the vXi64 VT. The result might be a mask
4621   // VT for compares. Using the previous operand assumes that the previous
4622   // operand will never have a smaller element size than a scalar operand and
4623   // that a widening operation never uses SEW=64.
4624   // NOTE: If this fails the below assert, we can probably just find the
4625   // element count from any operand or result and use it to construct the VT.
4626   assert(II->ScalarOperand > 0 && "Unexpected splat operand!");
4627   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4628 
4629   // The more complex case is when the scalar is larger than XLenVT.
4630   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4631          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4632 
4633   // If this is a sign-extended 32-bit constant, we can truncate it and rely
4634   // on the instruction to sign-extend since SEW>XLEN.
4635   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
4636     if (isInt<32>(CVal->getSExtValue())) {
4637       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4638       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4639     }
4640   }
4641 
4642   switch (IntNo) {
4643   case Intrinsic::riscv_vslide1up:
4644   case Intrinsic::riscv_vslide1down:
4645   case Intrinsic::riscv_vslide1up_mask:
4646   case Intrinsic::riscv_vslide1down_mask: {
4647     // We need to special case these when the scalar is larger than XLen.
4648     unsigned NumOps = Op.getNumOperands();
4649     bool IsMasked = NumOps == 7;
4650 
4651     // Convert the vector source to the equivalent nxvXi32 vector.
4652     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4653     SDValue Vec = DAG.getBitcast(I32VT, Operands[2]);
4654 
4655     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4656                                    DAG.getConstant(0, DL, XLenVT));
4657     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4658                                    DAG.getConstant(1, DL, XLenVT));
4659 
4660     // Double the VL since we halved SEW.
4661     SDValue VL = getVLOperand(Op);
4662     SDValue I32VL =
4663         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4664 
4665     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
4666     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
4667 
4668     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4669     // instructions.
4670     SDValue Passthru;
4671     if (IsMasked)
4672       Passthru = DAG.getUNDEF(I32VT);
4673     else
4674       Passthru = DAG.getBitcast(I32VT, Operands[1]);
4675 
4676     if (IntNo == Intrinsic::riscv_vslide1up ||
4677         IntNo == Intrinsic::riscv_vslide1up_mask) {
4678       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4679                         ScalarHi, I32Mask, I32VL);
4680       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4681                         ScalarLo, I32Mask, I32VL);
4682     } else {
4683       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4684                         ScalarLo, I32Mask, I32VL);
4685       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4686                         ScalarHi, I32Mask, I32VL);
4687     }
4688 
4689     // Convert back to nxvXi64.
4690     Vec = DAG.getBitcast(VT, Vec);
4691 
4692     if (!IsMasked)
4693       return Vec;
4694     // Apply mask after the operation.
4695     SDValue Mask = Operands[NumOps - 3];
4696     SDValue MaskedOff = Operands[1];
4697     // Assume Policy operand is the last operand.
4698     uint64_t Policy =
4699         cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
4700     // We don't need to select maskedoff if it's undef.
4701     if (MaskedOff.isUndef())
4702       return Vec;
4703     // TAMU
4704     if (Policy == RISCVII::TAIL_AGNOSTIC)
4705       return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
4706                          VL);
4707     // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
4708     // It's fine because vmerge does not care mask policy.
4709     return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff, VL);
4710   }
4711   }
4712 
4713   // We need to convert the scalar to a splat vector.
4714   // FIXME: Can we implicitly truncate the scalar if it is known to
4715   // be sign extended?
4716   SDValue VL = getVLOperand(Op);
4717   assert(VL.getValueType() == XLenVT);
4718   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
4719   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4720 }
4721 
4722 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4723                                                      SelectionDAG &DAG) const {
4724   unsigned IntNo = Op.getConstantOperandVal(0);
4725   SDLoc DL(Op);
4726   MVT XLenVT = Subtarget.getXLenVT();
4727 
4728   switch (IntNo) {
4729   default:
4730     break; // Don't custom lower most intrinsics.
4731   case Intrinsic::thread_pointer: {
4732     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4733     return DAG.getRegister(RISCV::X4, PtrVT);
4734   }
4735   case Intrinsic::riscv_orc_b:
4736   case Intrinsic::riscv_brev8: {
4737     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
4738     unsigned Opc =
4739         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4740     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4741                        DAG.getConstant(7, DL, XLenVT));
4742   }
4743   case Intrinsic::riscv_grev:
4744   case Intrinsic::riscv_gorc: {
4745     unsigned Opc =
4746         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4747     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4748   }
4749   case Intrinsic::riscv_zip:
4750   case Intrinsic::riscv_unzip: {
4751     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4752     // For i32 the immdiate is 15. For i64 the immediate is 31.
4753     unsigned Opc =
4754         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4755     unsigned BitWidth = Op.getValueSizeInBits();
4756     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4757     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4758                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4759   }
4760   case Intrinsic::riscv_shfl:
4761   case Intrinsic::riscv_unshfl: {
4762     unsigned Opc =
4763         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4764     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4765   }
4766   case Intrinsic::riscv_bcompress:
4767   case Intrinsic::riscv_bdecompress: {
4768     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4769                                                        : RISCVISD::BDECOMPRESS;
4770     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4771   }
4772   case Intrinsic::riscv_bfp:
4773     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4774                        Op.getOperand(2));
4775   case Intrinsic::riscv_fsl:
4776     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4777                        Op.getOperand(2), Op.getOperand(3));
4778   case Intrinsic::riscv_fsr:
4779     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4780                        Op.getOperand(2), Op.getOperand(3));
4781   case Intrinsic::riscv_vmv_x_s:
4782     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4783     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4784                        Op.getOperand(1));
4785   case Intrinsic::riscv_vmv_v_x:
4786     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4787                             Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
4788                             Subtarget);
4789   case Intrinsic::riscv_vfmv_v_f:
4790     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4791                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4792   case Intrinsic::riscv_vmv_s_x: {
4793     SDValue Scalar = Op.getOperand(2);
4794 
4795     if (Scalar.getValueType().bitsLE(XLenVT)) {
4796       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4797       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4798                          Op.getOperand(1), Scalar, Op.getOperand(3));
4799     }
4800 
4801     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4802 
4803     // This is an i64 value that lives in two scalar registers. We have to
4804     // insert this in a convoluted way. First we build vXi64 splat containing
4805     // the/ two values that we assemble using some bit math. Next we'll use
4806     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4807     // to merge element 0 from our splat into the source vector.
4808     // FIXME: This is probably not the best way to do this, but it is
4809     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4810     // point.
4811     //   sw lo, (a0)
4812     //   sw hi, 4(a0)
4813     //   vlse vX, (a0)
4814     //
4815     //   vid.v      vVid
4816     //   vmseq.vx   mMask, vVid, 0
4817     //   vmerge.vvm vDest, vSrc, vVal, mMask
4818     MVT VT = Op.getSimpleValueType();
4819     SDValue Vec = Op.getOperand(1);
4820     SDValue VL = getVLOperand(Op);
4821 
4822     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
4823     if (Op.getOperand(1).isUndef())
4824       return SplattedVal;
4825     SDValue SplattedIdx =
4826         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
4827                     DAG.getConstant(0, DL, MVT::i32), VL);
4828 
4829     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
4830     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4831     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4832     SDValue SelectCond =
4833         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4834                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4835     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4836                        Vec, VL);
4837   }
4838   }
4839 
4840   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4841 }
4842 
4843 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4844                                                     SelectionDAG &DAG) const {
4845   unsigned IntNo = Op.getConstantOperandVal(1);
4846   switch (IntNo) {
4847   default:
4848     break;
4849   case Intrinsic::riscv_masked_strided_load: {
4850     SDLoc DL(Op);
4851     MVT XLenVT = Subtarget.getXLenVT();
4852 
4853     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4854     // the selection of the masked intrinsics doesn't do this for us.
4855     SDValue Mask = Op.getOperand(5);
4856     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4857 
4858     MVT VT = Op->getSimpleValueType(0);
4859     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4860 
4861     SDValue PassThru = Op.getOperand(2);
4862     if (!IsUnmasked) {
4863       MVT MaskVT =
4864           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4865       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4866       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4867     }
4868 
4869     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4870 
4871     SDValue IntID = DAG.getTargetConstant(
4872         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4873         XLenVT);
4874 
4875     auto *Load = cast<MemIntrinsicSDNode>(Op);
4876     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4877     if (IsUnmasked)
4878       Ops.push_back(DAG.getUNDEF(ContainerVT));
4879     else
4880       Ops.push_back(PassThru);
4881     Ops.push_back(Op.getOperand(3)); // Ptr
4882     Ops.push_back(Op.getOperand(4)); // Stride
4883     if (!IsUnmasked)
4884       Ops.push_back(Mask);
4885     Ops.push_back(VL);
4886     if (!IsUnmasked) {
4887       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4888       Ops.push_back(Policy);
4889     }
4890 
4891     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4892     SDValue Result =
4893         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4894                                 Load->getMemoryVT(), Load->getMemOperand());
4895     SDValue Chain = Result.getValue(1);
4896     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4897     return DAG.getMergeValues({Result, Chain}, DL);
4898   }
4899   case Intrinsic::riscv_seg2_load:
4900   case Intrinsic::riscv_seg3_load:
4901   case Intrinsic::riscv_seg4_load:
4902   case Intrinsic::riscv_seg5_load:
4903   case Intrinsic::riscv_seg6_load:
4904   case Intrinsic::riscv_seg7_load:
4905   case Intrinsic::riscv_seg8_load: {
4906     SDLoc DL(Op);
4907     static const Intrinsic::ID VlsegInts[7] = {
4908         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
4909         Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
4910         Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
4911         Intrinsic::riscv_vlseg8};
4912     unsigned NF = Op->getNumValues() - 1;
4913     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
4914     MVT XLenVT = Subtarget.getXLenVT();
4915     MVT VT = Op->getSimpleValueType(0);
4916     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4917 
4918     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4919     SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT);
4920     auto *Load = cast<MemIntrinsicSDNode>(Op);
4921     SmallVector<EVT, 9> ContainerVTs(NF, ContainerVT);
4922     ContainerVTs.push_back(MVT::Other);
4923     SDVTList VTs = DAG.getVTList(ContainerVTs);
4924     SDValue Result =
4925         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs,
4926                                 {Load->getChain(), IntID, Op.getOperand(2), VL},
4927                                 Load->getMemoryVT(), Load->getMemOperand());
4928     SmallVector<SDValue, 9> Results;
4929     for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++)
4930       Results.push_back(convertFromScalableVector(VT, Result.getValue(RetIdx),
4931                                                   DAG, Subtarget));
4932     Results.push_back(Result.getValue(NF));
4933     return DAG.getMergeValues(Results, DL);
4934   }
4935   }
4936 
4937   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4938 }
4939 
4940 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4941                                                  SelectionDAG &DAG) const {
4942   unsigned IntNo = Op.getConstantOperandVal(1);
4943   switch (IntNo) {
4944   default:
4945     break;
4946   case Intrinsic::riscv_masked_strided_store: {
4947     SDLoc DL(Op);
4948     MVT XLenVT = Subtarget.getXLenVT();
4949 
4950     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4951     // the selection of the masked intrinsics doesn't do this for us.
4952     SDValue Mask = Op.getOperand(5);
4953     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4954 
4955     SDValue Val = Op.getOperand(2);
4956     MVT VT = Val.getSimpleValueType();
4957     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4958 
4959     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4960     if (!IsUnmasked) {
4961       MVT MaskVT =
4962           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4963       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4964     }
4965 
4966     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4967 
4968     SDValue IntID = DAG.getTargetConstant(
4969         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4970         XLenVT);
4971 
4972     auto *Store = cast<MemIntrinsicSDNode>(Op);
4973     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4974     Ops.push_back(Val);
4975     Ops.push_back(Op.getOperand(3)); // Ptr
4976     Ops.push_back(Op.getOperand(4)); // Stride
4977     if (!IsUnmasked)
4978       Ops.push_back(Mask);
4979     Ops.push_back(VL);
4980 
4981     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4982                                    Ops, Store->getMemoryVT(),
4983                                    Store->getMemOperand());
4984   }
4985   }
4986 
4987   return SDValue();
4988 }
4989 
4990 static MVT getLMUL1VT(MVT VT) {
4991   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4992          "Unexpected vector MVT");
4993   return MVT::getScalableVectorVT(
4994       VT.getVectorElementType(),
4995       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4996 }
4997 
4998 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4999   switch (ISDOpcode) {
5000   default:
5001     llvm_unreachable("Unhandled reduction");
5002   case ISD::VECREDUCE_ADD:
5003     return RISCVISD::VECREDUCE_ADD_VL;
5004   case ISD::VECREDUCE_UMAX:
5005     return RISCVISD::VECREDUCE_UMAX_VL;
5006   case ISD::VECREDUCE_SMAX:
5007     return RISCVISD::VECREDUCE_SMAX_VL;
5008   case ISD::VECREDUCE_UMIN:
5009     return RISCVISD::VECREDUCE_UMIN_VL;
5010   case ISD::VECREDUCE_SMIN:
5011     return RISCVISD::VECREDUCE_SMIN_VL;
5012   case ISD::VECREDUCE_AND:
5013     return RISCVISD::VECREDUCE_AND_VL;
5014   case ISD::VECREDUCE_OR:
5015     return RISCVISD::VECREDUCE_OR_VL;
5016   case ISD::VECREDUCE_XOR:
5017     return RISCVISD::VECREDUCE_XOR_VL;
5018   }
5019 }
5020 
5021 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
5022                                                          SelectionDAG &DAG,
5023                                                          bool IsVP) const {
5024   SDLoc DL(Op);
5025   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
5026   MVT VecVT = Vec.getSimpleValueType();
5027   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
5028           Op.getOpcode() == ISD::VECREDUCE_OR ||
5029           Op.getOpcode() == ISD::VECREDUCE_XOR ||
5030           Op.getOpcode() == ISD::VP_REDUCE_AND ||
5031           Op.getOpcode() == ISD::VP_REDUCE_OR ||
5032           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
5033          "Unexpected reduction lowering");
5034 
5035   MVT XLenVT = Subtarget.getXLenVT();
5036   assert(Op.getValueType() == XLenVT &&
5037          "Expected reduction output to be legalized to XLenVT");
5038 
5039   MVT ContainerVT = VecVT;
5040   if (VecVT.isFixedLengthVector()) {
5041     ContainerVT = getContainerForFixedLengthVector(VecVT);
5042     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5043   }
5044 
5045   SDValue Mask, VL;
5046   if (IsVP) {
5047     Mask = Op.getOperand(2);
5048     VL = Op.getOperand(3);
5049   } else {
5050     std::tie(Mask, VL) =
5051         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5052   }
5053 
5054   unsigned BaseOpc;
5055   ISD::CondCode CC;
5056   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
5057 
5058   switch (Op.getOpcode()) {
5059   default:
5060     llvm_unreachable("Unhandled reduction");
5061   case ISD::VECREDUCE_AND:
5062   case ISD::VP_REDUCE_AND: {
5063     // vcpop ~x == 0
5064     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
5065     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
5066     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5067     CC = ISD::SETEQ;
5068     BaseOpc = ISD::AND;
5069     break;
5070   }
5071   case ISD::VECREDUCE_OR:
5072   case ISD::VP_REDUCE_OR:
5073     // vcpop x != 0
5074     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5075     CC = ISD::SETNE;
5076     BaseOpc = ISD::OR;
5077     break;
5078   case ISD::VECREDUCE_XOR:
5079   case ISD::VP_REDUCE_XOR: {
5080     // ((vcpop x) & 1) != 0
5081     SDValue One = DAG.getConstant(1, DL, XLenVT);
5082     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5083     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
5084     CC = ISD::SETNE;
5085     BaseOpc = ISD::XOR;
5086     break;
5087   }
5088   }
5089 
5090   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
5091 
5092   if (!IsVP)
5093     return SetCC;
5094 
5095   // Now include the start value in the operation.
5096   // Note that we must return the start value when no elements are operated
5097   // upon. The vcpop instructions we've emitted in each case above will return
5098   // 0 for an inactive vector, and so we've already received the neutral value:
5099   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
5100   // can simply include the start value.
5101   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
5102 }
5103 
5104 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
5105                                             SelectionDAG &DAG) const {
5106   SDLoc DL(Op);
5107   SDValue Vec = Op.getOperand(0);
5108   EVT VecEVT = Vec.getValueType();
5109 
5110   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
5111 
5112   // Due to ordering in legalize types we may have a vector type that needs to
5113   // be split. Do that manually so we can get down to a legal type.
5114   while (getTypeAction(*DAG.getContext(), VecEVT) ==
5115          TargetLowering::TypeSplitVector) {
5116     SDValue Lo, Hi;
5117     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
5118     VecEVT = Lo.getValueType();
5119     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
5120   }
5121 
5122   // TODO: The type may need to be widened rather than split. Or widened before
5123   // it can be split.
5124   if (!isTypeLegal(VecEVT))
5125     return SDValue();
5126 
5127   MVT VecVT = VecEVT.getSimpleVT();
5128   MVT VecEltVT = VecVT.getVectorElementType();
5129   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
5130 
5131   MVT ContainerVT = VecVT;
5132   if (VecVT.isFixedLengthVector()) {
5133     ContainerVT = getContainerForFixedLengthVector(VecVT);
5134     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5135   }
5136 
5137   MVT M1VT = getLMUL1VT(ContainerVT);
5138   MVT XLenVT = Subtarget.getXLenVT();
5139 
5140   SDValue Mask, VL;
5141   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5142 
5143   SDValue NeutralElem =
5144       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
5145   SDValue IdentitySplat =
5146       lowerScalarSplat(SDValue(), NeutralElem, DAG.getConstant(1, DL, XLenVT),
5147                        M1VT, DL, DAG, Subtarget);
5148   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
5149                                   IdentitySplat, Mask, VL);
5150   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5151                              DAG.getConstant(0, DL, XLenVT));
5152   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5153 }
5154 
5155 // Given a reduction op, this function returns the matching reduction opcode,
5156 // the vector SDValue and the scalar SDValue required to lower this to a
5157 // RISCVISD node.
5158 static std::tuple<unsigned, SDValue, SDValue>
5159 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
5160   SDLoc DL(Op);
5161   auto Flags = Op->getFlags();
5162   unsigned Opcode = Op.getOpcode();
5163   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
5164   switch (Opcode) {
5165   default:
5166     llvm_unreachable("Unhandled reduction");
5167   case ISD::VECREDUCE_FADD: {
5168     // Use positive zero if we can. It is cheaper to materialize.
5169     SDValue Zero =
5170         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
5171     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
5172   }
5173   case ISD::VECREDUCE_SEQ_FADD:
5174     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
5175                            Op.getOperand(0));
5176   case ISD::VECREDUCE_FMIN:
5177     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
5178                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5179   case ISD::VECREDUCE_FMAX:
5180     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
5181                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5182   }
5183 }
5184 
5185 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
5186                                               SelectionDAG &DAG) const {
5187   SDLoc DL(Op);
5188   MVT VecEltVT = Op.getSimpleValueType();
5189 
5190   unsigned RVVOpcode;
5191   SDValue VectorVal, ScalarVal;
5192   std::tie(RVVOpcode, VectorVal, ScalarVal) =
5193       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
5194   MVT VecVT = VectorVal.getSimpleValueType();
5195 
5196   MVT ContainerVT = VecVT;
5197   if (VecVT.isFixedLengthVector()) {
5198     ContainerVT = getContainerForFixedLengthVector(VecVT);
5199     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
5200   }
5201 
5202   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
5203   MVT XLenVT = Subtarget.getXLenVT();
5204 
5205   SDValue Mask, VL;
5206   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5207 
5208   SDValue ScalarSplat =
5209       lowerScalarSplat(SDValue(), ScalarVal, DAG.getConstant(1, DL, XLenVT),
5210                        M1VT, DL, DAG, Subtarget);
5211   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
5212                                   VectorVal, ScalarSplat, Mask, VL);
5213   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5214                      DAG.getConstant(0, DL, XLenVT));
5215 }
5216 
5217 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
5218   switch (ISDOpcode) {
5219   default:
5220     llvm_unreachable("Unhandled reduction");
5221   case ISD::VP_REDUCE_ADD:
5222     return RISCVISD::VECREDUCE_ADD_VL;
5223   case ISD::VP_REDUCE_UMAX:
5224     return RISCVISD::VECREDUCE_UMAX_VL;
5225   case ISD::VP_REDUCE_SMAX:
5226     return RISCVISD::VECREDUCE_SMAX_VL;
5227   case ISD::VP_REDUCE_UMIN:
5228     return RISCVISD::VECREDUCE_UMIN_VL;
5229   case ISD::VP_REDUCE_SMIN:
5230     return RISCVISD::VECREDUCE_SMIN_VL;
5231   case ISD::VP_REDUCE_AND:
5232     return RISCVISD::VECREDUCE_AND_VL;
5233   case ISD::VP_REDUCE_OR:
5234     return RISCVISD::VECREDUCE_OR_VL;
5235   case ISD::VP_REDUCE_XOR:
5236     return RISCVISD::VECREDUCE_XOR_VL;
5237   case ISD::VP_REDUCE_FADD:
5238     return RISCVISD::VECREDUCE_FADD_VL;
5239   case ISD::VP_REDUCE_SEQ_FADD:
5240     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
5241   case ISD::VP_REDUCE_FMAX:
5242     return RISCVISD::VECREDUCE_FMAX_VL;
5243   case ISD::VP_REDUCE_FMIN:
5244     return RISCVISD::VECREDUCE_FMIN_VL;
5245   }
5246 }
5247 
5248 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
5249                                            SelectionDAG &DAG) const {
5250   SDLoc DL(Op);
5251   SDValue Vec = Op.getOperand(1);
5252   EVT VecEVT = Vec.getValueType();
5253 
5254   // TODO: The type may need to be widened rather than split. Or widened before
5255   // it can be split.
5256   if (!isTypeLegal(VecEVT))
5257     return SDValue();
5258 
5259   MVT VecVT = VecEVT.getSimpleVT();
5260   MVT VecEltVT = VecVT.getVectorElementType();
5261   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5262 
5263   MVT ContainerVT = VecVT;
5264   if (VecVT.isFixedLengthVector()) {
5265     ContainerVT = getContainerForFixedLengthVector(VecVT);
5266     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5267   }
5268 
5269   SDValue VL = Op.getOperand(3);
5270   SDValue Mask = Op.getOperand(2);
5271 
5272   MVT M1VT = getLMUL1VT(ContainerVT);
5273   MVT XLenVT = Subtarget.getXLenVT();
5274   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5275 
5276   SDValue StartSplat = lowerScalarSplat(SDValue(), Op.getOperand(0),
5277                                         DAG.getConstant(1, DL, XLenVT), M1VT,
5278                                         DL, DAG, Subtarget);
5279   SDValue Reduction =
5280       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5281   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5282                              DAG.getConstant(0, DL, XLenVT));
5283   if (!VecVT.isInteger())
5284     return Elt0;
5285   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5286 }
5287 
5288 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5289                                                    SelectionDAG &DAG) const {
5290   SDValue Vec = Op.getOperand(0);
5291   SDValue SubVec = Op.getOperand(1);
5292   MVT VecVT = Vec.getSimpleValueType();
5293   MVT SubVecVT = SubVec.getSimpleValueType();
5294 
5295   SDLoc DL(Op);
5296   MVT XLenVT = Subtarget.getXLenVT();
5297   unsigned OrigIdx = Op.getConstantOperandVal(2);
5298   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5299 
5300   // We don't have the ability to slide mask vectors up indexed by their i1
5301   // elements; the smallest we can do is i8. Often we are able to bitcast to
5302   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5303   // into a scalable one, we might not necessarily have enough scalable
5304   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
5305   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5306       (OrigIdx != 0 || !Vec.isUndef())) {
5307     if (VecVT.getVectorMinNumElements() >= 8 &&
5308         SubVecVT.getVectorMinNumElements() >= 8) {
5309       assert(OrigIdx % 8 == 0 && "Invalid index");
5310       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5311              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5312              "Unexpected mask vector lowering");
5313       OrigIdx /= 8;
5314       SubVecVT =
5315           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5316                            SubVecVT.isScalableVector());
5317       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5318                                VecVT.isScalableVector());
5319       Vec = DAG.getBitcast(VecVT, Vec);
5320       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5321     } else {
5322       // We can't slide this mask vector up indexed by its i1 elements.
5323       // This poses a problem when we wish to insert a scalable vector which
5324       // can't be re-expressed as a larger type. Just choose the slow path and
5325       // extend to a larger type, then truncate back down.
5326       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5327       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5328       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5329       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5330       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5331                         Op.getOperand(2));
5332       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5333       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5334     }
5335   }
5336 
5337   // If the subvector vector is a fixed-length type, we cannot use subregister
5338   // manipulation to simplify the codegen; we don't know which register of a
5339   // LMUL group contains the specific subvector as we only know the minimum
5340   // register size. Therefore we must slide the vector group up the full
5341   // amount.
5342   if (SubVecVT.isFixedLengthVector()) {
5343     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5344       return Op;
5345     MVT ContainerVT = VecVT;
5346     if (VecVT.isFixedLengthVector()) {
5347       ContainerVT = getContainerForFixedLengthVector(VecVT);
5348       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5349     }
5350     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5351                          DAG.getUNDEF(ContainerVT), SubVec,
5352                          DAG.getConstant(0, DL, XLenVT));
5353     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5354       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5355       return DAG.getBitcast(Op.getValueType(), SubVec);
5356     }
5357     SDValue Mask =
5358         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5359     // Set the vector length to only the number of elements we care about. Note
5360     // that for slideup this includes the offset.
5361     SDValue VL =
5362         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5363     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5364     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5365                                   SubVec, SlideupAmt, Mask, VL);
5366     if (VecVT.isFixedLengthVector())
5367       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5368     return DAG.getBitcast(Op.getValueType(), Slideup);
5369   }
5370 
5371   unsigned SubRegIdx, RemIdx;
5372   std::tie(SubRegIdx, RemIdx) =
5373       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5374           VecVT, SubVecVT, OrigIdx, TRI);
5375 
5376   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5377   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5378                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5379                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5380 
5381   // 1. If the Idx has been completely eliminated and this subvector's size is
5382   // a vector register or a multiple thereof, or the surrounding elements are
5383   // undef, then this is a subvector insert which naturally aligns to a vector
5384   // register. These can easily be handled using subregister manipulation.
5385   // 2. If the subvector is smaller than a vector register, then the insertion
5386   // must preserve the undisturbed elements of the register. We do this by
5387   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5388   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5389   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5390   // LMUL=1 type back into the larger vector (resolving to another subregister
5391   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
5392   // to avoid allocating a large register group to hold our subvector.
5393   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5394     return Op;
5395 
5396   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
5397   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
5398   // (in our case undisturbed). This means we can set up a subvector insertion
5399   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5400   // size of the subvector.
5401   MVT InterSubVT = VecVT;
5402   SDValue AlignedExtract = Vec;
5403   unsigned AlignedIdx = OrigIdx - RemIdx;
5404   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5405     InterSubVT = getLMUL1VT(VecVT);
5406     // Extract a subvector equal to the nearest full vector register type. This
5407     // should resolve to a EXTRACT_SUBREG instruction.
5408     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5409                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5410   }
5411 
5412   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5413   // For scalable vectors this must be further multiplied by vscale.
5414   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5415 
5416   SDValue Mask, VL;
5417   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5418 
5419   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5420   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5421   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5422   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5423 
5424   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5425                        DAG.getUNDEF(InterSubVT), SubVec,
5426                        DAG.getConstant(0, DL, XLenVT));
5427 
5428   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5429                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5430 
5431   // If required, insert this subvector back into the correct vector register.
5432   // This should resolve to an INSERT_SUBREG instruction.
5433   if (VecVT.bitsGT(InterSubVT))
5434     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5435                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5436 
5437   // We might have bitcast from a mask type: cast back to the original type if
5438   // required.
5439   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5440 }
5441 
5442 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5443                                                     SelectionDAG &DAG) const {
5444   SDValue Vec = Op.getOperand(0);
5445   MVT SubVecVT = Op.getSimpleValueType();
5446   MVT VecVT = Vec.getSimpleValueType();
5447 
5448   SDLoc DL(Op);
5449   MVT XLenVT = Subtarget.getXLenVT();
5450   unsigned OrigIdx = Op.getConstantOperandVal(1);
5451   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5452 
5453   // We don't have the ability to slide mask vectors down indexed by their i1
5454   // elements; the smallest we can do is i8. Often we are able to bitcast to
5455   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5456   // from a scalable one, we might not necessarily have enough scalable
5457   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
5458   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5459     if (VecVT.getVectorMinNumElements() >= 8 &&
5460         SubVecVT.getVectorMinNumElements() >= 8) {
5461       assert(OrigIdx % 8 == 0 && "Invalid index");
5462       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5463              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5464              "Unexpected mask vector lowering");
5465       OrigIdx /= 8;
5466       SubVecVT =
5467           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5468                            SubVecVT.isScalableVector());
5469       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5470                                VecVT.isScalableVector());
5471       Vec = DAG.getBitcast(VecVT, Vec);
5472     } else {
5473       // We can't slide this mask vector down, indexed by its i1 elements.
5474       // This poses a problem when we wish to extract a scalable vector which
5475       // can't be re-expressed as a larger type. Just choose the slow path and
5476       // extend to a larger type, then truncate back down.
5477       // TODO: We could probably improve this when extracting certain fixed
5478       // from fixed, where we can extract as i8 and shift the correct element
5479       // right to reach the desired subvector?
5480       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5481       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5482       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5483       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5484                         Op.getOperand(1));
5485       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5486       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5487     }
5488   }
5489 
5490   // If the subvector vector is a fixed-length type, we cannot use subregister
5491   // manipulation to simplify the codegen; we don't know which register of a
5492   // LMUL group contains the specific subvector as we only know the minimum
5493   // register size. Therefore we must slide the vector group down the full
5494   // amount.
5495   if (SubVecVT.isFixedLengthVector()) {
5496     // With an index of 0 this is a cast-like subvector, which can be performed
5497     // with subregister operations.
5498     if (OrigIdx == 0)
5499       return Op;
5500     MVT ContainerVT = VecVT;
5501     if (VecVT.isFixedLengthVector()) {
5502       ContainerVT = getContainerForFixedLengthVector(VecVT);
5503       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5504     }
5505     SDValue Mask =
5506         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5507     // Set the vector length to only the number of elements we care about. This
5508     // avoids sliding down elements we're going to discard straight away.
5509     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5510     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5511     SDValue Slidedown =
5512         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5513                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5514     // Now we can use a cast-like subvector extract to get the result.
5515     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5516                             DAG.getConstant(0, DL, XLenVT));
5517     return DAG.getBitcast(Op.getValueType(), Slidedown);
5518   }
5519 
5520   unsigned SubRegIdx, RemIdx;
5521   std::tie(SubRegIdx, RemIdx) =
5522       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5523           VecVT, SubVecVT, OrigIdx, TRI);
5524 
5525   // If the Idx has been completely eliminated then this is a subvector extract
5526   // which naturally aligns to a vector register. These can easily be handled
5527   // using subregister manipulation.
5528   if (RemIdx == 0)
5529     return Op;
5530 
5531   // Else we must shift our vector register directly to extract the subvector.
5532   // Do this using VSLIDEDOWN.
5533 
5534   // If the vector type is an LMUL-group type, extract a subvector equal to the
5535   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
5536   // instruction.
5537   MVT InterSubVT = VecVT;
5538   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5539     InterSubVT = getLMUL1VT(VecVT);
5540     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5541                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5542   }
5543 
5544   // Slide this vector register down by the desired number of elements in order
5545   // to place the desired subvector starting at element 0.
5546   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5547   // For scalable vectors this must be further multiplied by vscale.
5548   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5549 
5550   SDValue Mask, VL;
5551   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5552   SDValue Slidedown =
5553       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5554                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5555 
5556   // Now the vector is in the right position, extract our final subvector. This
5557   // should resolve to a COPY.
5558   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5559                           DAG.getConstant(0, DL, XLenVT));
5560 
5561   // We might have bitcast from a mask type: cast back to the original type if
5562   // required.
5563   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5564 }
5565 
5566 // Lower step_vector to the vid instruction. Any non-identity step value must
5567 // be accounted for my manual expansion.
5568 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5569                                               SelectionDAG &DAG) const {
5570   SDLoc DL(Op);
5571   MVT VT = Op.getSimpleValueType();
5572   MVT XLenVT = Subtarget.getXLenVT();
5573   SDValue Mask, VL;
5574   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5575   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5576   uint64_t StepValImm = Op.getConstantOperandVal(0);
5577   if (StepValImm != 1) {
5578     if (isPowerOf2_64(StepValImm)) {
5579       SDValue StepVal =
5580           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
5581                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5582       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5583     } else {
5584       SDValue StepVal = lowerScalarSplat(
5585           SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
5586           VL, VT, DL, DAG, Subtarget);
5587       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5588     }
5589   }
5590   return StepVec;
5591 }
5592 
5593 // Implement vector_reverse using vrgather.vv with indices determined by
5594 // subtracting the id of each element from (VLMAX-1). This will convert
5595 // the indices like so:
5596 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5597 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5598 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5599                                                  SelectionDAG &DAG) const {
5600   SDLoc DL(Op);
5601   MVT VecVT = Op.getSimpleValueType();
5602   unsigned EltSize = VecVT.getScalarSizeInBits();
5603   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5604 
5605   unsigned MaxVLMAX = 0;
5606   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5607   if (VectorBitsMax != 0)
5608     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
5609 
5610   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5611   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5612 
5613   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5614   // to use vrgatherei16.vv.
5615   // TODO: It's also possible to use vrgatherei16.vv for other types to
5616   // decrease register width for the index calculation.
5617   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
5618     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
5619     // Reverse each half, then reassemble them in reverse order.
5620     // NOTE: It's also possible that after splitting that VLMAX no longer
5621     // requires vrgatherei16.vv.
5622     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5623       SDValue Lo, Hi;
5624       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5625       EVT LoVT, HiVT;
5626       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5627       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5628       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5629       // Reassemble the low and high pieces reversed.
5630       // FIXME: This is a CONCAT_VECTORS.
5631       SDValue Res =
5632           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5633                       DAG.getIntPtrConstant(0, DL));
5634       return DAG.getNode(
5635           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5636           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5637     }
5638 
5639     // Just promote the int type to i16 which will double the LMUL.
5640     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5641     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5642   }
5643 
5644   MVT XLenVT = Subtarget.getXLenVT();
5645   SDValue Mask, VL;
5646   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5647 
5648   // Calculate VLMAX-1 for the desired SEW.
5649   unsigned MinElts = VecVT.getVectorMinNumElements();
5650   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5651                               DAG.getConstant(MinElts, DL, XLenVT));
5652   SDValue VLMinus1 =
5653       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5654 
5655   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5656   bool IsRV32E64 =
5657       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5658   SDValue SplatVL;
5659   if (!IsRV32E64)
5660     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5661   else
5662     SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
5663                           VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
5664 
5665   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5666   SDValue Indices =
5667       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5668 
5669   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5670 }
5671 
5672 SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
5673                                                 SelectionDAG &DAG) const {
5674   SDLoc DL(Op);
5675   SDValue V1 = Op.getOperand(0);
5676   SDValue V2 = Op.getOperand(1);
5677   MVT XLenVT = Subtarget.getXLenVT();
5678   MVT VecVT = Op.getSimpleValueType();
5679 
5680   unsigned MinElts = VecVT.getVectorMinNumElements();
5681   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5682                               DAG.getConstant(MinElts, DL, XLenVT));
5683 
5684   int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
5685   SDValue DownOffset, UpOffset;
5686   if (ImmValue >= 0) {
5687     // The operand is a TargetConstant, we need to rebuild it as a regular
5688     // constant.
5689     DownOffset = DAG.getConstant(ImmValue, DL, XLenVT);
5690     UpOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DownOffset);
5691   } else {
5692     // The operand is a TargetConstant, we need to rebuild it as a regular
5693     // constant rather than negating the original operand.
5694     UpOffset = DAG.getConstant(-ImmValue, DL, XLenVT);
5695     DownOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, UpOffset);
5696   }
5697 
5698   MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5699   SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VLMax);
5700 
5701   SDValue SlideDown =
5702       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT, DAG.getUNDEF(VecVT), V1,
5703                   DownOffset, TrueMask, UpOffset);
5704   return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VecVT, SlideDown, V2, UpOffset,
5705                      TrueMask,
5706                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
5707 }
5708 
5709 SDValue
5710 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5711                                                      SelectionDAG &DAG) const {
5712   SDLoc DL(Op);
5713   auto *Load = cast<LoadSDNode>(Op);
5714 
5715   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5716                                         Load->getMemoryVT(),
5717                                         *Load->getMemOperand()) &&
5718          "Expecting a correctly-aligned load");
5719 
5720   MVT VT = Op.getSimpleValueType();
5721   MVT XLenVT = Subtarget.getXLenVT();
5722   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5723 
5724   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5725 
5726   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5727   SDValue IntID = DAG.getTargetConstant(
5728       IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
5729   SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
5730   if (!IsMaskOp)
5731     Ops.push_back(DAG.getUNDEF(ContainerVT));
5732   Ops.push_back(Load->getBasePtr());
5733   Ops.push_back(VL);
5734   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5735   SDValue NewLoad =
5736       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
5737                               Load->getMemoryVT(), Load->getMemOperand());
5738 
5739   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5740   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5741 }
5742 
5743 SDValue
5744 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5745                                                       SelectionDAG &DAG) const {
5746   SDLoc DL(Op);
5747   auto *Store = cast<StoreSDNode>(Op);
5748 
5749   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5750                                         Store->getMemoryVT(),
5751                                         *Store->getMemOperand()) &&
5752          "Expecting a correctly-aligned store");
5753 
5754   SDValue StoreVal = Store->getValue();
5755   MVT VT = StoreVal.getSimpleValueType();
5756   MVT XLenVT = Subtarget.getXLenVT();
5757 
5758   // If the size less than a byte, we need to pad with zeros to make a byte.
5759   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5760     VT = MVT::v8i1;
5761     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5762                            DAG.getConstant(0, DL, VT), StoreVal,
5763                            DAG.getIntPtrConstant(0, DL));
5764   }
5765 
5766   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5767 
5768   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5769 
5770   SDValue NewValue =
5771       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5772 
5773   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5774   SDValue IntID = DAG.getTargetConstant(
5775       IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
5776   return DAG.getMemIntrinsicNode(
5777       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
5778       {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
5779       Store->getMemoryVT(), Store->getMemOperand());
5780 }
5781 
5782 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5783                                              SelectionDAG &DAG) const {
5784   SDLoc DL(Op);
5785   MVT VT = Op.getSimpleValueType();
5786 
5787   const auto *MemSD = cast<MemSDNode>(Op);
5788   EVT MemVT = MemSD->getMemoryVT();
5789   MachineMemOperand *MMO = MemSD->getMemOperand();
5790   SDValue Chain = MemSD->getChain();
5791   SDValue BasePtr = MemSD->getBasePtr();
5792 
5793   SDValue Mask, PassThru, VL;
5794   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5795     Mask = VPLoad->getMask();
5796     PassThru = DAG.getUNDEF(VT);
5797     VL = VPLoad->getVectorLength();
5798   } else {
5799     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5800     Mask = MLoad->getMask();
5801     PassThru = MLoad->getPassThru();
5802   }
5803 
5804   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5805 
5806   MVT XLenVT = Subtarget.getXLenVT();
5807 
5808   MVT ContainerVT = VT;
5809   if (VT.isFixedLengthVector()) {
5810     ContainerVT = getContainerForFixedLengthVector(VT);
5811     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5812     if (!IsUnmasked) {
5813       MVT MaskVT =
5814           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5815       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5816     }
5817   }
5818 
5819   if (!VL)
5820     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5821 
5822   unsigned IntID =
5823       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5824   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5825   if (IsUnmasked)
5826     Ops.push_back(DAG.getUNDEF(ContainerVT));
5827   else
5828     Ops.push_back(PassThru);
5829   Ops.push_back(BasePtr);
5830   if (!IsUnmasked)
5831     Ops.push_back(Mask);
5832   Ops.push_back(VL);
5833   if (!IsUnmasked)
5834     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5835 
5836   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5837 
5838   SDValue Result =
5839       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5840   Chain = Result.getValue(1);
5841 
5842   if (VT.isFixedLengthVector())
5843     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5844 
5845   return DAG.getMergeValues({Result, Chain}, DL);
5846 }
5847 
5848 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5849                                               SelectionDAG &DAG) const {
5850   SDLoc DL(Op);
5851 
5852   const auto *MemSD = cast<MemSDNode>(Op);
5853   EVT MemVT = MemSD->getMemoryVT();
5854   MachineMemOperand *MMO = MemSD->getMemOperand();
5855   SDValue Chain = MemSD->getChain();
5856   SDValue BasePtr = MemSD->getBasePtr();
5857   SDValue Val, Mask, VL;
5858 
5859   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5860     Val = VPStore->getValue();
5861     Mask = VPStore->getMask();
5862     VL = VPStore->getVectorLength();
5863   } else {
5864     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5865     Val = MStore->getValue();
5866     Mask = MStore->getMask();
5867   }
5868 
5869   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5870 
5871   MVT VT = Val.getSimpleValueType();
5872   MVT XLenVT = Subtarget.getXLenVT();
5873 
5874   MVT ContainerVT = VT;
5875   if (VT.isFixedLengthVector()) {
5876     ContainerVT = getContainerForFixedLengthVector(VT);
5877 
5878     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5879     if (!IsUnmasked) {
5880       MVT MaskVT =
5881           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5882       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5883     }
5884   }
5885 
5886   if (!VL)
5887     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5888 
5889   unsigned IntID =
5890       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5891   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5892   Ops.push_back(Val);
5893   Ops.push_back(BasePtr);
5894   if (!IsUnmasked)
5895     Ops.push_back(Mask);
5896   Ops.push_back(VL);
5897 
5898   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5899                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5900 }
5901 
5902 SDValue
5903 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5904                                                       SelectionDAG &DAG) const {
5905   MVT InVT = Op.getOperand(0).getSimpleValueType();
5906   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5907 
5908   MVT VT = Op.getSimpleValueType();
5909 
5910   SDValue Op1 =
5911       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5912   SDValue Op2 =
5913       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5914 
5915   SDLoc DL(Op);
5916   SDValue VL =
5917       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5918 
5919   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5920   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5921 
5922   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5923                             Op.getOperand(2), Mask, VL);
5924 
5925   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5926 }
5927 
5928 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5929     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5930   MVT VT = Op.getSimpleValueType();
5931 
5932   if (VT.getVectorElementType() == MVT::i1)
5933     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5934 
5935   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5936 }
5937 
5938 SDValue
5939 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5940                                                       SelectionDAG &DAG) const {
5941   unsigned Opc;
5942   switch (Op.getOpcode()) {
5943   default: llvm_unreachable("Unexpected opcode!");
5944   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5945   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5946   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5947   }
5948 
5949   return lowerToScalableOp(Op, DAG, Opc);
5950 }
5951 
5952 // Lower vector ABS to smax(X, sub(0, X)).
5953 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5954   SDLoc DL(Op);
5955   MVT VT = Op.getSimpleValueType();
5956   SDValue X = Op.getOperand(0);
5957 
5958   assert(VT.isFixedLengthVector() && "Unexpected type");
5959 
5960   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5961   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5962 
5963   SDValue Mask, VL;
5964   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5965 
5966   SDValue SplatZero = DAG.getNode(
5967       RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
5968       DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5969   SDValue NegX =
5970       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5971   SDValue Max =
5972       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5973 
5974   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5975 }
5976 
5977 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5978     SDValue Op, SelectionDAG &DAG) const {
5979   SDLoc DL(Op);
5980   MVT VT = Op.getSimpleValueType();
5981   SDValue Mag = Op.getOperand(0);
5982   SDValue Sign = Op.getOperand(1);
5983   assert(Mag.getValueType() == Sign.getValueType() &&
5984          "Can only handle COPYSIGN with matching types.");
5985 
5986   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5987   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5988   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5989 
5990   SDValue Mask, VL;
5991   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5992 
5993   SDValue CopySign =
5994       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5995 
5996   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5997 }
5998 
5999 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
6000     SDValue Op, SelectionDAG &DAG) const {
6001   MVT VT = Op.getSimpleValueType();
6002   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6003 
6004   MVT I1ContainerVT =
6005       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6006 
6007   SDValue CC =
6008       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
6009   SDValue Op1 =
6010       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
6011   SDValue Op2 =
6012       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
6013 
6014   SDLoc DL(Op);
6015   SDValue Mask, VL;
6016   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6017 
6018   SDValue Select =
6019       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
6020 
6021   return convertFromScalableVector(VT, Select, DAG, Subtarget);
6022 }
6023 
6024 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
6025                                                unsigned NewOpc,
6026                                                bool HasMask) const {
6027   MVT VT = Op.getSimpleValueType();
6028   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6029 
6030   // Create list of operands by converting existing ones to scalable types.
6031   SmallVector<SDValue, 6> Ops;
6032   for (const SDValue &V : Op->op_values()) {
6033     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6034 
6035     // Pass through non-vector operands.
6036     if (!V.getValueType().isVector()) {
6037       Ops.push_back(V);
6038       continue;
6039     }
6040 
6041     // "cast" fixed length vector to a scalable vector.
6042     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
6043            "Only fixed length vectors are supported!");
6044     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6045   }
6046 
6047   SDLoc DL(Op);
6048   SDValue Mask, VL;
6049   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6050   if (HasMask)
6051     Ops.push_back(Mask);
6052   Ops.push_back(VL);
6053 
6054   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
6055   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
6056 }
6057 
6058 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
6059 // * Operands of each node are assumed to be in the same order.
6060 // * The EVL operand is promoted from i32 to i64 on RV64.
6061 // * Fixed-length vectors are converted to their scalable-vector container
6062 //   types.
6063 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
6064                                        unsigned RISCVISDOpc) const {
6065   SDLoc DL(Op);
6066   MVT VT = Op.getSimpleValueType();
6067   SmallVector<SDValue, 4> Ops;
6068 
6069   for (const auto &OpIdx : enumerate(Op->ops())) {
6070     SDValue V = OpIdx.value();
6071     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6072     // Pass through operands which aren't fixed-length vectors.
6073     if (!V.getValueType().isFixedLengthVector()) {
6074       Ops.push_back(V);
6075       continue;
6076     }
6077     // "cast" fixed length vector to a scalable vector.
6078     MVT OpVT = V.getSimpleValueType();
6079     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
6080     assert(useRVVForFixedLengthVectorVT(OpVT) &&
6081            "Only fixed length vectors are supported!");
6082     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6083   }
6084 
6085   if (!VT.isFixedLengthVector())
6086     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
6087 
6088   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6089 
6090   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
6091 
6092   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
6093 }
6094 
6095 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
6096                                             unsigned MaskOpc,
6097                                             unsigned VecOpc) const {
6098   MVT VT = Op.getSimpleValueType();
6099   if (VT.getVectorElementType() != MVT::i1)
6100     return lowerVPOp(Op, DAG, VecOpc);
6101 
6102   // It is safe to drop mask parameter as masked-off elements are undef.
6103   SDValue Op1 = Op->getOperand(0);
6104   SDValue Op2 = Op->getOperand(1);
6105   SDValue VL = Op->getOperand(3);
6106 
6107   MVT ContainerVT = VT;
6108   const bool IsFixed = VT.isFixedLengthVector();
6109   if (IsFixed) {
6110     ContainerVT = getContainerForFixedLengthVector(VT);
6111     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6112     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6113   }
6114 
6115   SDLoc DL(Op);
6116   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
6117   if (!IsFixed)
6118     return Val;
6119   return convertFromScalableVector(VT, Val, DAG, Subtarget);
6120 }
6121 
6122 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
6123 // matched to a RVV indexed load. The RVV indexed load instructions only
6124 // support the "unsigned unscaled" addressing mode; indices are implicitly
6125 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6126 // signed or scaled indexing is extended to the XLEN value type and scaled
6127 // accordingly.
6128 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
6129                                                SelectionDAG &DAG) const {
6130   SDLoc DL(Op);
6131   MVT VT = Op.getSimpleValueType();
6132 
6133   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6134   EVT MemVT = MemSD->getMemoryVT();
6135   MachineMemOperand *MMO = MemSD->getMemOperand();
6136   SDValue Chain = MemSD->getChain();
6137   SDValue BasePtr = MemSD->getBasePtr();
6138 
6139   ISD::LoadExtType LoadExtType;
6140   SDValue Index, Mask, PassThru, VL;
6141 
6142   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
6143     Index = VPGN->getIndex();
6144     Mask = VPGN->getMask();
6145     PassThru = DAG.getUNDEF(VT);
6146     VL = VPGN->getVectorLength();
6147     // VP doesn't support extending loads.
6148     LoadExtType = ISD::NON_EXTLOAD;
6149   } else {
6150     // Else it must be a MGATHER.
6151     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
6152     Index = MGN->getIndex();
6153     Mask = MGN->getMask();
6154     PassThru = MGN->getPassThru();
6155     LoadExtType = MGN->getExtensionType();
6156   }
6157 
6158   MVT IndexVT = Index.getSimpleValueType();
6159   MVT XLenVT = Subtarget.getXLenVT();
6160 
6161   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6162          "Unexpected VTs!");
6163   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6164   // Targets have to explicitly opt-in for extending vector loads.
6165   assert(LoadExtType == ISD::NON_EXTLOAD &&
6166          "Unexpected extending MGATHER/VP_GATHER");
6167   (void)LoadExtType;
6168 
6169   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6170   // the selection of the masked intrinsics doesn't do this for us.
6171   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6172 
6173   MVT ContainerVT = VT;
6174   if (VT.isFixedLengthVector()) {
6175     // We need to use the larger of the result and index type to determine the
6176     // scalable type to use so we don't increase LMUL for any operand/result.
6177     if (VT.bitsGE(IndexVT)) {
6178       ContainerVT = getContainerForFixedLengthVector(VT);
6179       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6180                                  ContainerVT.getVectorElementCount());
6181     } else {
6182       IndexVT = getContainerForFixedLengthVector(IndexVT);
6183       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
6184                                      IndexVT.getVectorElementCount());
6185     }
6186 
6187     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6188 
6189     if (!IsUnmasked) {
6190       MVT MaskVT =
6191           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6192       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6193       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
6194     }
6195   }
6196 
6197   if (!VL)
6198     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6199 
6200   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6201     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6202     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6203                                    VL);
6204     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6205                         TrueMask, VL);
6206   }
6207 
6208   unsigned IntID =
6209       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
6210   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6211   if (IsUnmasked)
6212     Ops.push_back(DAG.getUNDEF(ContainerVT));
6213   else
6214     Ops.push_back(PassThru);
6215   Ops.push_back(BasePtr);
6216   Ops.push_back(Index);
6217   if (!IsUnmasked)
6218     Ops.push_back(Mask);
6219   Ops.push_back(VL);
6220   if (!IsUnmasked)
6221     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
6222 
6223   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
6224   SDValue Result =
6225       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
6226   Chain = Result.getValue(1);
6227 
6228   if (VT.isFixedLengthVector())
6229     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
6230 
6231   return DAG.getMergeValues({Result, Chain}, DL);
6232 }
6233 
6234 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
6235 // matched to a RVV indexed store. The RVV indexed store instructions only
6236 // support the "unsigned unscaled" addressing mode; indices are implicitly
6237 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6238 // signed or scaled indexing is extended to the XLEN value type and scaled
6239 // accordingly.
6240 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
6241                                                 SelectionDAG &DAG) const {
6242   SDLoc DL(Op);
6243   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6244   EVT MemVT = MemSD->getMemoryVT();
6245   MachineMemOperand *MMO = MemSD->getMemOperand();
6246   SDValue Chain = MemSD->getChain();
6247   SDValue BasePtr = MemSD->getBasePtr();
6248 
6249   bool IsTruncatingStore = false;
6250   SDValue Index, Mask, Val, VL;
6251 
6252   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
6253     Index = VPSN->getIndex();
6254     Mask = VPSN->getMask();
6255     Val = VPSN->getValue();
6256     VL = VPSN->getVectorLength();
6257     // VP doesn't support truncating stores.
6258     IsTruncatingStore = false;
6259   } else {
6260     // Else it must be a MSCATTER.
6261     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
6262     Index = MSN->getIndex();
6263     Mask = MSN->getMask();
6264     Val = MSN->getValue();
6265     IsTruncatingStore = MSN->isTruncatingStore();
6266   }
6267 
6268   MVT VT = Val.getSimpleValueType();
6269   MVT IndexVT = Index.getSimpleValueType();
6270   MVT XLenVT = Subtarget.getXLenVT();
6271 
6272   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6273          "Unexpected VTs!");
6274   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6275   // Targets have to explicitly opt-in for extending vector loads and
6276   // truncating vector stores.
6277   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
6278   (void)IsTruncatingStore;
6279 
6280   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6281   // the selection of the masked intrinsics doesn't do this for us.
6282   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6283 
6284   MVT ContainerVT = VT;
6285   if (VT.isFixedLengthVector()) {
6286     // We need to use the larger of the value and index type to determine the
6287     // scalable type to use so we don't increase LMUL for any operand/result.
6288     if (VT.bitsGE(IndexVT)) {
6289       ContainerVT = getContainerForFixedLengthVector(VT);
6290       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6291                                  ContainerVT.getVectorElementCount());
6292     } else {
6293       IndexVT = getContainerForFixedLengthVector(IndexVT);
6294       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
6295                                      IndexVT.getVectorElementCount());
6296     }
6297 
6298     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6299     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
6300 
6301     if (!IsUnmasked) {
6302       MVT MaskVT =
6303           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6304       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6305     }
6306   }
6307 
6308   if (!VL)
6309     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6310 
6311   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6312     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6313     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6314                                    VL);
6315     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6316                         TrueMask, VL);
6317   }
6318 
6319   unsigned IntID =
6320       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
6321   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6322   Ops.push_back(Val);
6323   Ops.push_back(BasePtr);
6324   Ops.push_back(Index);
6325   if (!IsUnmasked)
6326     Ops.push_back(Mask);
6327   Ops.push_back(VL);
6328 
6329   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
6330                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
6331 }
6332 
6333 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
6334                                                SelectionDAG &DAG) const {
6335   const MVT XLenVT = Subtarget.getXLenVT();
6336   SDLoc DL(Op);
6337   SDValue Chain = Op->getOperand(0);
6338   SDValue SysRegNo = DAG.getTargetConstant(
6339       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6340   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
6341   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
6342 
6343   // Encoding used for rounding mode in RISCV differs from that used in
6344   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
6345   // table, which consists of a sequence of 4-bit fields, each representing
6346   // corresponding FLT_ROUNDS mode.
6347   static const int Table =
6348       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
6349       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
6350       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
6351       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
6352       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
6353 
6354   SDValue Shift =
6355       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
6356   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6357                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6358   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6359                                DAG.getConstant(7, DL, XLenVT));
6360 
6361   return DAG.getMergeValues({Masked, Chain}, DL);
6362 }
6363 
6364 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
6365                                                SelectionDAG &DAG) const {
6366   const MVT XLenVT = Subtarget.getXLenVT();
6367   SDLoc DL(Op);
6368   SDValue Chain = Op->getOperand(0);
6369   SDValue RMValue = Op->getOperand(1);
6370   SDValue SysRegNo = DAG.getTargetConstant(
6371       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6372 
6373   // Encoding used for rounding mode in RISCV differs from that used in
6374   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
6375   // a table, which consists of a sequence of 4-bit fields, each representing
6376   // corresponding RISCV mode.
6377   static const unsigned Table =
6378       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
6379       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
6380       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
6381       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
6382       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
6383 
6384   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
6385                               DAG.getConstant(2, DL, XLenVT));
6386   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6387                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6388   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6389                         DAG.getConstant(0x7, DL, XLenVT));
6390   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
6391                      RMValue);
6392 }
6393 
6394 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
6395   switch (IntNo) {
6396   default:
6397     llvm_unreachable("Unexpected Intrinsic");
6398   case Intrinsic::riscv_grev:
6399     return RISCVISD::GREVW;
6400   case Intrinsic::riscv_gorc:
6401     return RISCVISD::GORCW;
6402   case Intrinsic::riscv_bcompress:
6403     return RISCVISD::BCOMPRESSW;
6404   case Intrinsic::riscv_bdecompress:
6405     return RISCVISD::BDECOMPRESSW;
6406   case Intrinsic::riscv_bfp:
6407     return RISCVISD::BFPW;
6408   case Intrinsic::riscv_fsl:
6409     return RISCVISD::FSLW;
6410   case Intrinsic::riscv_fsr:
6411     return RISCVISD::FSRW;
6412   }
6413 }
6414 
6415 // Converts the given intrinsic to a i64 operation with any extension.
6416 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
6417                                          unsigned IntNo) {
6418   SDLoc DL(N);
6419   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
6420   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6421   SDValue NewOp2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6422   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp1, NewOp2);
6423   // ReplaceNodeResults requires we maintain the same type for the return value.
6424   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6425 }
6426 
6427 // Returns the opcode of the target-specific SDNode that implements the 32-bit
6428 // form of the given Opcode.
6429 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
6430   switch (Opcode) {
6431   default:
6432     llvm_unreachable("Unexpected opcode");
6433   case ISD::SHL:
6434     return RISCVISD::SLLW;
6435   case ISD::SRA:
6436     return RISCVISD::SRAW;
6437   case ISD::SRL:
6438     return RISCVISD::SRLW;
6439   case ISD::SDIV:
6440     return RISCVISD::DIVW;
6441   case ISD::UDIV:
6442     return RISCVISD::DIVUW;
6443   case ISD::UREM:
6444     return RISCVISD::REMUW;
6445   case ISD::ROTL:
6446     return RISCVISD::ROLW;
6447   case ISD::ROTR:
6448     return RISCVISD::RORW;
6449   case RISCVISD::GREV:
6450     return RISCVISD::GREVW;
6451   case RISCVISD::GORC:
6452     return RISCVISD::GORCW;
6453   }
6454 }
6455 
6456 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
6457 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
6458 // otherwise be promoted to i64, making it difficult to select the
6459 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
6460 // type i8/i16/i32 is lost.
6461 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
6462                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
6463   SDLoc DL(N);
6464   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6465   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
6466   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
6467   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6468   // ReplaceNodeResults requires we maintain the same type for the return value.
6469   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6470 }
6471 
6472 // Converts the given 32-bit operation to a i64 operation with signed extension
6473 // semantic to reduce the signed extension instructions.
6474 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
6475   SDLoc DL(N);
6476   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6477   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6478   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
6479   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6480                                DAG.getValueType(MVT::i32));
6481   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
6482 }
6483 
6484 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
6485                                              SmallVectorImpl<SDValue> &Results,
6486                                              SelectionDAG &DAG) const {
6487   SDLoc DL(N);
6488   switch (N->getOpcode()) {
6489   default:
6490     llvm_unreachable("Don't know how to custom type legalize this operation!");
6491   case ISD::STRICT_FP_TO_SINT:
6492   case ISD::STRICT_FP_TO_UINT:
6493   case ISD::FP_TO_SINT:
6494   case ISD::FP_TO_UINT: {
6495     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6496            "Unexpected custom legalisation");
6497     bool IsStrict = N->isStrictFPOpcode();
6498     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
6499                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
6500     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
6501     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
6502         TargetLowering::TypeSoftenFloat) {
6503       if (!isTypeLegal(Op0.getValueType()))
6504         return;
6505       if (IsStrict) {
6506         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
6507                                 : RISCVISD::STRICT_FCVT_WU_RV64;
6508         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
6509         SDValue Res = DAG.getNode(
6510             Opc, DL, VTs, N->getOperand(0), Op0,
6511             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6512         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6513         Results.push_back(Res.getValue(1));
6514         return;
6515       }
6516       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
6517       SDValue Res =
6518           DAG.getNode(Opc, DL, MVT::i64, Op0,
6519                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6520       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6521       return;
6522     }
6523     // If the FP type needs to be softened, emit a library call using the 'si'
6524     // version. If we left it to default legalization we'd end up with 'di'. If
6525     // the FP type doesn't need to be softened just let generic type
6526     // legalization promote the result type.
6527     RTLIB::Libcall LC;
6528     if (IsSigned)
6529       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
6530     else
6531       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
6532     MakeLibCallOptions CallOptions;
6533     EVT OpVT = Op0.getValueType();
6534     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
6535     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
6536     SDValue Result;
6537     std::tie(Result, Chain) =
6538         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
6539     Results.push_back(Result);
6540     if (IsStrict)
6541       Results.push_back(Chain);
6542     break;
6543   }
6544   case ISD::READCYCLECOUNTER: {
6545     assert(!Subtarget.is64Bit() &&
6546            "READCYCLECOUNTER only has custom type legalization on riscv32");
6547 
6548     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
6549     SDValue RCW =
6550         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
6551 
6552     Results.push_back(
6553         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6554     Results.push_back(RCW.getValue(2));
6555     break;
6556   }
6557   case ISD::MUL: {
6558     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6559     unsigned XLen = Subtarget.getXLen();
6560     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
6561     if (Size > XLen) {
6562       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6563       SDValue LHS = N->getOperand(0);
6564       SDValue RHS = N->getOperand(1);
6565       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6566 
6567       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6568       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6569       // We need exactly one side to be unsigned.
6570       if (LHSIsU == RHSIsU)
6571         return;
6572 
6573       auto MakeMULPair = [&](SDValue S, SDValue U) {
6574         MVT XLenVT = Subtarget.getXLenVT();
6575         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6576         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6577         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6578         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6579         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6580       };
6581 
6582       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6583       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6584 
6585       // The other operand should be signed, but still prefer MULH when
6586       // possible.
6587       if (RHSIsU && LHSIsS && !RHSIsS)
6588         Results.push_back(MakeMULPair(LHS, RHS));
6589       else if (LHSIsU && RHSIsS && !LHSIsS)
6590         Results.push_back(MakeMULPair(RHS, LHS));
6591 
6592       return;
6593     }
6594     LLVM_FALLTHROUGH;
6595   }
6596   case ISD::ADD:
6597   case ISD::SUB:
6598     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6599            "Unexpected custom legalisation");
6600     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6601     break;
6602   case ISD::SHL:
6603   case ISD::SRA:
6604   case ISD::SRL:
6605     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6606            "Unexpected custom legalisation");
6607     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6608       Results.push_back(customLegalizeToWOp(N, DAG));
6609       break;
6610     }
6611 
6612     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6613     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6614     // shift amount.
6615     if (N->getOpcode() == ISD::SHL) {
6616       SDLoc DL(N);
6617       SDValue NewOp0 =
6618           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6619       SDValue NewOp1 =
6620           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6621       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6622       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6623                                    DAG.getValueType(MVT::i32));
6624       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6625     }
6626 
6627     break;
6628   case ISD::ROTL:
6629   case ISD::ROTR:
6630     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6631            "Unexpected custom legalisation");
6632     Results.push_back(customLegalizeToWOp(N, DAG));
6633     break;
6634   case ISD::CTTZ:
6635   case ISD::CTTZ_ZERO_UNDEF:
6636   case ISD::CTLZ:
6637   case ISD::CTLZ_ZERO_UNDEF: {
6638     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6639            "Unexpected custom legalisation");
6640 
6641     SDValue NewOp0 =
6642         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6643     bool IsCTZ =
6644         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6645     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6646     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6647     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6648     return;
6649   }
6650   case ISD::SDIV:
6651   case ISD::UDIV:
6652   case ISD::UREM: {
6653     MVT VT = N->getSimpleValueType(0);
6654     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6655            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6656            "Unexpected custom legalisation");
6657     // Don't promote division/remainder by constant since we should expand those
6658     // to multiply by magic constant.
6659     // FIXME: What if the expansion is disabled for minsize.
6660     if (N->getOperand(1).getOpcode() == ISD::Constant)
6661       return;
6662 
6663     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6664     // the upper 32 bits. For other types we need to sign or zero extend
6665     // based on the opcode.
6666     unsigned ExtOpc = ISD::ANY_EXTEND;
6667     if (VT != MVT::i32)
6668       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6669                                            : ISD::ZERO_EXTEND;
6670 
6671     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6672     break;
6673   }
6674   case ISD::UADDO:
6675   case ISD::USUBO: {
6676     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6677            "Unexpected custom legalisation");
6678     bool IsAdd = N->getOpcode() == ISD::UADDO;
6679     // Create an ADDW or SUBW.
6680     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6681     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6682     SDValue Res =
6683         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6684     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6685                       DAG.getValueType(MVT::i32));
6686 
6687     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
6688     // Since the inputs are sign extended from i32, this is equivalent to
6689     // comparing the lower 32 bits.
6690     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6691     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
6692                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
6693 
6694     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6695     Results.push_back(Overflow);
6696     return;
6697   }
6698   case ISD::UADDSAT:
6699   case ISD::USUBSAT: {
6700     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6701            "Unexpected custom legalisation");
6702     if (Subtarget.hasStdExtZbb()) {
6703       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6704       // sign extend allows overflow of the lower 32 bits to be detected on
6705       // the promoted size.
6706       SDValue LHS =
6707           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6708       SDValue RHS =
6709           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6710       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6711       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6712       return;
6713     }
6714 
6715     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6716     // promotion for UADDO/USUBO.
6717     Results.push_back(expandAddSubSat(N, DAG));
6718     return;
6719   }
6720   case ISD::ABS: {
6721     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6722            "Unexpected custom legalisation");
6723           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6724 
6725     // Expand abs to Y = (sraiw X, 31); subw(xor(X, Y), Y)
6726 
6727     SDValue Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6728 
6729     // Freeze the source so we can increase it's use count.
6730     Src = DAG.getFreeze(Src);
6731 
6732     // Copy sign bit to all bits using the sraiw pattern.
6733     SDValue SignFill = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Src,
6734                                    DAG.getValueType(MVT::i32));
6735     SignFill = DAG.getNode(ISD::SRA, DL, MVT::i64, SignFill,
6736                            DAG.getConstant(31, DL, MVT::i64));
6737 
6738     SDValue NewRes = DAG.getNode(ISD::XOR, DL, MVT::i64, Src, SignFill);
6739     NewRes = DAG.getNode(ISD::SUB, DL, MVT::i64, NewRes, SignFill);
6740 
6741     // NOTE: The result is only required to be anyextended, but sext is
6742     // consistent with type legalization of sub.
6743     NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewRes,
6744                          DAG.getValueType(MVT::i32));
6745     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6746     return;
6747   }
6748   case ISD::BITCAST: {
6749     EVT VT = N->getValueType(0);
6750     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
6751     SDValue Op0 = N->getOperand(0);
6752     EVT Op0VT = Op0.getValueType();
6753     MVT XLenVT = Subtarget.getXLenVT();
6754     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
6755       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
6756       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
6757     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
6758                Subtarget.hasStdExtF()) {
6759       SDValue FPConv =
6760           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
6761       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
6762     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
6763                isTypeLegal(Op0VT)) {
6764       // Custom-legalize bitcasts from fixed-length vector types to illegal
6765       // scalar types in order to improve codegen. Bitcast the vector to a
6766       // one-element vector type whose element type is the same as the result
6767       // type, and extract the first element.
6768       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
6769       if (isTypeLegal(BVT)) {
6770         SDValue BVec = DAG.getBitcast(BVT, Op0);
6771         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
6772                                       DAG.getConstant(0, DL, XLenVT)));
6773       }
6774     }
6775     break;
6776   }
6777   case RISCVISD::GREV:
6778   case RISCVISD::GORC: {
6779     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6780            "Unexpected custom legalisation");
6781     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6782     // This is similar to customLegalizeToWOp, except that we pass the second
6783     // operand (a TargetConstant) straight through: it is already of type
6784     // XLenVT.
6785     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6786     SDValue NewOp0 =
6787         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6788     SDValue NewOp1 =
6789         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6790     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6791     // ReplaceNodeResults requires we maintain the same type for the return
6792     // value.
6793     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6794     break;
6795   }
6796   case RISCVISD::SHFL: {
6797     // There is no SHFLIW instruction, but we can just promote the operation.
6798     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6799            "Unexpected custom legalisation");
6800     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6801     SDValue NewOp0 =
6802         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6803     SDValue NewOp1 =
6804         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6805     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
6806     // ReplaceNodeResults requires we maintain the same type for the return
6807     // value.
6808     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6809     break;
6810   }
6811   case ISD::BSWAP:
6812   case ISD::BITREVERSE: {
6813     MVT VT = N->getSimpleValueType(0);
6814     MVT XLenVT = Subtarget.getXLenVT();
6815     assert((VT == MVT::i8 || VT == MVT::i16 ||
6816             (VT == MVT::i32 && Subtarget.is64Bit())) &&
6817            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
6818     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
6819     unsigned Imm = VT.getSizeInBits() - 1;
6820     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
6821     if (N->getOpcode() == ISD::BSWAP)
6822       Imm &= ~0x7U;
6823     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
6824     SDValue GREVI =
6825         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
6826     // ReplaceNodeResults requires we maintain the same type for the return
6827     // value.
6828     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
6829     break;
6830   }
6831   case ISD::FSHL:
6832   case ISD::FSHR: {
6833     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6834            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
6835     SDValue NewOp0 =
6836         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6837     SDValue NewOp1 =
6838         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6839     SDValue NewShAmt =
6840         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6841     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
6842     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
6843     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
6844                            DAG.getConstant(0x1f, DL, MVT::i64));
6845     // fshl and fshr concatenate their operands in the same order. fsrw and fslw
6846     // instruction use different orders. fshl will return its first operand for
6847     // shift of zero, fshr will return its second operand. fsl and fsr both
6848     // return rs1 so the ISD nodes need to have different operand orders.
6849     // Shift amount is in rs2.
6850     unsigned Opc = RISCVISD::FSLW;
6851     if (N->getOpcode() == ISD::FSHR) {
6852       std::swap(NewOp0, NewOp1);
6853       Opc = RISCVISD::FSRW;
6854     }
6855     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
6856     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
6857     break;
6858   }
6859   case ISD::EXTRACT_VECTOR_ELT: {
6860     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
6861     // type is illegal (currently only vXi64 RV32).
6862     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
6863     // transferred to the destination register. We issue two of these from the
6864     // upper- and lower- halves of the SEW-bit vector element, slid down to the
6865     // first element.
6866     SDValue Vec = N->getOperand(0);
6867     SDValue Idx = N->getOperand(1);
6868 
6869     // The vector type hasn't been legalized yet so we can't issue target
6870     // specific nodes if it needs legalization.
6871     // FIXME: We would manually legalize if it's important.
6872     if (!isTypeLegal(Vec.getValueType()))
6873       return;
6874 
6875     MVT VecVT = Vec.getSimpleValueType();
6876 
6877     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
6878            VecVT.getVectorElementType() == MVT::i64 &&
6879            "Unexpected EXTRACT_VECTOR_ELT legalization");
6880 
6881     // If this is a fixed vector, we need to convert it to a scalable vector.
6882     MVT ContainerVT = VecVT;
6883     if (VecVT.isFixedLengthVector()) {
6884       ContainerVT = getContainerForFixedLengthVector(VecVT);
6885       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
6886     }
6887 
6888     MVT XLenVT = Subtarget.getXLenVT();
6889 
6890     // Use a VL of 1 to avoid processing more elements than we need.
6891     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6892     SDValue VL = DAG.getConstant(1, DL, XLenVT);
6893     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6894 
6895     // Unless the index is known to be 0, we must slide the vector down to get
6896     // the desired element into index 0.
6897     if (!isNullConstant(Idx)) {
6898       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
6899                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
6900     }
6901 
6902     // Extract the lower XLEN bits of the correct vector element.
6903     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6904 
6905     // To extract the upper XLEN bits of the vector element, shift the first
6906     // element right by 32 bits and re-extract the lower XLEN bits.
6907     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6908                                      DAG.getUNDEF(ContainerVT),
6909                                      DAG.getConstant(32, DL, XLenVT), VL);
6910     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
6911                                  ThirtyTwoV, Mask, VL);
6912 
6913     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6914 
6915     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6916     break;
6917   }
6918   case ISD::INTRINSIC_WO_CHAIN: {
6919     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
6920     switch (IntNo) {
6921     default:
6922       llvm_unreachable(
6923           "Don't know how to custom type legalize this intrinsic!");
6924     case Intrinsic::riscv_grev:
6925     case Intrinsic::riscv_gorc:
6926     case Intrinsic::riscv_bcompress:
6927     case Intrinsic::riscv_bdecompress:
6928     case Intrinsic::riscv_bfp: {
6929       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6930              "Unexpected custom legalisation");
6931       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
6932       break;
6933     }
6934     case Intrinsic::riscv_fsl:
6935     case Intrinsic::riscv_fsr: {
6936       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6937              "Unexpected custom legalisation");
6938       SDValue NewOp1 =
6939           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6940       SDValue NewOp2 =
6941           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6942       SDValue NewOp3 =
6943           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3));
6944       unsigned Opc = getRISCVWOpcodeByIntr(IntNo);
6945       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2, NewOp3);
6946       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6947       break;
6948     }
6949     case Intrinsic::riscv_orc_b: {
6950       // Lower to the GORCI encoding for orc.b with the operand extended.
6951       SDValue NewOp =
6952           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6953       // If Zbp is enabled, use GORCIW which will sign extend the result.
6954       unsigned Opc =
6955           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
6956       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
6957                                 DAG.getConstant(7, DL, MVT::i64));
6958       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6959       return;
6960     }
6961     case Intrinsic::riscv_shfl:
6962     case Intrinsic::riscv_unshfl: {
6963       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6964              "Unexpected custom legalisation");
6965       SDValue NewOp1 =
6966           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6967       SDValue NewOp2 =
6968           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6969       unsigned Opc =
6970           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
6971       // There is no (UN)SHFLIW. If the control word is a constant, we can use
6972       // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
6973       // will be shuffled the same way as the lower 32 bit half, but the two
6974       // halves won't cross.
6975       if (isa<ConstantSDNode>(NewOp2)) {
6976         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
6977                              DAG.getConstant(0xf, DL, MVT::i64));
6978         Opc =
6979             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
6980       }
6981       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6982       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6983       break;
6984     }
6985     case Intrinsic::riscv_vmv_x_s: {
6986       EVT VT = N->getValueType(0);
6987       MVT XLenVT = Subtarget.getXLenVT();
6988       if (VT.bitsLT(XLenVT)) {
6989         // Simple case just extract using vmv.x.s and truncate.
6990         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
6991                                       Subtarget.getXLenVT(), N->getOperand(1));
6992         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
6993         return;
6994       }
6995 
6996       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
6997              "Unexpected custom legalization");
6998 
6999       // We need to do the move in two steps.
7000       SDValue Vec = N->getOperand(1);
7001       MVT VecVT = Vec.getSimpleValueType();
7002 
7003       // First extract the lower XLEN bits of the element.
7004       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7005 
7006       // To extract the upper XLEN bits of the vector element, shift the first
7007       // element right by 32 bits and re-extract the lower XLEN bits.
7008       SDValue VL = DAG.getConstant(1, DL, XLenVT);
7009       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
7010       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
7011       SDValue ThirtyTwoV =
7012           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
7013                       DAG.getConstant(32, DL, XLenVT), VL);
7014       SDValue LShr32 =
7015           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
7016       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7017 
7018       Results.push_back(
7019           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7020       break;
7021     }
7022     }
7023     break;
7024   }
7025   case ISD::VECREDUCE_ADD:
7026   case ISD::VECREDUCE_AND:
7027   case ISD::VECREDUCE_OR:
7028   case ISD::VECREDUCE_XOR:
7029   case ISD::VECREDUCE_SMAX:
7030   case ISD::VECREDUCE_UMAX:
7031   case ISD::VECREDUCE_SMIN:
7032   case ISD::VECREDUCE_UMIN:
7033     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
7034       Results.push_back(V);
7035     break;
7036   case ISD::VP_REDUCE_ADD:
7037   case ISD::VP_REDUCE_AND:
7038   case ISD::VP_REDUCE_OR:
7039   case ISD::VP_REDUCE_XOR:
7040   case ISD::VP_REDUCE_SMAX:
7041   case ISD::VP_REDUCE_UMAX:
7042   case ISD::VP_REDUCE_SMIN:
7043   case ISD::VP_REDUCE_UMIN:
7044     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
7045       Results.push_back(V);
7046     break;
7047   case ISD::FLT_ROUNDS_: {
7048     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
7049     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
7050     Results.push_back(Res.getValue(0));
7051     Results.push_back(Res.getValue(1));
7052     break;
7053   }
7054   }
7055 }
7056 
7057 // A structure to hold one of the bit-manipulation patterns below. Together, a
7058 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
7059 //   (or (and (shl x, 1), 0xAAAAAAAA),
7060 //       (and (srl x, 1), 0x55555555))
7061 struct RISCVBitmanipPat {
7062   SDValue Op;
7063   unsigned ShAmt;
7064   bool IsSHL;
7065 
7066   bool formsPairWith(const RISCVBitmanipPat &Other) const {
7067     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
7068   }
7069 };
7070 
7071 // Matches patterns of the form
7072 //   (and (shl x, C2), (C1 << C2))
7073 //   (and (srl x, C2), C1)
7074 //   (shl (and x, C1), C2)
7075 //   (srl (and x, (C1 << C2)), C2)
7076 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
7077 // The expected masks for each shift amount are specified in BitmanipMasks where
7078 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
7079 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
7080 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
7081 // XLen is 64.
7082 static Optional<RISCVBitmanipPat>
7083 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
7084   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
7085          "Unexpected number of masks");
7086   Optional<uint64_t> Mask;
7087   // Optionally consume a mask around the shift operation.
7088   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
7089     Mask = Op.getConstantOperandVal(1);
7090     Op = Op.getOperand(0);
7091   }
7092   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
7093     return None;
7094   bool IsSHL = Op.getOpcode() == ISD::SHL;
7095 
7096   if (!isa<ConstantSDNode>(Op.getOperand(1)))
7097     return None;
7098   uint64_t ShAmt = Op.getConstantOperandVal(1);
7099 
7100   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7101   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
7102     return None;
7103   // If we don't have enough masks for 64 bit, then we must be trying to
7104   // match SHFL so we're only allowed to shift 1/4 of the width.
7105   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
7106     return None;
7107 
7108   SDValue Src = Op.getOperand(0);
7109 
7110   // The expected mask is shifted left when the AND is found around SHL
7111   // patterns.
7112   //   ((x >> 1) & 0x55555555)
7113   //   ((x << 1) & 0xAAAAAAAA)
7114   bool SHLExpMask = IsSHL;
7115 
7116   if (!Mask) {
7117     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
7118     // the mask is all ones: consume that now.
7119     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
7120       Mask = Src.getConstantOperandVal(1);
7121       Src = Src.getOperand(0);
7122       // The expected mask is now in fact shifted left for SRL, so reverse the
7123       // decision.
7124       //   ((x & 0xAAAAAAAA) >> 1)
7125       //   ((x & 0x55555555) << 1)
7126       SHLExpMask = !SHLExpMask;
7127     } else {
7128       // Use a default shifted mask of all-ones if there's no AND, truncated
7129       // down to the expected width. This simplifies the logic later on.
7130       Mask = maskTrailingOnes<uint64_t>(Width);
7131       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
7132     }
7133   }
7134 
7135   unsigned MaskIdx = Log2_32(ShAmt);
7136   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7137 
7138   if (SHLExpMask)
7139     ExpMask <<= ShAmt;
7140 
7141   if (Mask != ExpMask)
7142     return None;
7143 
7144   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
7145 }
7146 
7147 // Matches any of the following bit-manipulation patterns:
7148 //   (and (shl x, 1), (0x55555555 << 1))
7149 //   (and (srl x, 1), 0x55555555)
7150 //   (shl (and x, 0x55555555), 1)
7151 //   (srl (and x, (0x55555555 << 1)), 1)
7152 // where the shift amount and mask may vary thus:
7153 //   [1]  = 0x55555555 / 0xAAAAAAAA
7154 //   [2]  = 0x33333333 / 0xCCCCCCCC
7155 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
7156 //   [8]  = 0x00FF00FF / 0xFF00FF00
7157 //   [16] = 0x0000FFFF / 0xFFFFFFFF
7158 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
7159 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
7160   // These are the unshifted masks which we use to match bit-manipulation
7161   // patterns. They may be shifted left in certain circumstances.
7162   static const uint64_t BitmanipMasks[] = {
7163       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
7164       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
7165 
7166   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7167 }
7168 
7169 // Match the following pattern as a GREVI(W) operation
7170 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
7171 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
7172                                const RISCVSubtarget &Subtarget) {
7173   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7174   EVT VT = Op.getValueType();
7175 
7176   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7177     auto LHS = matchGREVIPat(Op.getOperand(0));
7178     auto RHS = matchGREVIPat(Op.getOperand(1));
7179     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
7180       SDLoc DL(Op);
7181       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
7182                          DAG.getConstant(LHS->ShAmt, DL, VT));
7183     }
7184   }
7185   return SDValue();
7186 }
7187 
7188 // Matches any the following pattern as a GORCI(W) operation
7189 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
7190 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
7191 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
7192 // Note that with the variant of 3.,
7193 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
7194 // the inner pattern will first be matched as GREVI and then the outer
7195 // pattern will be matched to GORC via the first rule above.
7196 // 4.  (or (rotl/rotr x, bitwidth/2), x)
7197 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
7198                                const RISCVSubtarget &Subtarget) {
7199   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7200   EVT VT = Op.getValueType();
7201 
7202   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7203     SDLoc DL(Op);
7204     SDValue Op0 = Op.getOperand(0);
7205     SDValue Op1 = Op.getOperand(1);
7206 
7207     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
7208       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
7209           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
7210           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
7211         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
7212       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
7213       if ((Reverse.getOpcode() == ISD::ROTL ||
7214            Reverse.getOpcode() == ISD::ROTR) &&
7215           Reverse.getOperand(0) == X &&
7216           isa<ConstantSDNode>(Reverse.getOperand(1))) {
7217         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
7218         if (RotAmt == (VT.getSizeInBits() / 2))
7219           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
7220                              DAG.getConstant(RotAmt, DL, VT));
7221       }
7222       return SDValue();
7223     };
7224 
7225     // Check for either commutable permutation of (or (GREVI x, shamt), x)
7226     if (SDValue V = MatchOROfReverse(Op0, Op1))
7227       return V;
7228     if (SDValue V = MatchOROfReverse(Op1, Op0))
7229       return V;
7230 
7231     // OR is commutable so canonicalize its OR operand to the left
7232     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
7233       std::swap(Op0, Op1);
7234     if (Op0.getOpcode() != ISD::OR)
7235       return SDValue();
7236     SDValue OrOp0 = Op0.getOperand(0);
7237     SDValue OrOp1 = Op0.getOperand(1);
7238     auto LHS = matchGREVIPat(OrOp0);
7239     // OR is commutable so swap the operands and try again: x might have been
7240     // on the left
7241     if (!LHS) {
7242       std::swap(OrOp0, OrOp1);
7243       LHS = matchGREVIPat(OrOp0);
7244     }
7245     auto RHS = matchGREVIPat(Op1);
7246     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
7247       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
7248                          DAG.getConstant(LHS->ShAmt, DL, VT));
7249     }
7250   }
7251   return SDValue();
7252 }
7253 
7254 // Matches any of the following bit-manipulation patterns:
7255 //   (and (shl x, 1), (0x22222222 << 1))
7256 //   (and (srl x, 1), 0x22222222)
7257 //   (shl (and x, 0x22222222), 1)
7258 //   (srl (and x, (0x22222222 << 1)), 1)
7259 // where the shift amount and mask may vary thus:
7260 //   [1]  = 0x22222222 / 0x44444444
7261 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
7262 //   [4]  = 0x00F000F0 / 0x0F000F00
7263 //   [8]  = 0x0000FF00 / 0x00FF0000
7264 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
7265 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
7266   // These are the unshifted masks which we use to match bit-manipulation
7267   // patterns. They may be shifted left in certain circumstances.
7268   static const uint64_t BitmanipMasks[] = {
7269       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
7270       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
7271 
7272   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7273 }
7274 
7275 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
7276 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
7277                                const RISCVSubtarget &Subtarget) {
7278   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7279   EVT VT = Op.getValueType();
7280 
7281   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
7282     return SDValue();
7283 
7284   SDValue Op0 = Op.getOperand(0);
7285   SDValue Op1 = Op.getOperand(1);
7286 
7287   // Or is commutable so canonicalize the second OR to the LHS.
7288   if (Op0.getOpcode() != ISD::OR)
7289     std::swap(Op0, Op1);
7290   if (Op0.getOpcode() != ISD::OR)
7291     return SDValue();
7292 
7293   // We found an inner OR, so our operands are the operands of the inner OR
7294   // and the other operand of the outer OR.
7295   SDValue A = Op0.getOperand(0);
7296   SDValue B = Op0.getOperand(1);
7297   SDValue C = Op1;
7298 
7299   auto Match1 = matchSHFLPat(A);
7300   auto Match2 = matchSHFLPat(B);
7301 
7302   // If neither matched, we failed.
7303   if (!Match1 && !Match2)
7304     return SDValue();
7305 
7306   // We had at least one match. if one failed, try the remaining C operand.
7307   if (!Match1) {
7308     std::swap(A, C);
7309     Match1 = matchSHFLPat(A);
7310     if (!Match1)
7311       return SDValue();
7312   } else if (!Match2) {
7313     std::swap(B, C);
7314     Match2 = matchSHFLPat(B);
7315     if (!Match2)
7316       return SDValue();
7317   }
7318   assert(Match1 && Match2);
7319 
7320   // Make sure our matches pair up.
7321   if (!Match1->formsPairWith(*Match2))
7322     return SDValue();
7323 
7324   // All the remains is to make sure C is an AND with the same input, that masks
7325   // out the bits that are being shuffled.
7326   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
7327       C.getOperand(0) != Match1->Op)
7328     return SDValue();
7329 
7330   uint64_t Mask = C.getConstantOperandVal(1);
7331 
7332   static const uint64_t BitmanipMasks[] = {
7333       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
7334       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
7335   };
7336 
7337   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7338   unsigned MaskIdx = Log2_32(Match1->ShAmt);
7339   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7340 
7341   if (Mask != ExpMask)
7342     return SDValue();
7343 
7344   SDLoc DL(Op);
7345   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7346                      DAG.getConstant(Match1->ShAmt, DL, VT));
7347 }
7348 
7349 // Optimize (add (shl x, c0), (shl y, c1)) ->
7350 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
7351 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7352                                   const RISCVSubtarget &Subtarget) {
7353   // Perform this optimization only in the zba extension.
7354   if (!Subtarget.hasStdExtZba())
7355     return SDValue();
7356 
7357   // Skip for vector types and larger types.
7358   EVT VT = N->getValueType(0);
7359   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7360     return SDValue();
7361 
7362   // The two operand nodes must be SHL and have no other use.
7363   SDValue N0 = N->getOperand(0);
7364   SDValue N1 = N->getOperand(1);
7365   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7366       !N0->hasOneUse() || !N1->hasOneUse())
7367     return SDValue();
7368 
7369   // Check c0 and c1.
7370   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7371   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7372   if (!N0C || !N1C)
7373     return SDValue();
7374   int64_t C0 = N0C->getSExtValue();
7375   int64_t C1 = N1C->getSExtValue();
7376   if (C0 <= 0 || C1 <= 0)
7377     return SDValue();
7378 
7379   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7380   int64_t Bits = std::min(C0, C1);
7381   int64_t Diff = std::abs(C0 - C1);
7382   if (Diff != 1 && Diff != 2 && Diff != 3)
7383     return SDValue();
7384 
7385   // Build nodes.
7386   SDLoc DL(N);
7387   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7388   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7389   SDValue NA0 =
7390       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7391   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7392   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7393 }
7394 
7395 // Combine
7396 // ROTR ((GREV x, 24), 16) -> (GREVI x, 8) for RV32
7397 // ROTL ((GREV x, 24), 16) -> (GREVI x, 8) for RV32
7398 // ROTR ((GREV x, 56), 32) -> (GREVI x, 24) for RV64
7399 // ROTL ((GREV x, 56), 32) -> (GREVI x, 24) for RV64
7400 // RORW ((GREVW x, 24), 16) -> (GREVIW x, 8) for RV64
7401 // ROLW ((GREVW x, 24), 16) -> (GREVIW x, 8) for RV64
7402 // The grev patterns represents BSWAP.
7403 // FIXME: This can be generalized to any GREV. We just need to toggle the MSB
7404 // off the grev.
7405 static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG,
7406                                           const RISCVSubtarget &Subtarget) {
7407   bool IsWInstruction =
7408       N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW;
7409   assert((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL ||
7410           IsWInstruction) &&
7411          "Unexpected opcode!");
7412   SDValue Src = N->getOperand(0);
7413   EVT VT = N->getValueType(0);
7414   SDLoc DL(N);
7415 
7416   if (!Subtarget.hasStdExtZbp())
7417     return SDValue();
7418 
7419   unsigned GrevOpc = IsWInstruction ? RISCVISD::GREVW : RISCVISD::GREV;
7420   if (Src.getOpcode() != GrevOpc)
7421     return SDValue();
7422 
7423   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7424       !isa<ConstantSDNode>(Src.getOperand(1)))
7425     return SDValue();
7426 
7427   unsigned BitWidth = IsWInstruction ? 32 : VT.getSizeInBits();
7428   assert(isPowerOf2_32(BitWidth) && "Expected a power of 2");
7429 
7430   // Needs to be a rotate by half the bitwidth for ROTR/ROTL or by 16 for
7431   // RORW/ROLW. And the grev should be the encoding for bswap for this width.
7432   unsigned ShAmt1 = N->getConstantOperandVal(1);
7433   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7434   if (BitWidth < 16 || ShAmt1 != (BitWidth / 2) || ShAmt2 != (BitWidth - 8))
7435     return SDValue();
7436 
7437   Src = Src.getOperand(0);
7438 
7439   // Toggle bit the MSB of the shift.
7440   unsigned CombinedShAmt = ShAmt1 ^ ShAmt2;
7441   if (CombinedShAmt == 0)
7442     return Src;
7443 
7444   return DAG.getNode(
7445       GrevOpc, DL, VT, Src,
7446       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7447 }
7448 
7449 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
7450 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
7451 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
7452 // not undo itself, but they are redundant.
7453 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7454   SDValue Src = N->getOperand(0);
7455 
7456   if (Src.getOpcode() != N->getOpcode())
7457     return SDValue();
7458 
7459   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7460       !isa<ConstantSDNode>(Src.getOperand(1)))
7461     return SDValue();
7462 
7463   unsigned ShAmt1 = N->getConstantOperandVal(1);
7464   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7465   Src = Src.getOperand(0);
7466 
7467   unsigned CombinedShAmt;
7468   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
7469     CombinedShAmt = ShAmt1 | ShAmt2;
7470   else
7471     CombinedShAmt = ShAmt1 ^ ShAmt2;
7472 
7473   if (CombinedShAmt == 0)
7474     return Src;
7475 
7476   SDLoc DL(N);
7477   return DAG.getNode(
7478       N->getOpcode(), DL, N->getValueType(0), Src,
7479       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7480 }
7481 
7482 // Combine a constant select operand into its use:
7483 //
7484 // (and (select cond, -1, c), x)
7485 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7486 // (or  (select cond, 0, c), x)
7487 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7488 // (xor (select cond, 0, c), x)
7489 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7490 // (add (select cond, 0, c), x)
7491 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7492 // (sub x, (select cond, 0, c))
7493 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7494 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7495                                    SelectionDAG &DAG, bool AllOnes) {
7496   EVT VT = N->getValueType(0);
7497 
7498   // Skip vectors.
7499   if (VT.isVector())
7500     return SDValue();
7501 
7502   if ((Slct.getOpcode() != ISD::SELECT &&
7503        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7504       !Slct.hasOneUse())
7505     return SDValue();
7506 
7507   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7508     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7509   };
7510 
7511   bool SwapSelectOps;
7512   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7513   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7514   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7515   SDValue NonConstantVal;
7516   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7517     SwapSelectOps = false;
7518     NonConstantVal = FalseVal;
7519   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7520     SwapSelectOps = true;
7521     NonConstantVal = TrueVal;
7522   } else
7523     return SDValue();
7524 
7525   // Slct is now know to be the desired identity constant when CC is true.
7526   TrueVal = OtherOp;
7527   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7528   // Unless SwapSelectOps says the condition should be false.
7529   if (SwapSelectOps)
7530     std::swap(TrueVal, FalseVal);
7531 
7532   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7533     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7534                        {Slct.getOperand(0), Slct.getOperand(1),
7535                         Slct.getOperand(2), TrueVal, FalseVal});
7536 
7537   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7538                      {Slct.getOperand(0), TrueVal, FalseVal});
7539 }
7540 
7541 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7542 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7543                                               bool AllOnes) {
7544   SDValue N0 = N->getOperand(0);
7545   SDValue N1 = N->getOperand(1);
7546   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7547     return Result;
7548   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7549     return Result;
7550   return SDValue();
7551 }
7552 
7553 // Transform (add (mul x, c0), c1) ->
7554 //           (add (mul (add x, c1/c0), c0), c1%c0).
7555 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7556 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7557 // to an infinite loop in DAGCombine if transformed.
7558 // Or transform (add (mul x, c0), c1) ->
7559 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7560 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7561 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7562 // lead to an infinite loop in DAGCombine if transformed.
7563 // Or transform (add (mul x, c0), c1) ->
7564 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7565 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7566 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7567 // lead to an infinite loop in DAGCombine if transformed.
7568 // Or transform (add (mul x, c0), c1) ->
7569 //              (mul (add x, c1/c0), c0).
7570 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
7571 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7572                                      const RISCVSubtarget &Subtarget) {
7573   // Skip for vector types and larger types.
7574   EVT VT = N->getValueType(0);
7575   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7576     return SDValue();
7577   // The first operand node must be a MUL and has no other use.
7578   SDValue N0 = N->getOperand(0);
7579   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7580     return SDValue();
7581   // Check if c0 and c1 match above conditions.
7582   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7583   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7584   if (!N0C || !N1C)
7585     return SDValue();
7586   // If N0C has multiple uses it's possible one of the cases in
7587   // DAGCombiner::isMulAddWithConstProfitable will be true, which would result
7588   // in an infinite loop.
7589   if (!N0C->hasOneUse())
7590     return SDValue();
7591   int64_t C0 = N0C->getSExtValue();
7592   int64_t C1 = N1C->getSExtValue();
7593   int64_t CA, CB;
7594   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
7595     return SDValue();
7596   // Search for proper CA (non-zero) and CB that both are simm12.
7597   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
7598       !isInt<12>(C0 * (C1 / C0))) {
7599     CA = C1 / C0;
7600     CB = C1 % C0;
7601   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
7602              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
7603     CA = C1 / C0 + 1;
7604     CB = C1 % C0 - C0;
7605   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
7606              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
7607     CA = C1 / C0 - 1;
7608     CB = C1 % C0 + C0;
7609   } else
7610     return SDValue();
7611   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
7612   SDLoc DL(N);
7613   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
7614                              DAG.getConstant(CA, DL, VT));
7615   SDValue New1 =
7616       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
7617   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
7618 }
7619 
7620 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
7621                                  const RISCVSubtarget &Subtarget) {
7622   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
7623     return V;
7624   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
7625     return V;
7626   // fold (add (select lhs, rhs, cc, 0, y), x) ->
7627   //      (select lhs, rhs, cc, x, (add x, y))
7628   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7629 }
7630 
7631 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
7632   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
7633   //      (select lhs, rhs, cc, x, (sub x, y))
7634   SDValue N0 = N->getOperand(0);
7635   SDValue N1 = N->getOperand(1);
7636   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
7637 }
7638 
7639 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
7640   // fold (and (select lhs, rhs, cc, -1, y), x) ->
7641   //      (select lhs, rhs, cc, x, (and x, y))
7642   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
7643 }
7644 
7645 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
7646                                 const RISCVSubtarget &Subtarget) {
7647   if (Subtarget.hasStdExtZbp()) {
7648     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
7649       return GREV;
7650     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
7651       return GORC;
7652     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
7653       return SHFL;
7654   }
7655 
7656   // fold (or (select cond, 0, y), x) ->
7657   //      (select cond, x, (or x, y))
7658   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7659 }
7660 
7661 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
7662   // fold (xor (select cond, 0, y), x) ->
7663   //      (select cond, x, (xor x, y))
7664   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7665 }
7666 
7667 static SDValue
7668 performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
7669                                 const RISCVSubtarget &Subtarget) {
7670   SDValue Src = N->getOperand(0);
7671   EVT VT = N->getValueType(0);
7672 
7673   // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X)
7674   if (Src.getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
7675       cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
7676     return DAG.getNode(RISCVISD::FMV_X_SIGNEXTH, SDLoc(N), VT,
7677                        Src.getOperand(0));
7678 
7679   // Fold (i64 (sext_inreg (abs X), i32)) ->
7680   // (i64 (smax (sext_inreg (neg X), i32), X)) if X has more than 32 sign bits.
7681   // The (sext_inreg (neg X), i32) will be selected to negw by isel. This
7682   // pattern occurs after type legalization of (i32 (abs X)) on RV64 if the user
7683   // of the (i32 (abs X)) is a sext or setcc or something else that causes type
7684   // legalization to add a sext_inreg after the abs. The (i32 (abs X)) will have
7685   // been type legalized to (i64 (abs (sext_inreg X, i32))), but the sext_inreg
7686   // may get combined into an earlier operation so we need to use
7687   // ComputeNumSignBits.
7688   // NOTE: (i64 (sext_inreg (abs X), i32)) can also be created for
7689   // (i64 (ashr (shl (abs X), 32), 32)) without any type legalization so
7690   // we can't assume that X has 33 sign bits. We must check.
7691   if (Subtarget.hasStdExtZbb() && Subtarget.is64Bit() &&
7692       Src.getOpcode() == ISD::ABS && Src.hasOneUse() && VT == MVT::i64 &&
7693       cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32 &&
7694       DAG.ComputeNumSignBits(Src.getOperand(0)) > 32) {
7695     SDLoc DL(N);
7696     SDValue Freeze = DAG.getFreeze(Src.getOperand(0));
7697     SDValue Neg =
7698         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, MVT::i64), Freeze);
7699     Neg = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Neg,
7700                       DAG.getValueType(MVT::i32));
7701     return DAG.getNode(ISD::SMAX, DL, MVT::i64, Freeze, Neg);
7702   }
7703 
7704   return SDValue();
7705 }
7706 
7707 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
7708 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
7709 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
7710 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
7711 // ADDW/SUBW/MULW.
7712 static SDValue performANY_EXTENDCombine(SDNode *N,
7713                                         TargetLowering::DAGCombinerInfo &DCI,
7714                                         const RISCVSubtarget &Subtarget) {
7715   if (!Subtarget.is64Bit())
7716     return SDValue();
7717 
7718   SelectionDAG &DAG = DCI.DAG;
7719 
7720   SDValue Src = N->getOperand(0);
7721   EVT VT = N->getValueType(0);
7722   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
7723     return SDValue();
7724 
7725   // The opcode must be one that can implicitly sign_extend.
7726   // FIXME: Additional opcodes.
7727   switch (Src.getOpcode()) {
7728   default:
7729     return SDValue();
7730   case ISD::MUL:
7731     if (!Subtarget.hasStdExtM())
7732       return SDValue();
7733     LLVM_FALLTHROUGH;
7734   case ISD::ADD:
7735   case ISD::SUB:
7736     break;
7737   }
7738 
7739   // Only handle cases where the result is used by a CopyToReg. That likely
7740   // means the value is a liveout of the basic block. This helps prevent
7741   // infinite combine loops like PR51206.
7742   if (none_of(N->uses(),
7743               [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
7744     return SDValue();
7745 
7746   SmallVector<SDNode *, 4> SetCCs;
7747   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
7748                             UE = Src.getNode()->use_end();
7749        UI != UE; ++UI) {
7750     SDNode *User = *UI;
7751     if (User == N)
7752       continue;
7753     if (UI.getUse().getResNo() != Src.getResNo())
7754       continue;
7755     // All i32 setccs are legalized by sign extending operands.
7756     if (User->getOpcode() == ISD::SETCC) {
7757       SetCCs.push_back(User);
7758       continue;
7759     }
7760     // We don't know if we can extend this user.
7761     break;
7762   }
7763 
7764   // If we don't have any SetCCs, this isn't worthwhile.
7765   if (SetCCs.empty())
7766     return SDValue();
7767 
7768   SDLoc DL(N);
7769   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
7770   DCI.CombineTo(N, SExt);
7771 
7772   // Promote all the setccs.
7773   for (SDNode *SetCC : SetCCs) {
7774     SmallVector<SDValue, 4> Ops;
7775 
7776     for (unsigned j = 0; j != 2; ++j) {
7777       SDValue SOp = SetCC->getOperand(j);
7778       if (SOp == Src)
7779         Ops.push_back(SExt);
7780       else
7781         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
7782     }
7783 
7784     Ops.push_back(SetCC->getOperand(2));
7785     DCI.CombineTo(SetCC,
7786                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
7787   }
7788   return SDValue(N, 0);
7789 }
7790 
7791 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to
7792 // vwadd(u).vv/vx or vwsub(u).vv/vx.
7793 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG,
7794                                              bool Commute = false) {
7795   assert((N->getOpcode() == RISCVISD::ADD_VL ||
7796           N->getOpcode() == RISCVISD::SUB_VL) &&
7797          "Unexpected opcode");
7798   bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL;
7799   SDValue Op0 = N->getOperand(0);
7800   SDValue Op1 = N->getOperand(1);
7801   if (Commute)
7802     std::swap(Op0, Op1);
7803 
7804   MVT VT = N->getSimpleValueType(0);
7805 
7806   // Determine the narrow size for a widening add/sub.
7807   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7808   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7809                                   VT.getVectorElementCount());
7810 
7811   SDValue Mask = N->getOperand(2);
7812   SDValue VL = N->getOperand(3);
7813 
7814   SDLoc DL(N);
7815 
7816   // If the RHS is a sext or zext, we can form a widening op.
7817   if ((Op1.getOpcode() == RISCVISD::VZEXT_VL ||
7818        Op1.getOpcode() == RISCVISD::VSEXT_VL) &&
7819       Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) {
7820     unsigned ExtOpc = Op1.getOpcode();
7821     Op1 = Op1.getOperand(0);
7822     // Re-introduce narrower extends if needed.
7823     if (Op1.getValueType() != NarrowVT)
7824       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7825 
7826     unsigned WOpc;
7827     if (ExtOpc == RISCVISD::VSEXT_VL)
7828       WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL;
7829     else
7830       WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL;
7831 
7832     return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL);
7833   }
7834 
7835   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
7836   // sext/zext?
7837 
7838   return SDValue();
7839 }
7840 
7841 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or
7842 // vwsub(u).vv/vx.
7843 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) {
7844   SDValue Op0 = N->getOperand(0);
7845   SDValue Op1 = N->getOperand(1);
7846   SDValue Mask = N->getOperand(2);
7847   SDValue VL = N->getOperand(3);
7848 
7849   MVT VT = N->getSimpleValueType(0);
7850   MVT NarrowVT = Op1.getSimpleValueType();
7851   unsigned NarrowSize = NarrowVT.getScalarSizeInBits();
7852 
7853   unsigned VOpc;
7854   switch (N->getOpcode()) {
7855   default: llvm_unreachable("Unexpected opcode");
7856   case RISCVISD::VWADD_W_VL:  VOpc = RISCVISD::VWADD_VL;  break;
7857   case RISCVISD::VWSUB_W_VL:  VOpc = RISCVISD::VWSUB_VL;  break;
7858   case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break;
7859   case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break;
7860   }
7861 
7862   bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL ||
7863                   N->getOpcode() == RISCVISD::VWSUB_W_VL;
7864 
7865   SDLoc DL(N);
7866 
7867   // If the LHS is a sext or zext, we can narrow this op to the same size as
7868   // the RHS.
7869   if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) ||
7870        (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) &&
7871       Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) {
7872     unsigned ExtOpc = Op0.getOpcode();
7873     Op0 = Op0.getOperand(0);
7874     // Re-introduce narrower extends if needed.
7875     if (Op0.getValueType() != NarrowVT)
7876       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7877     return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL);
7878   }
7879 
7880   bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL ||
7881                N->getOpcode() == RISCVISD::VWADDU_W_VL;
7882 
7883   // Look for splats on the left hand side of a vwadd(u).wv. We might be able
7884   // to commute and use a vwadd(u).vx instead.
7885   if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL &&
7886       Op0.getOperand(0).isUndef() && Op0.getOperand(2) == VL) {
7887     Op0 = Op0.getOperand(1);
7888 
7889     // See if have enough sign bits or zero bits in the scalar to use a
7890     // widening add/sub by splatting to smaller element size.
7891     unsigned EltBits = VT.getScalarSizeInBits();
7892     unsigned ScalarBits = Op0.getValueSizeInBits();
7893     // Make sure we're getting all element bits from the scalar register.
7894     // FIXME: Support implicit sign extension of vmv.v.x?
7895     if (ScalarBits < EltBits)
7896       return SDValue();
7897 
7898     if (IsSigned) {
7899       if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize))
7900         return SDValue();
7901     } else {
7902       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7903       if (!DAG.MaskedValueIsZero(Op0, Mask))
7904         return SDValue();
7905     }
7906 
7907     Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
7908                       DAG.getUNDEF(NarrowVT), Op0, VL);
7909     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
7910   }
7911 
7912   return SDValue();
7913 }
7914 
7915 // Try to form VWMUL, VWMULU or VWMULSU.
7916 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
7917 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
7918                                        bool Commute) {
7919   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
7920   SDValue Op0 = N->getOperand(0);
7921   SDValue Op1 = N->getOperand(1);
7922   if (Commute)
7923     std::swap(Op0, Op1);
7924 
7925   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
7926   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
7927   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
7928   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
7929     return SDValue();
7930 
7931   SDValue Mask = N->getOperand(2);
7932   SDValue VL = N->getOperand(3);
7933 
7934   // Make sure the mask and VL match.
7935   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
7936     return SDValue();
7937 
7938   MVT VT = N->getSimpleValueType(0);
7939 
7940   // Determine the narrow size for a widening multiply.
7941   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7942   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7943                                   VT.getVectorElementCount());
7944 
7945   SDLoc DL(N);
7946 
7947   // See if the other operand is the same opcode.
7948   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
7949     if (!Op1.hasOneUse())
7950       return SDValue();
7951 
7952     // Make sure the mask and VL match.
7953     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
7954       return SDValue();
7955 
7956     Op1 = Op1.getOperand(0);
7957   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
7958     // The operand is a splat of a scalar.
7959 
7960     // The pasthru must be undef for tail agnostic
7961     if (!Op1.getOperand(0).isUndef())
7962       return SDValue();
7963     // The VL must be the same.
7964     if (Op1.getOperand(2) != VL)
7965       return SDValue();
7966 
7967     // Get the scalar value.
7968     Op1 = Op1.getOperand(1);
7969 
7970     // See if have enough sign bits or zero bits in the scalar to use a
7971     // widening multiply by splatting to smaller element size.
7972     unsigned EltBits = VT.getScalarSizeInBits();
7973     unsigned ScalarBits = Op1.getValueSizeInBits();
7974     // Make sure we're getting all element bits from the scalar register.
7975     // FIXME: Support implicit sign extension of vmv.v.x?
7976     if (ScalarBits < EltBits)
7977       return SDValue();
7978 
7979     // If the LHS is a sign extend, try to use vwmul.
7980     if (IsSignExt && DAG.ComputeNumSignBits(Op1) > (ScalarBits - NarrowSize)) {
7981       // Can use vwmul.
7982     } else {
7983       // Otherwise try to use vwmulu or vwmulsu.
7984       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7985       if (DAG.MaskedValueIsZero(Op1, Mask))
7986         IsVWMULSU = IsSignExt;
7987       else
7988         return SDValue();
7989     }
7990 
7991     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
7992                       DAG.getUNDEF(NarrowVT), Op1, VL);
7993   } else
7994     return SDValue();
7995 
7996   Op0 = Op0.getOperand(0);
7997 
7998   // Re-introduce narrower extends if needed.
7999   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
8000   if (Op0.getValueType() != NarrowVT)
8001     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8002   // vwmulsu requires second operand to be zero extended.
8003   ExtOpc = IsVWMULSU ? RISCVISD::VZEXT_VL : ExtOpc;
8004   if (Op1.getValueType() != NarrowVT)
8005     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8006 
8007   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
8008   if (!IsVWMULSU)
8009     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
8010   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
8011 }
8012 
8013 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
8014   switch (Op.getOpcode()) {
8015   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
8016   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
8017   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
8018   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
8019   case ISD::FROUND:     return RISCVFPRndMode::RMM;
8020   }
8021 
8022   return RISCVFPRndMode::Invalid;
8023 }
8024 
8025 // Fold
8026 //   (fp_to_int (froundeven X)) -> fcvt X, rne
8027 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
8028 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
8029 //   (fp_to_int (fceil X))      -> fcvt X, rup
8030 //   (fp_to_int (fround X))     -> fcvt X, rmm
8031 static SDValue performFP_TO_INTCombine(SDNode *N,
8032                                        TargetLowering::DAGCombinerInfo &DCI,
8033                                        const RISCVSubtarget &Subtarget) {
8034   SelectionDAG &DAG = DCI.DAG;
8035   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8036   MVT XLenVT = Subtarget.getXLenVT();
8037 
8038   // Only handle XLen or i32 types. Other types narrower than XLen will
8039   // eventually be legalized to XLenVT.
8040   EVT VT = N->getValueType(0);
8041   if (VT != MVT::i32 && VT != XLenVT)
8042     return SDValue();
8043 
8044   SDValue Src = N->getOperand(0);
8045 
8046   // Ensure the FP type is also legal.
8047   if (!TLI.isTypeLegal(Src.getValueType()))
8048     return SDValue();
8049 
8050   // Don't do this for f16 with Zfhmin and not Zfh.
8051   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8052     return SDValue();
8053 
8054   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8055   if (FRM == RISCVFPRndMode::Invalid)
8056     return SDValue();
8057 
8058   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
8059 
8060   unsigned Opc;
8061   if (VT == XLenVT)
8062     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8063   else
8064     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8065 
8066   SDLoc DL(N);
8067   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
8068                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8069   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
8070 }
8071 
8072 // Fold
8073 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
8074 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
8075 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
8076 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
8077 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
8078 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
8079                                        TargetLowering::DAGCombinerInfo &DCI,
8080                                        const RISCVSubtarget &Subtarget) {
8081   SelectionDAG &DAG = DCI.DAG;
8082   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8083   MVT XLenVT = Subtarget.getXLenVT();
8084 
8085   // Only handle XLen types. Other types narrower than XLen will eventually be
8086   // legalized to XLenVT.
8087   EVT DstVT = N->getValueType(0);
8088   if (DstVT != XLenVT)
8089     return SDValue();
8090 
8091   SDValue Src = N->getOperand(0);
8092 
8093   // Ensure the FP type is also legal.
8094   if (!TLI.isTypeLegal(Src.getValueType()))
8095     return SDValue();
8096 
8097   // Don't do this for f16 with Zfhmin and not Zfh.
8098   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8099     return SDValue();
8100 
8101   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
8102 
8103   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8104   if (FRM == RISCVFPRndMode::Invalid)
8105     return SDValue();
8106 
8107   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
8108 
8109   unsigned Opc;
8110   if (SatVT == DstVT)
8111     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8112   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
8113     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8114   else
8115     return SDValue();
8116   // FIXME: Support other SatVTs by clamping before or after the conversion.
8117 
8118   Src = Src.getOperand(0);
8119 
8120   SDLoc DL(N);
8121   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
8122                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8123 
8124   // RISCV FP-to-int conversions saturate to the destination register size, but
8125   // don't produce 0 for nan.
8126   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
8127   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
8128 }
8129 
8130 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
8131                                                DAGCombinerInfo &DCI) const {
8132   SelectionDAG &DAG = DCI.DAG;
8133 
8134   // Helper to call SimplifyDemandedBits on an operand of N where only some low
8135   // bits are demanded. N will be added to the Worklist if it was not deleted.
8136   // Caller should return SDValue(N, 0) if this returns true.
8137   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
8138     SDValue Op = N->getOperand(OpNo);
8139     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
8140     if (!SimplifyDemandedBits(Op, Mask, DCI))
8141       return false;
8142 
8143     if (N->getOpcode() != ISD::DELETED_NODE)
8144       DCI.AddToWorklist(N);
8145     return true;
8146   };
8147 
8148   switch (N->getOpcode()) {
8149   default:
8150     break;
8151   case RISCVISD::SplitF64: {
8152     SDValue Op0 = N->getOperand(0);
8153     // If the input to SplitF64 is just BuildPairF64 then the operation is
8154     // redundant. Instead, use BuildPairF64's operands directly.
8155     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
8156       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
8157 
8158     if (Op0->isUndef()) {
8159       SDValue Lo = DAG.getUNDEF(MVT::i32);
8160       SDValue Hi = DAG.getUNDEF(MVT::i32);
8161       return DCI.CombineTo(N, Lo, Hi);
8162     }
8163 
8164     SDLoc DL(N);
8165 
8166     // It's cheaper to materialise two 32-bit integers than to load a double
8167     // from the constant pool and transfer it to integer registers through the
8168     // stack.
8169     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
8170       APInt V = C->getValueAPF().bitcastToAPInt();
8171       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
8172       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
8173       return DCI.CombineTo(N, Lo, Hi);
8174     }
8175 
8176     // This is a target-specific version of a DAGCombine performed in
8177     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8178     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8179     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8180     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8181         !Op0.getNode()->hasOneUse())
8182       break;
8183     SDValue NewSplitF64 =
8184         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
8185                     Op0.getOperand(0));
8186     SDValue Lo = NewSplitF64.getValue(0);
8187     SDValue Hi = NewSplitF64.getValue(1);
8188     APInt SignBit = APInt::getSignMask(32);
8189     if (Op0.getOpcode() == ISD::FNEG) {
8190       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
8191                                   DAG.getConstant(SignBit, DL, MVT::i32));
8192       return DCI.CombineTo(N, Lo, NewHi);
8193     }
8194     assert(Op0.getOpcode() == ISD::FABS);
8195     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
8196                                 DAG.getConstant(~SignBit, DL, MVT::i32));
8197     return DCI.CombineTo(N, Lo, NewHi);
8198   }
8199   case RISCVISD::SLLW:
8200   case RISCVISD::SRAW:
8201   case RISCVISD::SRLW: {
8202     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8203     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8204         SimplifyDemandedLowBitsHelper(1, 5))
8205       return SDValue(N, 0);
8206 
8207     break;
8208   }
8209   case ISD::ROTR:
8210   case ISD::ROTL:
8211   case RISCVISD::RORW:
8212   case RISCVISD::ROLW: {
8213     if (N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW) {
8214       // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8215       if (SimplifyDemandedLowBitsHelper(0, 32) ||
8216           SimplifyDemandedLowBitsHelper(1, 5))
8217         return SDValue(N, 0);
8218     }
8219 
8220     return combineROTR_ROTL_RORW_ROLW(N, DAG, Subtarget);
8221   }
8222   case RISCVISD::CLZW:
8223   case RISCVISD::CTZW: {
8224     // Only the lower 32 bits of the first operand are read
8225     if (SimplifyDemandedLowBitsHelper(0, 32))
8226       return SDValue(N, 0);
8227     break;
8228   }
8229   case RISCVISD::GREV:
8230   case RISCVISD::GORC: {
8231     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
8232     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8233     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8234     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
8235       return SDValue(N, 0);
8236 
8237     return combineGREVI_GORCI(N, DAG);
8238   }
8239   case RISCVISD::GREVW:
8240   case RISCVISD::GORCW: {
8241     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8242     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8243         SimplifyDemandedLowBitsHelper(1, 5))
8244       return SDValue(N, 0);
8245 
8246     return combineGREVI_GORCI(N, DAG);
8247   }
8248   case RISCVISD::SHFL:
8249   case RISCVISD::UNSHFL: {
8250     // Only the lower log2(Bitwidth)-1 bits of the the shift amount are read.
8251     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8252     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8253     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
8254       return SDValue(N, 0);
8255 
8256     break;
8257   }
8258   case RISCVISD::SHFLW:
8259   case RISCVISD::UNSHFLW: {
8260     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
8261     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8262         SimplifyDemandedLowBitsHelper(1, 4))
8263       return SDValue(N, 0);
8264 
8265     break;
8266   }
8267   case RISCVISD::BCOMPRESSW:
8268   case RISCVISD::BDECOMPRESSW: {
8269     // Only the lower 32 bits of LHS and RHS are read.
8270     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8271         SimplifyDemandedLowBitsHelper(1, 32))
8272       return SDValue(N, 0);
8273 
8274     break;
8275   }
8276   case RISCVISD::FSR:
8277   case RISCVISD::FSL:
8278   case RISCVISD::FSRW:
8279   case RISCVISD::FSLW: {
8280     bool IsWInstruction =
8281         N->getOpcode() == RISCVISD::FSRW || N->getOpcode() == RISCVISD::FSLW;
8282     unsigned BitWidth =
8283         IsWInstruction ? 32 : N->getSimpleValueType(0).getSizeInBits();
8284     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8285     // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read.
8286     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) + 1))
8287       return SDValue(N, 0);
8288 
8289     break;
8290   }
8291   case RISCVISD::FMV_X_ANYEXTH:
8292   case RISCVISD::FMV_X_ANYEXTW_RV64: {
8293     SDLoc DL(N);
8294     SDValue Op0 = N->getOperand(0);
8295     MVT VT = N->getSimpleValueType(0);
8296     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
8297     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
8298     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
8299     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
8300          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
8301         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8302          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
8303       assert(Op0.getOperand(0).getValueType() == VT &&
8304              "Unexpected value type!");
8305       return Op0.getOperand(0);
8306     }
8307 
8308     // This is a target-specific version of a DAGCombine performed in
8309     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8310     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8311     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8312     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8313         !Op0.getNode()->hasOneUse())
8314       break;
8315     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
8316     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
8317     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
8318     if (Op0.getOpcode() == ISD::FNEG)
8319       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
8320                          DAG.getConstant(SignBit, DL, VT));
8321 
8322     assert(Op0.getOpcode() == ISD::FABS);
8323     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
8324                        DAG.getConstant(~SignBit, DL, VT));
8325   }
8326   case ISD::ADD:
8327     return performADDCombine(N, DAG, Subtarget);
8328   case ISD::SUB:
8329     return performSUBCombine(N, DAG);
8330   case ISD::AND:
8331     return performANDCombine(N, DAG);
8332   case ISD::OR:
8333     return performORCombine(N, DAG, Subtarget);
8334   case ISD::XOR:
8335     return performXORCombine(N, DAG);
8336   case ISD::SIGN_EXTEND_INREG:
8337     return performSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
8338   case ISD::ANY_EXTEND:
8339     return performANY_EXTENDCombine(N, DCI, Subtarget);
8340   case ISD::ZERO_EXTEND:
8341     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
8342     // type legalization. This is safe because fp_to_uint produces poison if
8343     // it overflows.
8344     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
8345       SDValue Src = N->getOperand(0);
8346       if (Src.getOpcode() == ISD::FP_TO_UINT &&
8347           isTypeLegal(Src.getOperand(0).getValueType()))
8348         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
8349                            Src.getOperand(0));
8350       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
8351           isTypeLegal(Src.getOperand(1).getValueType())) {
8352         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
8353         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
8354                                   Src.getOperand(0), Src.getOperand(1));
8355         DCI.CombineTo(N, Res);
8356         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
8357         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
8358         return SDValue(N, 0); // Return N so it doesn't get rechecked.
8359       }
8360     }
8361     return SDValue();
8362   case RISCVISD::SELECT_CC: {
8363     // Transform
8364     SDValue LHS = N->getOperand(0);
8365     SDValue RHS = N->getOperand(1);
8366     SDValue TrueV = N->getOperand(3);
8367     SDValue FalseV = N->getOperand(4);
8368 
8369     // If the True and False values are the same, we don't need a select_cc.
8370     if (TrueV == FalseV)
8371       return TrueV;
8372 
8373     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
8374     if (!ISD::isIntEqualitySetCC(CCVal))
8375       break;
8376 
8377     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
8378     //      (select_cc X, Y, lt, trueV, falseV)
8379     // Sometimes the setcc is introduced after select_cc has been formed.
8380     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8381         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8382       // If we're looking for eq 0 instead of ne 0, we need to invert the
8383       // condition.
8384       bool Invert = CCVal == ISD::SETEQ;
8385       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8386       if (Invert)
8387         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8388 
8389       SDLoc DL(N);
8390       RHS = LHS.getOperand(1);
8391       LHS = LHS.getOperand(0);
8392       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8393 
8394       SDValue TargetCC = DAG.getCondCode(CCVal);
8395       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8396                          {LHS, RHS, TargetCC, TrueV, FalseV});
8397     }
8398 
8399     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
8400     //      (select_cc X, Y, eq/ne, trueV, falseV)
8401     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8402       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
8403                          {LHS.getOperand(0), LHS.getOperand(1),
8404                           N->getOperand(2), TrueV, FalseV});
8405     // (select_cc X, 1, setne, trueV, falseV) ->
8406     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
8407     // This can occur when legalizing some floating point comparisons.
8408     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8409     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8410       SDLoc DL(N);
8411       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8412       SDValue TargetCC = DAG.getCondCode(CCVal);
8413       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8414       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8415                          {LHS, RHS, TargetCC, TrueV, FalseV});
8416     }
8417 
8418     break;
8419   }
8420   case RISCVISD::BR_CC: {
8421     SDValue LHS = N->getOperand(1);
8422     SDValue RHS = N->getOperand(2);
8423     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
8424     if (!ISD::isIntEqualitySetCC(CCVal))
8425       break;
8426 
8427     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
8428     //      (br_cc X, Y, lt, dest)
8429     // Sometimes the setcc is introduced after br_cc has been formed.
8430     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8431         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8432       // If we're looking for eq 0 instead of ne 0, we need to invert the
8433       // condition.
8434       bool Invert = CCVal == ISD::SETEQ;
8435       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8436       if (Invert)
8437         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8438 
8439       SDLoc DL(N);
8440       RHS = LHS.getOperand(1);
8441       LHS = LHS.getOperand(0);
8442       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8443 
8444       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8445                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
8446                          N->getOperand(4));
8447     }
8448 
8449     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
8450     //      (br_cc X, Y, eq/ne, trueV, falseV)
8451     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8452       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
8453                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
8454                          N->getOperand(3), N->getOperand(4));
8455 
8456     // (br_cc X, 1, setne, br_cc) ->
8457     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
8458     // This can occur when legalizing some floating point comparisons.
8459     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8460     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8461       SDLoc DL(N);
8462       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8463       SDValue TargetCC = DAG.getCondCode(CCVal);
8464       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8465       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8466                          N->getOperand(0), LHS, RHS, TargetCC,
8467                          N->getOperand(4));
8468     }
8469     break;
8470   }
8471   case ISD::FP_TO_SINT:
8472   case ISD::FP_TO_UINT:
8473     return performFP_TO_INTCombine(N, DCI, Subtarget);
8474   case ISD::FP_TO_SINT_SAT:
8475   case ISD::FP_TO_UINT_SAT:
8476     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
8477   case ISD::FCOPYSIGN: {
8478     EVT VT = N->getValueType(0);
8479     if (!VT.isVector())
8480       break;
8481     // There is a form of VFSGNJ which injects the negated sign of its second
8482     // operand. Try and bubble any FNEG up after the extend/round to produce
8483     // this optimized pattern. Avoid modifying cases where FP_ROUND and
8484     // TRUNC=1.
8485     SDValue In2 = N->getOperand(1);
8486     // Avoid cases where the extend/round has multiple uses, as duplicating
8487     // those is typically more expensive than removing a fneg.
8488     if (!In2.hasOneUse())
8489       break;
8490     if (In2.getOpcode() != ISD::FP_EXTEND &&
8491         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
8492       break;
8493     In2 = In2.getOperand(0);
8494     if (In2.getOpcode() != ISD::FNEG)
8495       break;
8496     SDLoc DL(N);
8497     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
8498     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
8499                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
8500   }
8501   case ISD::MGATHER:
8502   case ISD::MSCATTER:
8503   case ISD::VP_GATHER:
8504   case ISD::VP_SCATTER: {
8505     if (!DCI.isBeforeLegalize())
8506       break;
8507     SDValue Index, ScaleOp;
8508     bool IsIndexScaled = false;
8509     bool IsIndexSigned = false;
8510     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
8511       Index = VPGSN->getIndex();
8512       ScaleOp = VPGSN->getScale();
8513       IsIndexScaled = VPGSN->isIndexScaled();
8514       IsIndexSigned = VPGSN->isIndexSigned();
8515     } else {
8516       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
8517       Index = MGSN->getIndex();
8518       ScaleOp = MGSN->getScale();
8519       IsIndexScaled = MGSN->isIndexScaled();
8520       IsIndexSigned = MGSN->isIndexSigned();
8521     }
8522     EVT IndexVT = Index.getValueType();
8523     MVT XLenVT = Subtarget.getXLenVT();
8524     // RISCV indexed loads only support the "unsigned unscaled" addressing
8525     // mode, so anything else must be manually legalized.
8526     bool NeedsIdxLegalization =
8527         IsIndexScaled ||
8528         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
8529     if (!NeedsIdxLegalization)
8530       break;
8531 
8532     SDLoc DL(N);
8533 
8534     // Any index legalization should first promote to XLenVT, so we don't lose
8535     // bits when scaling. This may create an illegal index type so we let
8536     // LLVM's legalization take care of the splitting.
8537     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
8538     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
8539       IndexVT = IndexVT.changeVectorElementType(XLenVT);
8540       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
8541                           DL, IndexVT, Index);
8542     }
8543 
8544     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
8545     if (IsIndexScaled && Scale != 1) {
8546       // Manually scale the indices by the element size.
8547       // TODO: Sanitize the scale operand here?
8548       // TODO: For VP nodes, should we use VP_SHL here?
8549       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
8550       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
8551       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
8552     }
8553 
8554     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
8555     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
8556       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
8557                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
8558                               VPGN->getScale(), VPGN->getMask(),
8559                               VPGN->getVectorLength()},
8560                              VPGN->getMemOperand(), NewIndexTy);
8561     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
8562       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
8563                               {VPSN->getChain(), VPSN->getValue(),
8564                                VPSN->getBasePtr(), Index, VPSN->getScale(),
8565                                VPSN->getMask(), VPSN->getVectorLength()},
8566                               VPSN->getMemOperand(), NewIndexTy);
8567     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
8568       return DAG.getMaskedGather(
8569           N->getVTList(), MGN->getMemoryVT(), DL,
8570           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
8571            MGN->getBasePtr(), Index, MGN->getScale()},
8572           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
8573     const auto *MSN = cast<MaskedScatterSDNode>(N);
8574     return DAG.getMaskedScatter(
8575         N->getVTList(), MSN->getMemoryVT(), DL,
8576         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
8577          Index, MSN->getScale()},
8578         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
8579   }
8580   case RISCVISD::SRA_VL:
8581   case RISCVISD::SRL_VL:
8582   case RISCVISD::SHL_VL: {
8583     SDValue ShAmt = N->getOperand(1);
8584     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8585       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8586       SDLoc DL(N);
8587       SDValue VL = N->getOperand(3);
8588       EVT VT = N->getValueType(0);
8589       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8590                           ShAmt.getOperand(1), VL);
8591       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
8592                          N->getOperand(2), N->getOperand(3));
8593     }
8594     break;
8595   }
8596   case ISD::SRA:
8597   case ISD::SRL:
8598   case ISD::SHL: {
8599     SDValue ShAmt = N->getOperand(1);
8600     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8601       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8602       SDLoc DL(N);
8603       EVT VT = N->getValueType(0);
8604       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8605                           ShAmt.getOperand(1),
8606                           DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
8607       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
8608     }
8609     break;
8610   }
8611   case RISCVISD::ADD_VL:
8612     if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false))
8613       return V;
8614     return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true);
8615   case RISCVISD::SUB_VL:
8616     return combineADDSUB_VLToVWADDSUB_VL(N, DAG);
8617   case RISCVISD::VWADD_W_VL:
8618   case RISCVISD::VWADDU_W_VL:
8619   case RISCVISD::VWSUB_W_VL:
8620   case RISCVISD::VWSUBU_W_VL:
8621     return combineVWADD_W_VL_VWSUB_W_VL(N, DAG);
8622   case RISCVISD::MUL_VL:
8623     if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
8624       return V;
8625     // Mul is commutative.
8626     return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
8627   case ISD::STORE: {
8628     auto *Store = cast<StoreSDNode>(N);
8629     SDValue Val = Store->getValue();
8630     // Combine store of vmv.x.s to vse with VL of 1.
8631     // FIXME: Support FP.
8632     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
8633       SDValue Src = Val.getOperand(0);
8634       EVT VecVT = Src.getValueType();
8635       EVT MemVT = Store->getMemoryVT();
8636       // The memory VT and the element type must match.
8637       if (VecVT.getVectorElementType() == MemVT) {
8638         SDLoc DL(N);
8639         MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
8640         return DAG.getStoreVP(
8641             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
8642             DAG.getConstant(1, DL, MaskVT),
8643             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
8644             Store->getMemOperand(), Store->getAddressingMode(),
8645             Store->isTruncatingStore(), /*IsCompress*/ false);
8646       }
8647     }
8648 
8649     break;
8650   }
8651   case ISD::SPLAT_VECTOR: {
8652     EVT VT = N->getValueType(0);
8653     // Only perform this combine on legal MVT types.
8654     if (!isTypeLegal(VT))
8655       break;
8656     if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
8657                                          DAG, Subtarget))
8658       return Gather;
8659     break;
8660   }
8661   case RISCVISD::VMV_V_X_VL: {
8662     // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
8663     // scalar input.
8664     unsigned ScalarSize = N->getOperand(1).getValueSizeInBits();
8665     unsigned EltWidth = N->getValueType(0).getScalarSizeInBits();
8666     if (ScalarSize > EltWidth && N->getOperand(0).isUndef())
8667       if (SimplifyDemandedLowBitsHelper(1, EltWidth))
8668         return SDValue(N, 0);
8669 
8670     break;
8671   }
8672   case ISD::INTRINSIC_WO_CHAIN: {
8673     unsigned IntNo = N->getConstantOperandVal(0);
8674     switch (IntNo) {
8675       // By default we do not combine any intrinsic.
8676     default:
8677       return SDValue();
8678     case Intrinsic::riscv_vcpop:
8679     case Intrinsic::riscv_vcpop_mask:
8680     case Intrinsic::riscv_vfirst:
8681     case Intrinsic::riscv_vfirst_mask: {
8682       SDValue VL = N->getOperand(2);
8683       if (IntNo == Intrinsic::riscv_vcpop_mask ||
8684           IntNo == Intrinsic::riscv_vfirst_mask)
8685         VL = N->getOperand(3);
8686       if (!isNullConstant(VL))
8687         return SDValue();
8688       // If VL is 0, vcpop -> li 0, vfirst -> li -1.
8689       SDLoc DL(N);
8690       EVT VT = N->getValueType(0);
8691       if (IntNo == Intrinsic::riscv_vfirst ||
8692           IntNo == Intrinsic::riscv_vfirst_mask)
8693         return DAG.getConstant(-1, DL, VT);
8694       return DAG.getConstant(0, DL, VT);
8695     }
8696     }
8697   }
8698   }
8699 
8700   return SDValue();
8701 }
8702 
8703 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
8704     const SDNode *N, CombineLevel Level) const {
8705   // The following folds are only desirable if `(OP _, c1 << c2)` can be
8706   // materialised in fewer instructions than `(OP _, c1)`:
8707   //
8708   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
8709   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
8710   SDValue N0 = N->getOperand(0);
8711   EVT Ty = N0.getValueType();
8712   if (Ty.isScalarInteger() &&
8713       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
8714     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
8715     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
8716     if (C1 && C2) {
8717       const APInt &C1Int = C1->getAPIntValue();
8718       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
8719 
8720       // We can materialise `c1 << c2` into an add immediate, so it's "free",
8721       // and the combine should happen, to potentially allow further combines
8722       // later.
8723       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
8724           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
8725         return true;
8726 
8727       // We can materialise `c1` in an add immediate, so it's "free", and the
8728       // combine should be prevented.
8729       if (C1Int.getMinSignedBits() <= 64 &&
8730           isLegalAddImmediate(C1Int.getSExtValue()))
8731         return false;
8732 
8733       // Neither constant will fit into an immediate, so find materialisation
8734       // costs.
8735       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
8736                                               Subtarget.getFeatureBits(),
8737                                               /*CompressionCost*/true);
8738       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
8739           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
8740           /*CompressionCost*/true);
8741 
8742       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
8743       // combine should be prevented.
8744       if (C1Cost < ShiftedC1Cost)
8745         return false;
8746     }
8747   }
8748   return true;
8749 }
8750 
8751 bool RISCVTargetLowering::targetShrinkDemandedConstant(
8752     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
8753     TargetLoweringOpt &TLO) const {
8754   // Delay this optimization as late as possible.
8755   if (!TLO.LegalOps)
8756     return false;
8757 
8758   EVT VT = Op.getValueType();
8759   if (VT.isVector())
8760     return false;
8761 
8762   // Only handle AND for now.
8763   if (Op.getOpcode() != ISD::AND)
8764     return false;
8765 
8766   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
8767   if (!C)
8768     return false;
8769 
8770   const APInt &Mask = C->getAPIntValue();
8771 
8772   // Clear all non-demanded bits initially.
8773   APInt ShrunkMask = Mask & DemandedBits;
8774 
8775   // Try to make a smaller immediate by setting undemanded bits.
8776 
8777   APInt ExpandedMask = Mask | ~DemandedBits;
8778 
8779   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
8780     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
8781   };
8782   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
8783     if (NewMask == Mask)
8784       return true;
8785     SDLoc DL(Op);
8786     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
8787     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
8788     return TLO.CombineTo(Op, NewOp);
8789   };
8790 
8791   // If the shrunk mask fits in sign extended 12 bits, let the target
8792   // independent code apply it.
8793   if (ShrunkMask.isSignedIntN(12))
8794     return false;
8795 
8796   // Preserve (and X, 0xffff) when zext.h is supported.
8797   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
8798     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
8799     if (IsLegalMask(NewMask))
8800       return UseMask(NewMask);
8801   }
8802 
8803   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
8804   if (VT == MVT::i64) {
8805     APInt NewMask = APInt(64, 0xffffffff);
8806     if (IsLegalMask(NewMask))
8807       return UseMask(NewMask);
8808   }
8809 
8810   // For the remaining optimizations, we need to be able to make a negative
8811   // number through a combination of mask and undemanded bits.
8812   if (!ExpandedMask.isNegative())
8813     return false;
8814 
8815   // What is the fewest number of bits we need to represent the negative number.
8816   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
8817 
8818   // Try to make a 12 bit negative immediate. If that fails try to make a 32
8819   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
8820   APInt NewMask = ShrunkMask;
8821   if (MinSignedBits <= 12)
8822     NewMask.setBitsFrom(11);
8823   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
8824     NewMask.setBitsFrom(31);
8825   else
8826     return false;
8827 
8828   // Check that our new mask is a subset of the demanded mask.
8829   assert(IsLegalMask(NewMask));
8830   return UseMask(NewMask);
8831 }
8832 
8833 static void computeGREV(APInt &Src, unsigned ShAmt) {
8834   ShAmt &= Src.getBitWidth() - 1;
8835   uint64_t x = Src.getZExtValue();
8836   if (ShAmt & 1)
8837     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
8838   if (ShAmt & 2)
8839     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
8840   if (ShAmt & 4)
8841     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
8842   if (ShAmt & 8)
8843     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
8844   if (ShAmt & 16)
8845     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
8846   if (ShAmt & 32)
8847     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
8848   Src = x;
8849 }
8850 
8851 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
8852                                                         KnownBits &Known,
8853                                                         const APInt &DemandedElts,
8854                                                         const SelectionDAG &DAG,
8855                                                         unsigned Depth) const {
8856   unsigned BitWidth = Known.getBitWidth();
8857   unsigned Opc = Op.getOpcode();
8858   assert((Opc >= ISD::BUILTIN_OP_END ||
8859           Opc == ISD::INTRINSIC_WO_CHAIN ||
8860           Opc == ISD::INTRINSIC_W_CHAIN ||
8861           Opc == ISD::INTRINSIC_VOID) &&
8862          "Should use MaskedValueIsZero if you don't know whether Op"
8863          " is a target node!");
8864 
8865   Known.resetAll();
8866   switch (Opc) {
8867   default: break;
8868   case RISCVISD::SELECT_CC: {
8869     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
8870     // If we don't know any bits, early out.
8871     if (Known.isUnknown())
8872       break;
8873     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
8874 
8875     // Only known if known in both the LHS and RHS.
8876     Known = KnownBits::commonBits(Known, Known2);
8877     break;
8878   }
8879   case RISCVISD::REMUW: {
8880     KnownBits Known2;
8881     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8882     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8883     // We only care about the lower 32 bits.
8884     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
8885     // Restore the original width by sign extending.
8886     Known = Known.sext(BitWidth);
8887     break;
8888   }
8889   case RISCVISD::DIVUW: {
8890     KnownBits Known2;
8891     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8892     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8893     // We only care about the lower 32 bits.
8894     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
8895     // Restore the original width by sign extending.
8896     Known = Known.sext(BitWidth);
8897     break;
8898   }
8899   case RISCVISD::CTZW: {
8900     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8901     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
8902     unsigned LowBits = Log2_32(PossibleTZ) + 1;
8903     Known.Zero.setBitsFrom(LowBits);
8904     break;
8905   }
8906   case RISCVISD::CLZW: {
8907     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8908     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
8909     unsigned LowBits = Log2_32(PossibleLZ) + 1;
8910     Known.Zero.setBitsFrom(LowBits);
8911     break;
8912   }
8913   case RISCVISD::GREV:
8914   case RISCVISD::GREVW: {
8915     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
8916       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8917       if (Opc == RISCVISD::GREVW)
8918         Known = Known.trunc(32);
8919       unsigned ShAmt = C->getZExtValue();
8920       computeGREV(Known.Zero, ShAmt);
8921       computeGREV(Known.One, ShAmt);
8922       if (Opc == RISCVISD::GREVW)
8923         Known = Known.sext(BitWidth);
8924     }
8925     break;
8926   }
8927   case RISCVISD::READ_VLENB: {
8928     // If we know the minimum VLen from Zvl extensions, we can use that to
8929     // determine the trailing zeros of VLENB.
8930     // FIXME: Limit to 128 bit vectors until we have more testing.
8931     unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8;
8932     if (MinVLenB > 0)
8933       Known.Zero.setLowBits(Log2_32(MinVLenB));
8934     // We assume VLENB is no more than 65536 / 8 bytes.
8935     Known.Zero.setBitsFrom(14);
8936     break;
8937   }
8938   case ISD::INTRINSIC_W_CHAIN:
8939   case ISD::INTRINSIC_WO_CHAIN: {
8940     unsigned IntNo =
8941         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
8942     switch (IntNo) {
8943     default:
8944       // We can't do anything for most intrinsics.
8945       break;
8946     case Intrinsic::riscv_vsetvli:
8947     case Intrinsic::riscv_vsetvlimax:
8948     case Intrinsic::riscv_vsetvli_opt:
8949     case Intrinsic::riscv_vsetvlimax_opt:
8950       // Assume that VL output is positive and would fit in an int32_t.
8951       // TODO: VLEN might be capped at 16 bits in a future V spec update.
8952       if (BitWidth >= 32)
8953         Known.Zero.setBitsFrom(31);
8954       break;
8955     }
8956     break;
8957   }
8958   }
8959 }
8960 
8961 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
8962     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
8963     unsigned Depth) const {
8964   switch (Op.getOpcode()) {
8965   default:
8966     break;
8967   case RISCVISD::SELECT_CC: {
8968     unsigned Tmp =
8969         DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
8970     if (Tmp == 1) return 1;  // Early out.
8971     unsigned Tmp2 =
8972         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
8973     return std::min(Tmp, Tmp2);
8974   }
8975   case RISCVISD::SLLW:
8976   case RISCVISD::SRAW:
8977   case RISCVISD::SRLW:
8978   case RISCVISD::DIVW:
8979   case RISCVISD::DIVUW:
8980   case RISCVISD::REMUW:
8981   case RISCVISD::ROLW:
8982   case RISCVISD::RORW:
8983   case RISCVISD::GREVW:
8984   case RISCVISD::GORCW:
8985   case RISCVISD::FSLW:
8986   case RISCVISD::FSRW:
8987   case RISCVISD::SHFLW:
8988   case RISCVISD::UNSHFLW:
8989   case RISCVISD::BCOMPRESSW:
8990   case RISCVISD::BDECOMPRESSW:
8991   case RISCVISD::BFPW:
8992   case RISCVISD::FCVT_W_RV64:
8993   case RISCVISD::FCVT_WU_RV64:
8994   case RISCVISD::STRICT_FCVT_W_RV64:
8995   case RISCVISD::STRICT_FCVT_WU_RV64:
8996     // TODO: As the result is sign-extended, this is conservatively correct. A
8997     // more precise answer could be calculated for SRAW depending on known
8998     // bits in the shift amount.
8999     return 33;
9000   case RISCVISD::SHFL:
9001   case RISCVISD::UNSHFL: {
9002     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
9003     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
9004     // will stay within the upper 32 bits. If there were more than 32 sign bits
9005     // before there will be at least 33 sign bits after.
9006     if (Op.getValueType() == MVT::i64 &&
9007         isa<ConstantSDNode>(Op.getOperand(1)) &&
9008         (Op.getConstantOperandVal(1) & 0x10) == 0) {
9009       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
9010       if (Tmp > 32)
9011         return 33;
9012     }
9013     break;
9014   }
9015   case RISCVISD::VMV_X_S: {
9016     // The number of sign bits of the scalar result is computed by obtaining the
9017     // element type of the input vector operand, subtracting its width from the
9018     // XLEN, and then adding one (sign bit within the element type). If the
9019     // element type is wider than XLen, the least-significant XLEN bits are
9020     // taken.
9021     unsigned XLen = Subtarget.getXLen();
9022     unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
9023     if (EltBits <= XLen)
9024       return XLen - EltBits + 1;
9025     break;
9026   }
9027   }
9028 
9029   return 1;
9030 }
9031 
9032 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
9033                                                   MachineBasicBlock *BB) {
9034   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
9035 
9036   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
9037   // Should the count have wrapped while it was being read, we need to try
9038   // again.
9039   // ...
9040   // read:
9041   // rdcycleh x3 # load high word of cycle
9042   // rdcycle  x2 # load low word of cycle
9043   // rdcycleh x4 # load high word of cycle
9044   // bne x3, x4, read # check if high word reads match, otherwise try again
9045   // ...
9046 
9047   MachineFunction &MF = *BB->getParent();
9048   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9049   MachineFunction::iterator It = ++BB->getIterator();
9050 
9051   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9052   MF.insert(It, LoopMBB);
9053 
9054   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
9055   MF.insert(It, DoneMBB);
9056 
9057   // Transfer the remainder of BB and its successor edges to DoneMBB.
9058   DoneMBB->splice(DoneMBB->begin(), BB,
9059                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
9060   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
9061 
9062   BB->addSuccessor(LoopMBB);
9063 
9064   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9065   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9066   Register LoReg = MI.getOperand(0).getReg();
9067   Register HiReg = MI.getOperand(1).getReg();
9068   DebugLoc DL = MI.getDebugLoc();
9069 
9070   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
9071   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
9072       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9073       .addReg(RISCV::X0);
9074   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
9075       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
9076       .addReg(RISCV::X0);
9077   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
9078       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
9079       .addReg(RISCV::X0);
9080 
9081   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
9082       .addReg(HiReg)
9083       .addReg(ReadAgainReg)
9084       .addMBB(LoopMBB);
9085 
9086   LoopMBB->addSuccessor(LoopMBB);
9087   LoopMBB->addSuccessor(DoneMBB);
9088 
9089   MI.eraseFromParent();
9090 
9091   return DoneMBB;
9092 }
9093 
9094 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
9095                                              MachineBasicBlock *BB) {
9096   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
9097 
9098   MachineFunction &MF = *BB->getParent();
9099   DebugLoc DL = MI.getDebugLoc();
9100   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9101   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9102   Register LoReg = MI.getOperand(0).getReg();
9103   Register HiReg = MI.getOperand(1).getReg();
9104   Register SrcReg = MI.getOperand(2).getReg();
9105   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
9106   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9107 
9108   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
9109                           RI);
9110   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9111   MachineMemOperand *MMOLo =
9112       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
9113   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9114       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
9115   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
9116       .addFrameIndex(FI)
9117       .addImm(0)
9118       .addMemOperand(MMOLo);
9119   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
9120       .addFrameIndex(FI)
9121       .addImm(4)
9122       .addMemOperand(MMOHi);
9123   MI.eraseFromParent(); // The pseudo instruction is gone now.
9124   return BB;
9125 }
9126 
9127 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
9128                                                  MachineBasicBlock *BB) {
9129   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
9130          "Unexpected instruction");
9131 
9132   MachineFunction &MF = *BB->getParent();
9133   DebugLoc DL = MI.getDebugLoc();
9134   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
9135   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
9136   Register DstReg = MI.getOperand(0).getReg();
9137   Register LoReg = MI.getOperand(1).getReg();
9138   Register HiReg = MI.getOperand(2).getReg();
9139   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
9140   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
9141 
9142   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
9143   MachineMemOperand *MMOLo =
9144       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
9145   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
9146       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
9147   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9148       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
9149       .addFrameIndex(FI)
9150       .addImm(0)
9151       .addMemOperand(MMOLo);
9152   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
9153       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
9154       .addFrameIndex(FI)
9155       .addImm(4)
9156       .addMemOperand(MMOHi);
9157   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
9158   MI.eraseFromParent(); // The pseudo instruction is gone now.
9159   return BB;
9160 }
9161 
9162 static bool isSelectPseudo(MachineInstr &MI) {
9163   switch (MI.getOpcode()) {
9164   default:
9165     return false;
9166   case RISCV::Select_GPR_Using_CC_GPR:
9167   case RISCV::Select_FPR16_Using_CC_GPR:
9168   case RISCV::Select_FPR32_Using_CC_GPR:
9169   case RISCV::Select_FPR64_Using_CC_GPR:
9170     return true;
9171   }
9172 }
9173 
9174 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
9175                                         unsigned RelOpcode, unsigned EqOpcode,
9176                                         const RISCVSubtarget &Subtarget) {
9177   DebugLoc DL = MI.getDebugLoc();
9178   Register DstReg = MI.getOperand(0).getReg();
9179   Register Src1Reg = MI.getOperand(1).getReg();
9180   Register Src2Reg = MI.getOperand(2).getReg();
9181   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
9182   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
9183   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
9184 
9185   // Save the current FFLAGS.
9186   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
9187 
9188   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
9189                  .addReg(Src1Reg)
9190                  .addReg(Src2Reg);
9191   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9192     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
9193 
9194   // Restore the FFLAGS.
9195   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
9196       .addReg(SavedFFlags, RegState::Kill);
9197 
9198   // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
9199   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
9200                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
9201                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
9202   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9203     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
9204 
9205   // Erase the pseudoinstruction.
9206   MI.eraseFromParent();
9207   return BB;
9208 }
9209 
9210 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
9211                                            MachineBasicBlock *BB,
9212                                            const RISCVSubtarget &Subtarget) {
9213   // To "insert" Select_* instructions, we actually have to insert the triangle
9214   // control-flow pattern.  The incoming instructions know the destination vreg
9215   // to set, the condition code register to branch on, the true/false values to
9216   // select between, and the condcode to use to select the appropriate branch.
9217   //
9218   // We produce the following control flow:
9219   //     HeadMBB
9220   //     |  \
9221   //     |  IfFalseMBB
9222   //     | /
9223   //    TailMBB
9224   //
9225   // When we find a sequence of selects we attempt to optimize their emission
9226   // by sharing the control flow. Currently we only handle cases where we have
9227   // multiple selects with the exact same condition (same LHS, RHS and CC).
9228   // The selects may be interleaved with other instructions if the other
9229   // instructions meet some requirements we deem safe:
9230   // - They are debug instructions. Otherwise,
9231   // - They do not have side-effects, do not access memory and their inputs do
9232   //   not depend on the results of the select pseudo-instructions.
9233   // The TrueV/FalseV operands of the selects cannot depend on the result of
9234   // previous selects in the sequence.
9235   // These conditions could be further relaxed. See the X86 target for a
9236   // related approach and more information.
9237   Register LHS = MI.getOperand(1).getReg();
9238   Register RHS = MI.getOperand(2).getReg();
9239   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
9240 
9241   SmallVector<MachineInstr *, 4> SelectDebugValues;
9242   SmallSet<Register, 4> SelectDests;
9243   SelectDests.insert(MI.getOperand(0).getReg());
9244 
9245   MachineInstr *LastSelectPseudo = &MI;
9246 
9247   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
9248        SequenceMBBI != E; ++SequenceMBBI) {
9249     if (SequenceMBBI->isDebugInstr())
9250       continue;
9251     else if (isSelectPseudo(*SequenceMBBI)) {
9252       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
9253           SequenceMBBI->getOperand(2).getReg() != RHS ||
9254           SequenceMBBI->getOperand(3).getImm() != CC ||
9255           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
9256           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
9257         break;
9258       LastSelectPseudo = &*SequenceMBBI;
9259       SequenceMBBI->collectDebugValues(SelectDebugValues);
9260       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
9261     } else {
9262       if (SequenceMBBI->hasUnmodeledSideEffects() ||
9263           SequenceMBBI->mayLoadOrStore())
9264         break;
9265       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
9266             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
9267           }))
9268         break;
9269     }
9270   }
9271 
9272   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
9273   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9274   DebugLoc DL = MI.getDebugLoc();
9275   MachineFunction::iterator I = ++BB->getIterator();
9276 
9277   MachineBasicBlock *HeadMBB = BB;
9278   MachineFunction *F = BB->getParent();
9279   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
9280   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
9281 
9282   F->insert(I, IfFalseMBB);
9283   F->insert(I, TailMBB);
9284 
9285   // Transfer debug instructions associated with the selects to TailMBB.
9286   for (MachineInstr *DebugInstr : SelectDebugValues) {
9287     TailMBB->push_back(DebugInstr->removeFromParent());
9288   }
9289 
9290   // Move all instructions after the sequence to TailMBB.
9291   TailMBB->splice(TailMBB->end(), HeadMBB,
9292                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
9293   // Update machine-CFG edges by transferring all successors of the current
9294   // block to the new block which will contain the Phi nodes for the selects.
9295   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
9296   // Set the successors for HeadMBB.
9297   HeadMBB->addSuccessor(IfFalseMBB);
9298   HeadMBB->addSuccessor(TailMBB);
9299 
9300   // Insert appropriate branch.
9301   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
9302     .addReg(LHS)
9303     .addReg(RHS)
9304     .addMBB(TailMBB);
9305 
9306   // IfFalseMBB just falls through to TailMBB.
9307   IfFalseMBB->addSuccessor(TailMBB);
9308 
9309   // Create PHIs for all of the select pseudo-instructions.
9310   auto SelectMBBI = MI.getIterator();
9311   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
9312   auto InsertionPoint = TailMBB->begin();
9313   while (SelectMBBI != SelectEnd) {
9314     auto Next = std::next(SelectMBBI);
9315     if (isSelectPseudo(*SelectMBBI)) {
9316       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
9317       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
9318               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
9319           .addReg(SelectMBBI->getOperand(4).getReg())
9320           .addMBB(HeadMBB)
9321           .addReg(SelectMBBI->getOperand(5).getReg())
9322           .addMBB(IfFalseMBB);
9323       SelectMBBI->eraseFromParent();
9324     }
9325     SelectMBBI = Next;
9326   }
9327 
9328   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
9329   return TailMBB;
9330 }
9331 
9332 MachineBasicBlock *
9333 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
9334                                                  MachineBasicBlock *BB) const {
9335   switch (MI.getOpcode()) {
9336   default:
9337     llvm_unreachable("Unexpected instr type to insert");
9338   case RISCV::ReadCycleWide:
9339     assert(!Subtarget.is64Bit() &&
9340            "ReadCycleWrite is only to be used on riscv32");
9341     return emitReadCycleWidePseudo(MI, BB);
9342   case RISCV::Select_GPR_Using_CC_GPR:
9343   case RISCV::Select_FPR16_Using_CC_GPR:
9344   case RISCV::Select_FPR32_Using_CC_GPR:
9345   case RISCV::Select_FPR64_Using_CC_GPR:
9346     return emitSelectPseudo(MI, BB, Subtarget);
9347   case RISCV::BuildPairF64Pseudo:
9348     return emitBuildPairF64Pseudo(MI, BB);
9349   case RISCV::SplitF64Pseudo:
9350     return emitSplitF64Pseudo(MI, BB);
9351   case RISCV::PseudoQuietFLE_H:
9352     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
9353   case RISCV::PseudoQuietFLT_H:
9354     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
9355   case RISCV::PseudoQuietFLE_S:
9356     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
9357   case RISCV::PseudoQuietFLT_S:
9358     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
9359   case RISCV::PseudoQuietFLE_D:
9360     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
9361   case RISCV::PseudoQuietFLT_D:
9362     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
9363   }
9364 }
9365 
9366 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9367                                                         SDNode *Node) const {
9368   // Add FRM dependency to any instructions with dynamic rounding mode.
9369   unsigned Opc = MI.getOpcode();
9370   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
9371   if (Idx < 0)
9372     return;
9373   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
9374     return;
9375   // If the instruction already reads FRM, don't add another read.
9376   if (MI.readsRegister(RISCV::FRM))
9377     return;
9378   MI.addOperand(
9379       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
9380 }
9381 
9382 // Calling Convention Implementation.
9383 // The expectations for frontend ABI lowering vary from target to target.
9384 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
9385 // details, but this is a longer term goal. For now, we simply try to keep the
9386 // role of the frontend as simple and well-defined as possible. The rules can
9387 // be summarised as:
9388 // * Never split up large scalar arguments. We handle them here.
9389 // * If a hardfloat calling convention is being used, and the struct may be
9390 // passed in a pair of registers (fp+fp, int+fp), and both registers are
9391 // available, then pass as two separate arguments. If either the GPRs or FPRs
9392 // are exhausted, then pass according to the rule below.
9393 // * If a struct could never be passed in registers or directly in a stack
9394 // slot (as it is larger than 2*XLEN and the floating point rules don't
9395 // apply), then pass it using a pointer with the byval attribute.
9396 // * If a struct is less than 2*XLEN, then coerce to either a two-element
9397 // word-sized array or a 2*XLEN scalar (depending on alignment).
9398 // * The frontend can determine whether a struct is returned by reference or
9399 // not based on its size and fields. If it will be returned by reference, the
9400 // frontend must modify the prototype so a pointer with the sret annotation is
9401 // passed as the first argument. This is not necessary for large scalar
9402 // returns.
9403 // * Struct return values and varargs should be coerced to structs containing
9404 // register-size fields in the same situations they would be for fixed
9405 // arguments.
9406 
9407 static const MCPhysReg ArgGPRs[] = {
9408   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
9409   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
9410 };
9411 static const MCPhysReg ArgFPR16s[] = {
9412   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
9413   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
9414 };
9415 static const MCPhysReg ArgFPR32s[] = {
9416   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
9417   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
9418 };
9419 static const MCPhysReg ArgFPR64s[] = {
9420   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
9421   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
9422 };
9423 // This is an interim calling convention and it may be changed in the future.
9424 static const MCPhysReg ArgVRs[] = {
9425     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
9426     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
9427     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
9428 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
9429                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
9430                                      RISCV::V20M2, RISCV::V22M2};
9431 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
9432                                      RISCV::V20M4};
9433 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
9434 
9435 // Pass a 2*XLEN argument that has been split into two XLEN values through
9436 // registers or the stack as necessary.
9437 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
9438                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
9439                                 MVT ValVT2, MVT LocVT2,
9440                                 ISD::ArgFlagsTy ArgFlags2) {
9441   unsigned XLenInBytes = XLen / 8;
9442   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9443     // At least one half can be passed via register.
9444     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
9445                                      VA1.getLocVT(), CCValAssign::Full));
9446   } else {
9447     // Both halves must be passed on the stack, with proper alignment.
9448     Align StackAlign =
9449         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
9450     State.addLoc(
9451         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
9452                             State.AllocateStack(XLenInBytes, StackAlign),
9453                             VA1.getLocVT(), CCValAssign::Full));
9454     State.addLoc(CCValAssign::getMem(
9455         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9456         LocVT2, CCValAssign::Full));
9457     return false;
9458   }
9459 
9460   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9461     // The second half can also be passed via register.
9462     State.addLoc(
9463         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
9464   } else {
9465     // The second half is passed via the stack, without additional alignment.
9466     State.addLoc(CCValAssign::getMem(
9467         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9468         LocVT2, CCValAssign::Full));
9469   }
9470 
9471   return false;
9472 }
9473 
9474 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
9475                                Optional<unsigned> FirstMaskArgument,
9476                                CCState &State, const RISCVTargetLowering &TLI) {
9477   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
9478   if (RC == &RISCV::VRRegClass) {
9479     // Assign the first mask argument to V0.
9480     // This is an interim calling convention and it may be changed in the
9481     // future.
9482     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
9483       return State.AllocateReg(RISCV::V0);
9484     return State.AllocateReg(ArgVRs);
9485   }
9486   if (RC == &RISCV::VRM2RegClass)
9487     return State.AllocateReg(ArgVRM2s);
9488   if (RC == &RISCV::VRM4RegClass)
9489     return State.AllocateReg(ArgVRM4s);
9490   if (RC == &RISCV::VRM8RegClass)
9491     return State.AllocateReg(ArgVRM8s);
9492   llvm_unreachable("Unhandled register class for ValueType");
9493 }
9494 
9495 // Implements the RISC-V calling convention. Returns true upon failure.
9496 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
9497                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
9498                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
9499                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
9500                      Optional<unsigned> FirstMaskArgument) {
9501   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
9502   assert(XLen == 32 || XLen == 64);
9503   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
9504 
9505   // Any return value split in to more than two values can't be returned
9506   // directly. Vectors are returned via the available vector registers.
9507   if (!LocVT.isVector() && IsRet && ValNo > 1)
9508     return true;
9509 
9510   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
9511   // variadic argument, or if no F16/F32 argument registers are available.
9512   bool UseGPRForF16_F32 = true;
9513   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
9514   // variadic argument, or if no F64 argument registers are available.
9515   bool UseGPRForF64 = true;
9516 
9517   switch (ABI) {
9518   default:
9519     llvm_unreachable("Unexpected ABI");
9520   case RISCVABI::ABI_ILP32:
9521   case RISCVABI::ABI_LP64:
9522     break;
9523   case RISCVABI::ABI_ILP32F:
9524   case RISCVABI::ABI_LP64F:
9525     UseGPRForF16_F32 = !IsFixed;
9526     break;
9527   case RISCVABI::ABI_ILP32D:
9528   case RISCVABI::ABI_LP64D:
9529     UseGPRForF16_F32 = !IsFixed;
9530     UseGPRForF64 = !IsFixed;
9531     break;
9532   }
9533 
9534   // FPR16, FPR32, and FPR64 alias each other.
9535   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
9536     UseGPRForF16_F32 = true;
9537     UseGPRForF64 = true;
9538   }
9539 
9540   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
9541   // similar local variables rather than directly checking against the target
9542   // ABI.
9543 
9544   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
9545     LocVT = XLenVT;
9546     LocInfo = CCValAssign::BCvt;
9547   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
9548     LocVT = MVT::i64;
9549     LocInfo = CCValAssign::BCvt;
9550   }
9551 
9552   // If this is a variadic argument, the RISC-V calling convention requires
9553   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
9554   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
9555   // be used regardless of whether the original argument was split during
9556   // legalisation or not. The argument will not be passed by registers if the
9557   // original type is larger than 2*XLEN, so the register alignment rule does
9558   // not apply.
9559   unsigned TwoXLenInBytes = (2 * XLen) / 8;
9560   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
9561       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
9562     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
9563     // Skip 'odd' register if necessary.
9564     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
9565       State.AllocateReg(ArgGPRs);
9566   }
9567 
9568   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
9569   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
9570       State.getPendingArgFlags();
9571 
9572   assert(PendingLocs.size() == PendingArgFlags.size() &&
9573          "PendingLocs and PendingArgFlags out of sync");
9574 
9575   // Handle passing f64 on RV32D with a soft float ABI or when floating point
9576   // registers are exhausted.
9577   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
9578     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
9579            "Can't lower f64 if it is split");
9580     // Depending on available argument GPRS, f64 may be passed in a pair of
9581     // GPRs, split between a GPR and the stack, or passed completely on the
9582     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
9583     // cases.
9584     Register Reg = State.AllocateReg(ArgGPRs);
9585     LocVT = MVT::i32;
9586     if (!Reg) {
9587       unsigned StackOffset = State.AllocateStack(8, Align(8));
9588       State.addLoc(
9589           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9590       return false;
9591     }
9592     if (!State.AllocateReg(ArgGPRs))
9593       State.AllocateStack(4, Align(4));
9594     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9595     return false;
9596   }
9597 
9598   // Fixed-length vectors are located in the corresponding scalable-vector
9599   // container types.
9600   if (ValVT.isFixedLengthVector())
9601     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9602 
9603   // Split arguments might be passed indirectly, so keep track of the pending
9604   // values. Split vectors are passed via a mix of registers and indirectly, so
9605   // treat them as we would any other argument.
9606   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
9607     LocVT = XLenVT;
9608     LocInfo = CCValAssign::Indirect;
9609     PendingLocs.push_back(
9610         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
9611     PendingArgFlags.push_back(ArgFlags);
9612     if (!ArgFlags.isSplitEnd()) {
9613       return false;
9614     }
9615   }
9616 
9617   // If the split argument only had two elements, it should be passed directly
9618   // in registers or on the stack.
9619   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
9620       PendingLocs.size() <= 2) {
9621     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
9622     // Apply the normal calling convention rules to the first half of the
9623     // split argument.
9624     CCValAssign VA = PendingLocs[0];
9625     ISD::ArgFlagsTy AF = PendingArgFlags[0];
9626     PendingLocs.clear();
9627     PendingArgFlags.clear();
9628     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
9629                                ArgFlags);
9630   }
9631 
9632   // Allocate to a register if possible, or else a stack slot.
9633   Register Reg;
9634   unsigned StoreSizeBytes = XLen / 8;
9635   Align StackAlign = Align(XLen / 8);
9636 
9637   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
9638     Reg = State.AllocateReg(ArgFPR16s);
9639   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
9640     Reg = State.AllocateReg(ArgFPR32s);
9641   else if (ValVT == MVT::f64 && !UseGPRForF64)
9642     Reg = State.AllocateReg(ArgFPR64s);
9643   else if (ValVT.isVector()) {
9644     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
9645     if (!Reg) {
9646       // For return values, the vector must be passed fully via registers or
9647       // via the stack.
9648       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
9649       // but we're using all of them.
9650       if (IsRet)
9651         return true;
9652       // Try using a GPR to pass the address
9653       if ((Reg = State.AllocateReg(ArgGPRs))) {
9654         LocVT = XLenVT;
9655         LocInfo = CCValAssign::Indirect;
9656       } else if (ValVT.isScalableVector()) {
9657         LocVT = XLenVT;
9658         LocInfo = CCValAssign::Indirect;
9659       } else {
9660         // Pass fixed-length vectors on the stack.
9661         LocVT = ValVT;
9662         StoreSizeBytes = ValVT.getStoreSize();
9663         // Align vectors to their element sizes, being careful for vXi1
9664         // vectors.
9665         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9666       }
9667     }
9668   } else {
9669     Reg = State.AllocateReg(ArgGPRs);
9670   }
9671 
9672   unsigned StackOffset =
9673       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
9674 
9675   // If we reach this point and PendingLocs is non-empty, we must be at the
9676   // end of a split argument that must be passed indirectly.
9677   if (!PendingLocs.empty()) {
9678     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
9679     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
9680 
9681     for (auto &It : PendingLocs) {
9682       if (Reg)
9683         It.convertToReg(Reg);
9684       else
9685         It.convertToMem(StackOffset);
9686       State.addLoc(It);
9687     }
9688     PendingLocs.clear();
9689     PendingArgFlags.clear();
9690     return false;
9691   }
9692 
9693   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
9694           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
9695          "Expected an XLenVT or vector types at this stage");
9696 
9697   if (Reg) {
9698     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9699     return false;
9700   }
9701 
9702   // When a floating-point value is passed on the stack, no bit-conversion is
9703   // needed.
9704   if (ValVT.isFloatingPoint()) {
9705     LocVT = ValVT;
9706     LocInfo = CCValAssign::Full;
9707   }
9708   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9709   return false;
9710 }
9711 
9712 template <typename ArgTy>
9713 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
9714   for (const auto &ArgIdx : enumerate(Args)) {
9715     MVT ArgVT = ArgIdx.value().VT;
9716     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
9717       return ArgIdx.index();
9718   }
9719   return None;
9720 }
9721 
9722 void RISCVTargetLowering::analyzeInputArgs(
9723     MachineFunction &MF, CCState &CCInfo,
9724     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
9725     RISCVCCAssignFn Fn) const {
9726   unsigned NumArgs = Ins.size();
9727   FunctionType *FType = MF.getFunction().getFunctionType();
9728 
9729   Optional<unsigned> FirstMaskArgument;
9730   if (Subtarget.hasVInstructions())
9731     FirstMaskArgument = preAssignMask(Ins);
9732 
9733   for (unsigned i = 0; i != NumArgs; ++i) {
9734     MVT ArgVT = Ins[i].VT;
9735     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
9736 
9737     Type *ArgTy = nullptr;
9738     if (IsRet)
9739       ArgTy = FType->getReturnType();
9740     else if (Ins[i].isOrigArg())
9741       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
9742 
9743     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9744     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9745            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
9746            FirstMaskArgument)) {
9747       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
9748                         << EVT(ArgVT).getEVTString() << '\n');
9749       llvm_unreachable(nullptr);
9750     }
9751   }
9752 }
9753 
9754 void RISCVTargetLowering::analyzeOutputArgs(
9755     MachineFunction &MF, CCState &CCInfo,
9756     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
9757     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
9758   unsigned NumArgs = Outs.size();
9759 
9760   Optional<unsigned> FirstMaskArgument;
9761   if (Subtarget.hasVInstructions())
9762     FirstMaskArgument = preAssignMask(Outs);
9763 
9764   for (unsigned i = 0; i != NumArgs; i++) {
9765     MVT ArgVT = Outs[i].VT;
9766     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
9767     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
9768 
9769     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9770     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9771            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
9772            FirstMaskArgument)) {
9773       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
9774                         << EVT(ArgVT).getEVTString() << "\n");
9775       llvm_unreachable(nullptr);
9776     }
9777   }
9778 }
9779 
9780 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
9781 // values.
9782 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
9783                                    const CCValAssign &VA, const SDLoc &DL,
9784                                    const RISCVSubtarget &Subtarget) {
9785   switch (VA.getLocInfo()) {
9786   default:
9787     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9788   case CCValAssign::Full:
9789     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
9790       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
9791     break;
9792   case CCValAssign::BCvt:
9793     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9794       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
9795     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9796       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
9797     else
9798       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
9799     break;
9800   }
9801   return Val;
9802 }
9803 
9804 // The caller is responsible for loading the full value if the argument is
9805 // passed with CCValAssign::Indirect.
9806 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
9807                                 const CCValAssign &VA, const SDLoc &DL,
9808                                 const RISCVTargetLowering &TLI) {
9809   MachineFunction &MF = DAG.getMachineFunction();
9810   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9811   EVT LocVT = VA.getLocVT();
9812   SDValue Val;
9813   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
9814   Register VReg = RegInfo.createVirtualRegister(RC);
9815   RegInfo.addLiveIn(VA.getLocReg(), VReg);
9816   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
9817 
9818   if (VA.getLocInfo() == CCValAssign::Indirect)
9819     return Val;
9820 
9821   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
9822 }
9823 
9824 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
9825                                    const CCValAssign &VA, const SDLoc &DL,
9826                                    const RISCVSubtarget &Subtarget) {
9827   EVT LocVT = VA.getLocVT();
9828 
9829   switch (VA.getLocInfo()) {
9830   default:
9831     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9832   case CCValAssign::Full:
9833     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
9834       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
9835     break;
9836   case CCValAssign::BCvt:
9837     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9838       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
9839     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9840       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
9841     else
9842       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
9843     break;
9844   }
9845   return Val;
9846 }
9847 
9848 // The caller is responsible for loading the full value if the argument is
9849 // passed with CCValAssign::Indirect.
9850 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
9851                                 const CCValAssign &VA, const SDLoc &DL) {
9852   MachineFunction &MF = DAG.getMachineFunction();
9853   MachineFrameInfo &MFI = MF.getFrameInfo();
9854   EVT LocVT = VA.getLocVT();
9855   EVT ValVT = VA.getValVT();
9856   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
9857   if (ValVT.isScalableVector()) {
9858     // When the value is a scalable vector, we save the pointer which points to
9859     // the scalable vector value in the stack. The ValVT will be the pointer
9860     // type, instead of the scalable vector type.
9861     ValVT = LocVT;
9862   }
9863   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
9864                                  /*IsImmutable=*/true);
9865   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
9866   SDValue Val;
9867 
9868   ISD::LoadExtType ExtType;
9869   switch (VA.getLocInfo()) {
9870   default:
9871     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9872   case CCValAssign::Full:
9873   case CCValAssign::Indirect:
9874   case CCValAssign::BCvt:
9875     ExtType = ISD::NON_EXTLOAD;
9876     break;
9877   }
9878   Val = DAG.getExtLoad(
9879       ExtType, DL, LocVT, Chain, FIN,
9880       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
9881   return Val;
9882 }
9883 
9884 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
9885                                        const CCValAssign &VA, const SDLoc &DL) {
9886   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
9887          "Unexpected VA");
9888   MachineFunction &MF = DAG.getMachineFunction();
9889   MachineFrameInfo &MFI = MF.getFrameInfo();
9890   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9891 
9892   if (VA.isMemLoc()) {
9893     // f64 is passed on the stack.
9894     int FI =
9895         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
9896     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9897     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
9898                        MachinePointerInfo::getFixedStack(MF, FI));
9899   }
9900 
9901   assert(VA.isRegLoc() && "Expected register VA assignment");
9902 
9903   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9904   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
9905   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
9906   SDValue Hi;
9907   if (VA.getLocReg() == RISCV::X17) {
9908     // Second half of f64 is passed on the stack.
9909     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
9910     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9911     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
9912                      MachinePointerInfo::getFixedStack(MF, FI));
9913   } else {
9914     // Second half of f64 is passed in another GPR.
9915     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9916     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
9917     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
9918   }
9919   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
9920 }
9921 
9922 // FastCC has less than 1% performance improvement for some particular
9923 // benchmark. But theoretically, it may has benenfit for some cases.
9924 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
9925                             unsigned ValNo, MVT ValVT, MVT LocVT,
9926                             CCValAssign::LocInfo LocInfo,
9927                             ISD::ArgFlagsTy ArgFlags, CCState &State,
9928                             bool IsFixed, bool IsRet, Type *OrigTy,
9929                             const RISCVTargetLowering &TLI,
9930                             Optional<unsigned> FirstMaskArgument) {
9931 
9932   // X5 and X6 might be used for save-restore libcall.
9933   static const MCPhysReg GPRList[] = {
9934       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
9935       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
9936       RISCV::X29, RISCV::X30, RISCV::X31};
9937 
9938   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9939     if (unsigned Reg = State.AllocateReg(GPRList)) {
9940       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9941       return false;
9942     }
9943   }
9944 
9945   if (LocVT == MVT::f16) {
9946     static const MCPhysReg FPR16List[] = {
9947         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
9948         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
9949         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
9950         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
9951     if (unsigned Reg = State.AllocateReg(FPR16List)) {
9952       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9953       return false;
9954     }
9955   }
9956 
9957   if (LocVT == MVT::f32) {
9958     static const MCPhysReg FPR32List[] = {
9959         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
9960         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
9961         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
9962         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
9963     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9964       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9965       return false;
9966     }
9967   }
9968 
9969   if (LocVT == MVT::f64) {
9970     static const MCPhysReg FPR64List[] = {
9971         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
9972         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
9973         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
9974         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
9975     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9976       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9977       return false;
9978     }
9979   }
9980 
9981   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
9982     unsigned Offset4 = State.AllocateStack(4, Align(4));
9983     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
9984     return false;
9985   }
9986 
9987   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
9988     unsigned Offset5 = State.AllocateStack(8, Align(8));
9989     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
9990     return false;
9991   }
9992 
9993   if (LocVT.isVector()) {
9994     if (unsigned Reg =
9995             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
9996       // Fixed-length vectors are located in the corresponding scalable-vector
9997       // container types.
9998       if (ValVT.isFixedLengthVector())
9999         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
10000       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10001     } else {
10002       // Try and pass the address via a "fast" GPR.
10003       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
10004         LocInfo = CCValAssign::Indirect;
10005         LocVT = TLI.getSubtarget().getXLenVT();
10006         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
10007       } else if (ValVT.isFixedLengthVector()) {
10008         auto StackAlign =
10009             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
10010         unsigned StackOffset =
10011             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
10012         State.addLoc(
10013             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10014       } else {
10015         // Can't pass scalable vectors on the stack.
10016         return true;
10017       }
10018     }
10019 
10020     return false;
10021   }
10022 
10023   return true; // CC didn't match.
10024 }
10025 
10026 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
10027                          CCValAssign::LocInfo LocInfo,
10028                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
10029 
10030   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10031     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
10032     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
10033     static const MCPhysReg GPRList[] = {
10034         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
10035         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
10036     if (unsigned Reg = State.AllocateReg(GPRList)) {
10037       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10038       return false;
10039     }
10040   }
10041 
10042   if (LocVT == MVT::f32) {
10043     // Pass in STG registers: F1, ..., F6
10044     //                        fs0 ... fs5
10045     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
10046                                           RISCV::F18_F, RISCV::F19_F,
10047                                           RISCV::F20_F, RISCV::F21_F};
10048     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10049       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10050       return false;
10051     }
10052   }
10053 
10054   if (LocVT == MVT::f64) {
10055     // Pass in STG registers: D1, ..., D6
10056     //                        fs6 ... fs11
10057     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
10058                                           RISCV::F24_D, RISCV::F25_D,
10059                                           RISCV::F26_D, RISCV::F27_D};
10060     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10061       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10062       return false;
10063     }
10064   }
10065 
10066   report_fatal_error("No registers left in GHC calling convention");
10067   return true;
10068 }
10069 
10070 // Transform physical registers into virtual registers.
10071 SDValue RISCVTargetLowering::LowerFormalArguments(
10072     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
10073     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
10074     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
10075 
10076   MachineFunction &MF = DAG.getMachineFunction();
10077 
10078   switch (CallConv) {
10079   default:
10080     report_fatal_error("Unsupported calling convention");
10081   case CallingConv::C:
10082   case CallingConv::Fast:
10083     break;
10084   case CallingConv::GHC:
10085     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
10086         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
10087       report_fatal_error(
10088         "GHC calling convention requires the F and D instruction set extensions");
10089   }
10090 
10091   const Function &Func = MF.getFunction();
10092   if (Func.hasFnAttribute("interrupt")) {
10093     if (!Func.arg_empty())
10094       report_fatal_error(
10095         "Functions with the interrupt attribute cannot have arguments!");
10096 
10097     StringRef Kind =
10098       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10099 
10100     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
10101       report_fatal_error(
10102         "Function interrupt attribute argument not supported!");
10103   }
10104 
10105   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10106   MVT XLenVT = Subtarget.getXLenVT();
10107   unsigned XLenInBytes = Subtarget.getXLen() / 8;
10108   // Used with vargs to acumulate store chains.
10109   std::vector<SDValue> OutChains;
10110 
10111   // Assign locations to all of the incoming arguments.
10112   SmallVector<CCValAssign, 16> ArgLocs;
10113   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10114 
10115   if (CallConv == CallingConv::GHC)
10116     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
10117   else
10118     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
10119                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10120                                                    : CC_RISCV);
10121 
10122   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
10123     CCValAssign &VA = ArgLocs[i];
10124     SDValue ArgValue;
10125     // Passing f64 on RV32D with a soft float ABI must be handled as a special
10126     // case.
10127     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
10128       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
10129     else if (VA.isRegLoc())
10130       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
10131     else
10132       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
10133 
10134     if (VA.getLocInfo() == CCValAssign::Indirect) {
10135       // If the original argument was split and passed by reference (e.g. i128
10136       // on RV32), we need to load all parts of it here (using the same
10137       // address). Vectors may be partly split to registers and partly to the
10138       // stack, in which case the base address is partly offset and subsequent
10139       // stores are relative to that.
10140       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
10141                                    MachinePointerInfo()));
10142       unsigned ArgIndex = Ins[i].OrigArgIndex;
10143       unsigned ArgPartOffset = Ins[i].PartOffset;
10144       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10145       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
10146         CCValAssign &PartVA = ArgLocs[i + 1];
10147         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
10148         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10149         if (PartVA.getValVT().isScalableVector())
10150           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10151         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
10152         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
10153                                      MachinePointerInfo()));
10154         ++i;
10155       }
10156       continue;
10157     }
10158     InVals.push_back(ArgValue);
10159   }
10160 
10161   if (IsVarArg) {
10162     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
10163     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
10164     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
10165     MachineFrameInfo &MFI = MF.getFrameInfo();
10166     MachineRegisterInfo &RegInfo = MF.getRegInfo();
10167     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
10168 
10169     // Offset of the first variable argument from stack pointer, and size of
10170     // the vararg save area. For now, the varargs save area is either zero or
10171     // large enough to hold a0-a7.
10172     int VaArgOffset, VarArgsSaveSize;
10173 
10174     // If all registers are allocated, then all varargs must be passed on the
10175     // stack and we don't need to save any argregs.
10176     if (ArgRegs.size() == Idx) {
10177       VaArgOffset = CCInfo.getNextStackOffset();
10178       VarArgsSaveSize = 0;
10179     } else {
10180       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
10181       VaArgOffset = -VarArgsSaveSize;
10182     }
10183 
10184     // Record the frame index of the first variable argument
10185     // which is a value necessary to VASTART.
10186     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10187     RVFI->setVarArgsFrameIndex(FI);
10188 
10189     // If saving an odd number of registers then create an extra stack slot to
10190     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
10191     // offsets to even-numbered registered remain 2*XLEN-aligned.
10192     if (Idx % 2) {
10193       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
10194       VarArgsSaveSize += XLenInBytes;
10195     }
10196 
10197     // Copy the integer registers that may have been used for passing varargs
10198     // to the vararg save area.
10199     for (unsigned I = Idx; I < ArgRegs.size();
10200          ++I, VaArgOffset += XLenInBytes) {
10201       const Register Reg = RegInfo.createVirtualRegister(RC);
10202       RegInfo.addLiveIn(ArgRegs[I], Reg);
10203       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
10204       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10205       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10206       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
10207                                    MachinePointerInfo::getFixedStack(MF, FI));
10208       cast<StoreSDNode>(Store.getNode())
10209           ->getMemOperand()
10210           ->setValue((Value *)nullptr);
10211       OutChains.push_back(Store);
10212     }
10213     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
10214   }
10215 
10216   // All stores are grouped in one node to allow the matching between
10217   // the size of Ins and InVals. This only happens for vararg functions.
10218   if (!OutChains.empty()) {
10219     OutChains.push_back(Chain);
10220     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
10221   }
10222 
10223   return Chain;
10224 }
10225 
10226 /// isEligibleForTailCallOptimization - Check whether the call is eligible
10227 /// for tail call optimization.
10228 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
10229 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
10230     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
10231     const SmallVector<CCValAssign, 16> &ArgLocs) const {
10232 
10233   auto &Callee = CLI.Callee;
10234   auto CalleeCC = CLI.CallConv;
10235   auto &Outs = CLI.Outs;
10236   auto &Caller = MF.getFunction();
10237   auto CallerCC = Caller.getCallingConv();
10238 
10239   // Exception-handling functions need a special set of instructions to
10240   // indicate a return to the hardware. Tail-calling another function would
10241   // probably break this.
10242   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
10243   // should be expanded as new function attributes are introduced.
10244   if (Caller.hasFnAttribute("interrupt"))
10245     return false;
10246 
10247   // Do not tail call opt if the stack is used to pass parameters.
10248   if (CCInfo.getNextStackOffset() != 0)
10249     return false;
10250 
10251   // Do not tail call opt if any parameters need to be passed indirectly.
10252   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
10253   // passed indirectly. So the address of the value will be passed in a
10254   // register, or if not available, then the address is put on the stack. In
10255   // order to pass indirectly, space on the stack often needs to be allocated
10256   // in order to store the value. In this case the CCInfo.getNextStackOffset()
10257   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
10258   // are passed CCValAssign::Indirect.
10259   for (auto &VA : ArgLocs)
10260     if (VA.getLocInfo() == CCValAssign::Indirect)
10261       return false;
10262 
10263   // Do not tail call opt if either caller or callee uses struct return
10264   // semantics.
10265   auto IsCallerStructRet = Caller.hasStructRetAttr();
10266   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
10267   if (IsCallerStructRet || IsCalleeStructRet)
10268     return false;
10269 
10270   // Externally-defined functions with weak linkage should not be
10271   // tail-called. The behaviour of branch instructions in this situation (as
10272   // used for tail calls) is implementation-defined, so we cannot rely on the
10273   // linker replacing the tail call with a return.
10274   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
10275     const GlobalValue *GV = G->getGlobal();
10276     if (GV->hasExternalWeakLinkage())
10277       return false;
10278   }
10279 
10280   // The callee has to preserve all registers the caller needs to preserve.
10281   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
10282   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
10283   if (CalleeCC != CallerCC) {
10284     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
10285     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
10286       return false;
10287   }
10288 
10289   // Byval parameters hand the function a pointer directly into the stack area
10290   // we want to reuse during a tail call. Working around this *is* possible
10291   // but less efficient and uglier in LowerCall.
10292   for (auto &Arg : Outs)
10293     if (Arg.Flags.isByVal())
10294       return false;
10295 
10296   return true;
10297 }
10298 
10299 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
10300   return DAG.getDataLayout().getPrefTypeAlign(
10301       VT.getTypeForEVT(*DAG.getContext()));
10302 }
10303 
10304 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
10305 // and output parameter nodes.
10306 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
10307                                        SmallVectorImpl<SDValue> &InVals) const {
10308   SelectionDAG &DAG = CLI.DAG;
10309   SDLoc &DL = CLI.DL;
10310   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
10311   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
10312   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
10313   SDValue Chain = CLI.Chain;
10314   SDValue Callee = CLI.Callee;
10315   bool &IsTailCall = CLI.IsTailCall;
10316   CallingConv::ID CallConv = CLI.CallConv;
10317   bool IsVarArg = CLI.IsVarArg;
10318   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10319   MVT XLenVT = Subtarget.getXLenVT();
10320 
10321   MachineFunction &MF = DAG.getMachineFunction();
10322 
10323   // Analyze the operands of the call, assigning locations to each operand.
10324   SmallVector<CCValAssign, 16> ArgLocs;
10325   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10326 
10327   if (CallConv == CallingConv::GHC)
10328     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
10329   else
10330     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
10331                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10332                                                     : CC_RISCV);
10333 
10334   // Check if it's really possible to do a tail call.
10335   if (IsTailCall)
10336     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
10337 
10338   if (IsTailCall)
10339     ++NumTailCalls;
10340   else if (CLI.CB && CLI.CB->isMustTailCall())
10341     report_fatal_error("failed to perform tail call elimination on a call "
10342                        "site marked musttail");
10343 
10344   // Get a count of how many bytes are to be pushed on the stack.
10345   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
10346 
10347   // Create local copies for byval args
10348   SmallVector<SDValue, 8> ByValArgs;
10349   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10350     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10351     if (!Flags.isByVal())
10352       continue;
10353 
10354     SDValue Arg = OutVals[i];
10355     unsigned Size = Flags.getByValSize();
10356     Align Alignment = Flags.getNonZeroByValAlign();
10357 
10358     int FI =
10359         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
10360     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10361     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
10362 
10363     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
10364                           /*IsVolatile=*/false,
10365                           /*AlwaysInline=*/false, IsTailCall,
10366                           MachinePointerInfo(), MachinePointerInfo());
10367     ByValArgs.push_back(FIPtr);
10368   }
10369 
10370   if (!IsTailCall)
10371     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
10372 
10373   // Copy argument values to their designated locations.
10374   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
10375   SmallVector<SDValue, 8> MemOpChains;
10376   SDValue StackPtr;
10377   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
10378     CCValAssign &VA = ArgLocs[i];
10379     SDValue ArgValue = OutVals[i];
10380     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10381 
10382     // Handle passing f64 on RV32D with a soft float ABI as a special case.
10383     bool IsF64OnRV32DSoftABI =
10384         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
10385     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
10386       SDValue SplitF64 = DAG.getNode(
10387           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
10388       SDValue Lo = SplitF64.getValue(0);
10389       SDValue Hi = SplitF64.getValue(1);
10390 
10391       Register RegLo = VA.getLocReg();
10392       RegsToPass.push_back(std::make_pair(RegLo, Lo));
10393 
10394       if (RegLo == RISCV::X17) {
10395         // Second half of f64 is passed on the stack.
10396         // Work out the address of the stack slot.
10397         if (!StackPtr.getNode())
10398           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10399         // Emit the store.
10400         MemOpChains.push_back(
10401             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
10402       } else {
10403         // Second half of f64 is passed in another GPR.
10404         assert(RegLo < RISCV::X31 && "Invalid register pair");
10405         Register RegHigh = RegLo + 1;
10406         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
10407       }
10408       continue;
10409     }
10410 
10411     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
10412     // as any other MemLoc.
10413 
10414     // Promote the value if needed.
10415     // For now, only handle fully promoted and indirect arguments.
10416     if (VA.getLocInfo() == CCValAssign::Indirect) {
10417       // Store the argument in a stack slot and pass its address.
10418       Align StackAlign =
10419           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
10420                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
10421       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
10422       // If the original argument was split (e.g. i128), we need
10423       // to store the required parts of it here (and pass just one address).
10424       // Vectors may be partly split to registers and partly to the stack, in
10425       // which case the base address is partly offset and subsequent stores are
10426       // relative to that.
10427       unsigned ArgIndex = Outs[i].OrigArgIndex;
10428       unsigned ArgPartOffset = Outs[i].PartOffset;
10429       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10430       // Calculate the total size to store. We don't have access to what we're
10431       // actually storing other than performing the loop and collecting the
10432       // info.
10433       SmallVector<std::pair<SDValue, SDValue>> Parts;
10434       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
10435         SDValue PartValue = OutVals[i + 1];
10436         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
10437         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10438         EVT PartVT = PartValue.getValueType();
10439         if (PartVT.isScalableVector())
10440           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10441         StoredSize += PartVT.getStoreSize();
10442         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
10443         Parts.push_back(std::make_pair(PartValue, Offset));
10444         ++i;
10445       }
10446       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
10447       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
10448       MemOpChains.push_back(
10449           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
10450                        MachinePointerInfo::getFixedStack(MF, FI)));
10451       for (const auto &Part : Parts) {
10452         SDValue PartValue = Part.first;
10453         SDValue PartOffset = Part.second;
10454         SDValue Address =
10455             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
10456         MemOpChains.push_back(
10457             DAG.getStore(Chain, DL, PartValue, Address,
10458                          MachinePointerInfo::getFixedStack(MF, FI)));
10459       }
10460       ArgValue = SpillSlot;
10461     } else {
10462       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
10463     }
10464 
10465     // Use local copy if it is a byval arg.
10466     if (Flags.isByVal())
10467       ArgValue = ByValArgs[j++];
10468 
10469     if (VA.isRegLoc()) {
10470       // Queue up the argument copies and emit them at the end.
10471       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
10472     } else {
10473       assert(VA.isMemLoc() && "Argument not register or memory");
10474       assert(!IsTailCall && "Tail call not allowed if stack is used "
10475                             "for passing parameters");
10476 
10477       // Work out the address of the stack slot.
10478       if (!StackPtr.getNode())
10479         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10480       SDValue Address =
10481           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
10482                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
10483 
10484       // Emit the store.
10485       MemOpChains.push_back(
10486           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
10487     }
10488   }
10489 
10490   // Join the stores, which are independent of one another.
10491   if (!MemOpChains.empty())
10492     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
10493 
10494   SDValue Glue;
10495 
10496   // Build a sequence of copy-to-reg nodes, chained and glued together.
10497   for (auto &Reg : RegsToPass) {
10498     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
10499     Glue = Chain.getValue(1);
10500   }
10501 
10502   // Validate that none of the argument registers have been marked as
10503   // reserved, if so report an error. Do the same for the return address if this
10504   // is not a tailcall.
10505   validateCCReservedRegs(RegsToPass, MF);
10506   if (!IsTailCall &&
10507       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
10508     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10509         MF.getFunction(),
10510         "Return address register required, but has been reserved."});
10511 
10512   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
10513   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
10514   // split it and then direct call can be matched by PseudoCALL.
10515   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
10516     const GlobalValue *GV = S->getGlobal();
10517 
10518     unsigned OpFlags = RISCVII::MO_CALL;
10519     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
10520       OpFlags = RISCVII::MO_PLT;
10521 
10522     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
10523   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
10524     unsigned OpFlags = RISCVII::MO_CALL;
10525 
10526     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
10527                                                  nullptr))
10528       OpFlags = RISCVII::MO_PLT;
10529 
10530     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
10531   }
10532 
10533   // The first call operand is the chain and the second is the target address.
10534   SmallVector<SDValue, 8> Ops;
10535   Ops.push_back(Chain);
10536   Ops.push_back(Callee);
10537 
10538   // Add argument registers to the end of the list so that they are
10539   // known live into the call.
10540   for (auto &Reg : RegsToPass)
10541     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
10542 
10543   if (!IsTailCall) {
10544     // Add a register mask operand representing the call-preserved registers.
10545     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
10546     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
10547     assert(Mask && "Missing call preserved mask for calling convention");
10548     Ops.push_back(DAG.getRegisterMask(Mask));
10549   }
10550 
10551   // Glue the call to the argument copies, if any.
10552   if (Glue.getNode())
10553     Ops.push_back(Glue);
10554 
10555   // Emit the call.
10556   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10557 
10558   if (IsTailCall) {
10559     MF.getFrameInfo().setHasTailCall();
10560     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
10561   }
10562 
10563   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
10564   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
10565   Glue = Chain.getValue(1);
10566 
10567   // Mark the end of the call, which is glued to the call itself.
10568   Chain = DAG.getCALLSEQ_END(Chain,
10569                              DAG.getConstant(NumBytes, DL, PtrVT, true),
10570                              DAG.getConstant(0, DL, PtrVT, true),
10571                              Glue, DL);
10572   Glue = Chain.getValue(1);
10573 
10574   // Assign locations to each value returned by this call.
10575   SmallVector<CCValAssign, 16> RVLocs;
10576   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
10577   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
10578 
10579   // Copy all of the result registers out of their specified physreg.
10580   for (auto &VA : RVLocs) {
10581     // Copy the value out
10582     SDValue RetValue =
10583         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
10584     // Glue the RetValue to the end of the call sequence
10585     Chain = RetValue.getValue(1);
10586     Glue = RetValue.getValue(2);
10587 
10588     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10589       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
10590       SDValue RetValue2 =
10591           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
10592       Chain = RetValue2.getValue(1);
10593       Glue = RetValue2.getValue(2);
10594       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
10595                              RetValue2);
10596     }
10597 
10598     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
10599 
10600     InVals.push_back(RetValue);
10601   }
10602 
10603   return Chain;
10604 }
10605 
10606 bool RISCVTargetLowering::CanLowerReturn(
10607     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
10608     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
10609   SmallVector<CCValAssign, 16> RVLocs;
10610   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
10611 
10612   Optional<unsigned> FirstMaskArgument;
10613   if (Subtarget.hasVInstructions())
10614     FirstMaskArgument = preAssignMask(Outs);
10615 
10616   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10617     MVT VT = Outs[i].VT;
10618     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10619     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10620     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
10621                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
10622                  *this, FirstMaskArgument))
10623       return false;
10624   }
10625   return true;
10626 }
10627 
10628 SDValue
10629 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
10630                                  bool IsVarArg,
10631                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
10632                                  const SmallVectorImpl<SDValue> &OutVals,
10633                                  const SDLoc &DL, SelectionDAG &DAG) const {
10634   const MachineFunction &MF = DAG.getMachineFunction();
10635   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10636 
10637   // Stores the assignment of the return value to a location.
10638   SmallVector<CCValAssign, 16> RVLocs;
10639 
10640   // Info about the registers and stack slot.
10641   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
10642                  *DAG.getContext());
10643 
10644   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
10645                     nullptr, CC_RISCV);
10646 
10647   if (CallConv == CallingConv::GHC && !RVLocs.empty())
10648     report_fatal_error("GHC functions return void only");
10649 
10650   SDValue Glue;
10651   SmallVector<SDValue, 4> RetOps(1, Chain);
10652 
10653   // Copy the result values into the output registers.
10654   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
10655     SDValue Val = OutVals[i];
10656     CCValAssign &VA = RVLocs[i];
10657     assert(VA.isRegLoc() && "Can only return in registers!");
10658 
10659     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10660       // Handle returning f64 on RV32D with a soft float ABI.
10661       assert(VA.isRegLoc() && "Expected return via registers");
10662       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
10663                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
10664       SDValue Lo = SplitF64.getValue(0);
10665       SDValue Hi = SplitF64.getValue(1);
10666       Register RegLo = VA.getLocReg();
10667       assert(RegLo < RISCV::X31 && "Invalid register pair");
10668       Register RegHi = RegLo + 1;
10669 
10670       if (STI.isRegisterReservedByUser(RegLo) ||
10671           STI.isRegisterReservedByUser(RegHi))
10672         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10673             MF.getFunction(),
10674             "Return value register required, but has been reserved."});
10675 
10676       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
10677       Glue = Chain.getValue(1);
10678       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
10679       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
10680       Glue = Chain.getValue(1);
10681       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
10682     } else {
10683       // Handle a 'normal' return.
10684       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
10685       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
10686 
10687       if (STI.isRegisterReservedByUser(VA.getLocReg()))
10688         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10689             MF.getFunction(),
10690             "Return value register required, but has been reserved."});
10691 
10692       // Guarantee that all emitted copies are stuck together.
10693       Glue = Chain.getValue(1);
10694       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
10695     }
10696   }
10697 
10698   RetOps[0] = Chain; // Update chain.
10699 
10700   // Add the glue node if we have it.
10701   if (Glue.getNode()) {
10702     RetOps.push_back(Glue);
10703   }
10704 
10705   unsigned RetOpc = RISCVISD::RET_FLAG;
10706   // Interrupt service routines use different return instructions.
10707   const Function &Func = DAG.getMachineFunction().getFunction();
10708   if (Func.hasFnAttribute("interrupt")) {
10709     if (!Func.getReturnType()->isVoidTy())
10710       report_fatal_error(
10711           "Functions with the interrupt attribute must have void return type!");
10712 
10713     MachineFunction &MF = DAG.getMachineFunction();
10714     StringRef Kind =
10715       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10716 
10717     if (Kind == "user")
10718       RetOpc = RISCVISD::URET_FLAG;
10719     else if (Kind == "supervisor")
10720       RetOpc = RISCVISD::SRET_FLAG;
10721     else
10722       RetOpc = RISCVISD::MRET_FLAG;
10723   }
10724 
10725   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
10726 }
10727 
10728 void RISCVTargetLowering::validateCCReservedRegs(
10729     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
10730     MachineFunction &MF) const {
10731   const Function &F = MF.getFunction();
10732   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10733 
10734   if (llvm::any_of(Regs, [&STI](auto Reg) {
10735         return STI.isRegisterReservedByUser(Reg.first);
10736       }))
10737     F.getContext().diagnose(DiagnosticInfoUnsupported{
10738         F, "Argument register required, but has been reserved."});
10739 }
10740 
10741 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
10742   return CI->isTailCall();
10743 }
10744 
10745 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
10746 #define NODE_NAME_CASE(NODE)                                                   \
10747   case RISCVISD::NODE:                                                         \
10748     return "RISCVISD::" #NODE;
10749   // clang-format off
10750   switch ((RISCVISD::NodeType)Opcode) {
10751   case RISCVISD::FIRST_NUMBER:
10752     break;
10753   NODE_NAME_CASE(RET_FLAG)
10754   NODE_NAME_CASE(URET_FLAG)
10755   NODE_NAME_CASE(SRET_FLAG)
10756   NODE_NAME_CASE(MRET_FLAG)
10757   NODE_NAME_CASE(CALL)
10758   NODE_NAME_CASE(SELECT_CC)
10759   NODE_NAME_CASE(BR_CC)
10760   NODE_NAME_CASE(BuildPairF64)
10761   NODE_NAME_CASE(SplitF64)
10762   NODE_NAME_CASE(TAIL)
10763   NODE_NAME_CASE(MULHSU)
10764   NODE_NAME_CASE(SLLW)
10765   NODE_NAME_CASE(SRAW)
10766   NODE_NAME_CASE(SRLW)
10767   NODE_NAME_CASE(DIVW)
10768   NODE_NAME_CASE(DIVUW)
10769   NODE_NAME_CASE(REMUW)
10770   NODE_NAME_CASE(ROLW)
10771   NODE_NAME_CASE(RORW)
10772   NODE_NAME_CASE(CLZW)
10773   NODE_NAME_CASE(CTZW)
10774   NODE_NAME_CASE(FSLW)
10775   NODE_NAME_CASE(FSRW)
10776   NODE_NAME_CASE(FSL)
10777   NODE_NAME_CASE(FSR)
10778   NODE_NAME_CASE(FMV_H_X)
10779   NODE_NAME_CASE(FMV_X_ANYEXTH)
10780   NODE_NAME_CASE(FMV_X_SIGNEXTH)
10781   NODE_NAME_CASE(FMV_W_X_RV64)
10782   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
10783   NODE_NAME_CASE(FCVT_X)
10784   NODE_NAME_CASE(FCVT_XU)
10785   NODE_NAME_CASE(FCVT_W_RV64)
10786   NODE_NAME_CASE(FCVT_WU_RV64)
10787   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
10788   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
10789   NODE_NAME_CASE(READ_CYCLE_WIDE)
10790   NODE_NAME_CASE(GREV)
10791   NODE_NAME_CASE(GREVW)
10792   NODE_NAME_CASE(GORC)
10793   NODE_NAME_CASE(GORCW)
10794   NODE_NAME_CASE(SHFL)
10795   NODE_NAME_CASE(SHFLW)
10796   NODE_NAME_CASE(UNSHFL)
10797   NODE_NAME_CASE(UNSHFLW)
10798   NODE_NAME_CASE(BFP)
10799   NODE_NAME_CASE(BFPW)
10800   NODE_NAME_CASE(BCOMPRESS)
10801   NODE_NAME_CASE(BCOMPRESSW)
10802   NODE_NAME_CASE(BDECOMPRESS)
10803   NODE_NAME_CASE(BDECOMPRESSW)
10804   NODE_NAME_CASE(VMV_V_X_VL)
10805   NODE_NAME_CASE(VFMV_V_F_VL)
10806   NODE_NAME_CASE(VMV_X_S)
10807   NODE_NAME_CASE(VMV_S_X_VL)
10808   NODE_NAME_CASE(VFMV_S_F_VL)
10809   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
10810   NODE_NAME_CASE(READ_VLENB)
10811   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
10812   NODE_NAME_CASE(VSLIDEUP_VL)
10813   NODE_NAME_CASE(VSLIDE1UP_VL)
10814   NODE_NAME_CASE(VSLIDEDOWN_VL)
10815   NODE_NAME_CASE(VSLIDE1DOWN_VL)
10816   NODE_NAME_CASE(VID_VL)
10817   NODE_NAME_CASE(VFNCVT_ROD_VL)
10818   NODE_NAME_CASE(VECREDUCE_ADD_VL)
10819   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
10820   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
10821   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
10822   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
10823   NODE_NAME_CASE(VECREDUCE_AND_VL)
10824   NODE_NAME_CASE(VECREDUCE_OR_VL)
10825   NODE_NAME_CASE(VECREDUCE_XOR_VL)
10826   NODE_NAME_CASE(VECREDUCE_FADD_VL)
10827   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
10828   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
10829   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
10830   NODE_NAME_CASE(ADD_VL)
10831   NODE_NAME_CASE(AND_VL)
10832   NODE_NAME_CASE(MUL_VL)
10833   NODE_NAME_CASE(OR_VL)
10834   NODE_NAME_CASE(SDIV_VL)
10835   NODE_NAME_CASE(SHL_VL)
10836   NODE_NAME_CASE(SREM_VL)
10837   NODE_NAME_CASE(SRA_VL)
10838   NODE_NAME_CASE(SRL_VL)
10839   NODE_NAME_CASE(SUB_VL)
10840   NODE_NAME_CASE(UDIV_VL)
10841   NODE_NAME_CASE(UREM_VL)
10842   NODE_NAME_CASE(XOR_VL)
10843   NODE_NAME_CASE(SADDSAT_VL)
10844   NODE_NAME_CASE(UADDSAT_VL)
10845   NODE_NAME_CASE(SSUBSAT_VL)
10846   NODE_NAME_CASE(USUBSAT_VL)
10847   NODE_NAME_CASE(FADD_VL)
10848   NODE_NAME_CASE(FSUB_VL)
10849   NODE_NAME_CASE(FMUL_VL)
10850   NODE_NAME_CASE(FDIV_VL)
10851   NODE_NAME_CASE(FNEG_VL)
10852   NODE_NAME_CASE(FABS_VL)
10853   NODE_NAME_CASE(FSQRT_VL)
10854   NODE_NAME_CASE(FMA_VL)
10855   NODE_NAME_CASE(FCOPYSIGN_VL)
10856   NODE_NAME_CASE(SMIN_VL)
10857   NODE_NAME_CASE(SMAX_VL)
10858   NODE_NAME_CASE(UMIN_VL)
10859   NODE_NAME_CASE(UMAX_VL)
10860   NODE_NAME_CASE(FMINNUM_VL)
10861   NODE_NAME_CASE(FMAXNUM_VL)
10862   NODE_NAME_CASE(MULHS_VL)
10863   NODE_NAME_CASE(MULHU_VL)
10864   NODE_NAME_CASE(FP_TO_SINT_VL)
10865   NODE_NAME_CASE(FP_TO_UINT_VL)
10866   NODE_NAME_CASE(SINT_TO_FP_VL)
10867   NODE_NAME_CASE(UINT_TO_FP_VL)
10868   NODE_NAME_CASE(FP_EXTEND_VL)
10869   NODE_NAME_CASE(FP_ROUND_VL)
10870   NODE_NAME_CASE(VWMUL_VL)
10871   NODE_NAME_CASE(VWMULU_VL)
10872   NODE_NAME_CASE(VWMULSU_VL)
10873   NODE_NAME_CASE(VWADD_VL)
10874   NODE_NAME_CASE(VWADDU_VL)
10875   NODE_NAME_CASE(VWSUB_VL)
10876   NODE_NAME_CASE(VWSUBU_VL)
10877   NODE_NAME_CASE(VWADD_W_VL)
10878   NODE_NAME_CASE(VWADDU_W_VL)
10879   NODE_NAME_CASE(VWSUB_W_VL)
10880   NODE_NAME_CASE(VWSUBU_W_VL)
10881   NODE_NAME_CASE(SETCC_VL)
10882   NODE_NAME_CASE(VSELECT_VL)
10883   NODE_NAME_CASE(VP_MERGE_VL)
10884   NODE_NAME_CASE(VMAND_VL)
10885   NODE_NAME_CASE(VMOR_VL)
10886   NODE_NAME_CASE(VMXOR_VL)
10887   NODE_NAME_CASE(VMCLR_VL)
10888   NODE_NAME_CASE(VMSET_VL)
10889   NODE_NAME_CASE(VRGATHER_VX_VL)
10890   NODE_NAME_CASE(VRGATHER_VV_VL)
10891   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
10892   NODE_NAME_CASE(VSEXT_VL)
10893   NODE_NAME_CASE(VZEXT_VL)
10894   NODE_NAME_CASE(VCPOP_VL)
10895   NODE_NAME_CASE(READ_CSR)
10896   NODE_NAME_CASE(WRITE_CSR)
10897   NODE_NAME_CASE(SWAP_CSR)
10898   }
10899   // clang-format on
10900   return nullptr;
10901 #undef NODE_NAME_CASE
10902 }
10903 
10904 /// getConstraintType - Given a constraint letter, return the type of
10905 /// constraint it is for this target.
10906 RISCVTargetLowering::ConstraintType
10907 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
10908   if (Constraint.size() == 1) {
10909     switch (Constraint[0]) {
10910     default:
10911       break;
10912     case 'f':
10913       return C_RegisterClass;
10914     case 'I':
10915     case 'J':
10916     case 'K':
10917       return C_Immediate;
10918     case 'A':
10919       return C_Memory;
10920     case 'S': // A symbolic address
10921       return C_Other;
10922     }
10923   } else {
10924     if (Constraint == "vr" || Constraint == "vm")
10925       return C_RegisterClass;
10926   }
10927   return TargetLowering::getConstraintType(Constraint);
10928 }
10929 
10930 std::pair<unsigned, const TargetRegisterClass *>
10931 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
10932                                                   StringRef Constraint,
10933                                                   MVT VT) const {
10934   // First, see if this is a constraint that directly corresponds to a
10935   // RISCV register class.
10936   if (Constraint.size() == 1) {
10937     switch (Constraint[0]) {
10938     case 'r':
10939       // TODO: Support fixed vectors up to XLen for P extension?
10940       if (VT.isVector())
10941         break;
10942       return std::make_pair(0U, &RISCV::GPRRegClass);
10943     case 'f':
10944       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
10945         return std::make_pair(0U, &RISCV::FPR16RegClass);
10946       if (Subtarget.hasStdExtF() && VT == MVT::f32)
10947         return std::make_pair(0U, &RISCV::FPR32RegClass);
10948       if (Subtarget.hasStdExtD() && VT == MVT::f64)
10949         return std::make_pair(0U, &RISCV::FPR64RegClass);
10950       break;
10951     default:
10952       break;
10953     }
10954   } else if (Constraint == "vr") {
10955     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
10956                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10957       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
10958         return std::make_pair(0U, RC);
10959     }
10960   } else if (Constraint == "vm") {
10961     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
10962       return std::make_pair(0U, &RISCV::VMV0RegClass);
10963   }
10964 
10965   // Clang will correctly decode the usage of register name aliases into their
10966   // official names. However, other frontends like `rustc` do not. This allows
10967   // users of these frontends to use the ABI names for registers in LLVM-style
10968   // register constraints.
10969   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
10970                                .Case("{zero}", RISCV::X0)
10971                                .Case("{ra}", RISCV::X1)
10972                                .Case("{sp}", RISCV::X2)
10973                                .Case("{gp}", RISCV::X3)
10974                                .Case("{tp}", RISCV::X4)
10975                                .Case("{t0}", RISCV::X5)
10976                                .Case("{t1}", RISCV::X6)
10977                                .Case("{t2}", RISCV::X7)
10978                                .Cases("{s0}", "{fp}", RISCV::X8)
10979                                .Case("{s1}", RISCV::X9)
10980                                .Case("{a0}", RISCV::X10)
10981                                .Case("{a1}", RISCV::X11)
10982                                .Case("{a2}", RISCV::X12)
10983                                .Case("{a3}", RISCV::X13)
10984                                .Case("{a4}", RISCV::X14)
10985                                .Case("{a5}", RISCV::X15)
10986                                .Case("{a6}", RISCV::X16)
10987                                .Case("{a7}", RISCV::X17)
10988                                .Case("{s2}", RISCV::X18)
10989                                .Case("{s3}", RISCV::X19)
10990                                .Case("{s4}", RISCV::X20)
10991                                .Case("{s5}", RISCV::X21)
10992                                .Case("{s6}", RISCV::X22)
10993                                .Case("{s7}", RISCV::X23)
10994                                .Case("{s8}", RISCV::X24)
10995                                .Case("{s9}", RISCV::X25)
10996                                .Case("{s10}", RISCV::X26)
10997                                .Case("{s11}", RISCV::X27)
10998                                .Case("{t3}", RISCV::X28)
10999                                .Case("{t4}", RISCV::X29)
11000                                .Case("{t5}", RISCV::X30)
11001                                .Case("{t6}", RISCV::X31)
11002                                .Default(RISCV::NoRegister);
11003   if (XRegFromAlias != RISCV::NoRegister)
11004     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
11005 
11006   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
11007   // TableGen record rather than the AsmName to choose registers for InlineAsm
11008   // constraints, plus we want to match those names to the widest floating point
11009   // register type available, manually select floating point registers here.
11010   //
11011   // The second case is the ABI name of the register, so that frontends can also
11012   // use the ABI names in register constraint lists.
11013   if (Subtarget.hasStdExtF()) {
11014     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
11015                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
11016                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
11017                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
11018                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
11019                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
11020                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
11021                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
11022                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
11023                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
11024                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
11025                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
11026                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
11027                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
11028                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
11029                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
11030                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
11031                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
11032                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
11033                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
11034                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
11035                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
11036                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
11037                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
11038                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
11039                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
11040                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
11041                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
11042                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
11043                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
11044                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
11045                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
11046                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
11047                         .Default(RISCV::NoRegister);
11048     if (FReg != RISCV::NoRegister) {
11049       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
11050       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
11051         unsigned RegNo = FReg - RISCV::F0_F;
11052         unsigned DReg = RISCV::F0_D + RegNo;
11053         return std::make_pair(DReg, &RISCV::FPR64RegClass);
11054       }
11055       if (VT == MVT::f32 || VT == MVT::Other)
11056         return std::make_pair(FReg, &RISCV::FPR32RegClass);
11057       if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
11058         unsigned RegNo = FReg - RISCV::F0_F;
11059         unsigned HReg = RISCV::F0_H + RegNo;
11060         return std::make_pair(HReg, &RISCV::FPR16RegClass);
11061       }
11062     }
11063   }
11064 
11065   if (Subtarget.hasVInstructions()) {
11066     Register VReg = StringSwitch<Register>(Constraint.lower())
11067                         .Case("{v0}", RISCV::V0)
11068                         .Case("{v1}", RISCV::V1)
11069                         .Case("{v2}", RISCV::V2)
11070                         .Case("{v3}", RISCV::V3)
11071                         .Case("{v4}", RISCV::V4)
11072                         .Case("{v5}", RISCV::V5)
11073                         .Case("{v6}", RISCV::V6)
11074                         .Case("{v7}", RISCV::V7)
11075                         .Case("{v8}", RISCV::V8)
11076                         .Case("{v9}", RISCV::V9)
11077                         .Case("{v10}", RISCV::V10)
11078                         .Case("{v11}", RISCV::V11)
11079                         .Case("{v12}", RISCV::V12)
11080                         .Case("{v13}", RISCV::V13)
11081                         .Case("{v14}", RISCV::V14)
11082                         .Case("{v15}", RISCV::V15)
11083                         .Case("{v16}", RISCV::V16)
11084                         .Case("{v17}", RISCV::V17)
11085                         .Case("{v18}", RISCV::V18)
11086                         .Case("{v19}", RISCV::V19)
11087                         .Case("{v20}", RISCV::V20)
11088                         .Case("{v21}", RISCV::V21)
11089                         .Case("{v22}", RISCV::V22)
11090                         .Case("{v23}", RISCV::V23)
11091                         .Case("{v24}", RISCV::V24)
11092                         .Case("{v25}", RISCV::V25)
11093                         .Case("{v26}", RISCV::V26)
11094                         .Case("{v27}", RISCV::V27)
11095                         .Case("{v28}", RISCV::V28)
11096                         .Case("{v29}", RISCV::V29)
11097                         .Case("{v30}", RISCV::V30)
11098                         .Case("{v31}", RISCV::V31)
11099                         .Default(RISCV::NoRegister);
11100     if (VReg != RISCV::NoRegister) {
11101       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
11102         return std::make_pair(VReg, &RISCV::VMRegClass);
11103       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
11104         return std::make_pair(VReg, &RISCV::VRRegClass);
11105       for (const auto *RC :
11106            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11107         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
11108           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
11109           return std::make_pair(VReg, RC);
11110         }
11111       }
11112     }
11113   }
11114 
11115   std::pair<Register, const TargetRegisterClass *> Res =
11116       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
11117 
11118   // If we picked one of the Zfinx register classes, remap it to the GPR class.
11119   // FIXME: When Zfinx is supported in CodeGen this will need to take the
11120   // Subtarget into account.
11121   if (Res.second == &RISCV::GPRF16RegClass ||
11122       Res.second == &RISCV::GPRF32RegClass ||
11123       Res.second == &RISCV::GPRF64RegClass)
11124     return std::make_pair(Res.first, &RISCV::GPRRegClass);
11125 
11126   return Res;
11127 }
11128 
11129 unsigned
11130 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
11131   // Currently only support length 1 constraints.
11132   if (ConstraintCode.size() == 1) {
11133     switch (ConstraintCode[0]) {
11134     case 'A':
11135       return InlineAsm::Constraint_A;
11136     default:
11137       break;
11138     }
11139   }
11140 
11141   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
11142 }
11143 
11144 void RISCVTargetLowering::LowerAsmOperandForConstraint(
11145     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
11146     SelectionDAG &DAG) const {
11147   // Currently only support length 1 constraints.
11148   if (Constraint.length() == 1) {
11149     switch (Constraint[0]) {
11150     case 'I':
11151       // Validate & create a 12-bit signed immediate operand.
11152       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11153         uint64_t CVal = C->getSExtValue();
11154         if (isInt<12>(CVal))
11155           Ops.push_back(
11156               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11157       }
11158       return;
11159     case 'J':
11160       // Validate & create an integer zero operand.
11161       if (auto *C = dyn_cast<ConstantSDNode>(Op))
11162         if (C->getZExtValue() == 0)
11163           Ops.push_back(
11164               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
11165       return;
11166     case 'K':
11167       // Validate & create a 5-bit unsigned immediate operand.
11168       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
11169         uint64_t CVal = C->getZExtValue();
11170         if (isUInt<5>(CVal))
11171           Ops.push_back(
11172               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
11173       }
11174       return;
11175     case 'S':
11176       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
11177         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
11178                                                  GA->getValueType(0)));
11179       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
11180         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
11181                                                 BA->getValueType(0)));
11182       }
11183       return;
11184     default:
11185       break;
11186     }
11187   }
11188   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
11189 }
11190 
11191 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
11192                                                    Instruction *Inst,
11193                                                    AtomicOrdering Ord) const {
11194   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
11195     return Builder.CreateFence(Ord);
11196   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
11197     return Builder.CreateFence(AtomicOrdering::Release);
11198   return nullptr;
11199 }
11200 
11201 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
11202                                                     Instruction *Inst,
11203                                                     AtomicOrdering Ord) const {
11204   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
11205     return Builder.CreateFence(AtomicOrdering::Acquire);
11206   return nullptr;
11207 }
11208 
11209 TargetLowering::AtomicExpansionKind
11210 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
11211   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
11212   // point operations can't be used in an lr/sc sequence without breaking the
11213   // forward-progress guarantee.
11214   if (AI->isFloatingPointOperation())
11215     return AtomicExpansionKind::CmpXChg;
11216 
11217   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
11218   if (Size == 8 || Size == 16)
11219     return AtomicExpansionKind::MaskedIntrinsic;
11220   return AtomicExpansionKind::None;
11221 }
11222 
11223 static Intrinsic::ID
11224 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
11225   if (XLen == 32) {
11226     switch (BinOp) {
11227     default:
11228       llvm_unreachable("Unexpected AtomicRMW BinOp");
11229     case AtomicRMWInst::Xchg:
11230       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
11231     case AtomicRMWInst::Add:
11232       return Intrinsic::riscv_masked_atomicrmw_add_i32;
11233     case AtomicRMWInst::Sub:
11234       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
11235     case AtomicRMWInst::Nand:
11236       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
11237     case AtomicRMWInst::Max:
11238       return Intrinsic::riscv_masked_atomicrmw_max_i32;
11239     case AtomicRMWInst::Min:
11240       return Intrinsic::riscv_masked_atomicrmw_min_i32;
11241     case AtomicRMWInst::UMax:
11242       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
11243     case AtomicRMWInst::UMin:
11244       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
11245     }
11246   }
11247 
11248   if (XLen == 64) {
11249     switch (BinOp) {
11250     default:
11251       llvm_unreachable("Unexpected AtomicRMW BinOp");
11252     case AtomicRMWInst::Xchg:
11253       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
11254     case AtomicRMWInst::Add:
11255       return Intrinsic::riscv_masked_atomicrmw_add_i64;
11256     case AtomicRMWInst::Sub:
11257       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
11258     case AtomicRMWInst::Nand:
11259       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
11260     case AtomicRMWInst::Max:
11261       return Intrinsic::riscv_masked_atomicrmw_max_i64;
11262     case AtomicRMWInst::Min:
11263       return Intrinsic::riscv_masked_atomicrmw_min_i64;
11264     case AtomicRMWInst::UMax:
11265       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
11266     case AtomicRMWInst::UMin:
11267       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
11268     }
11269   }
11270 
11271   llvm_unreachable("Unexpected XLen\n");
11272 }
11273 
11274 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
11275     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
11276     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
11277   unsigned XLen = Subtarget.getXLen();
11278   Value *Ordering =
11279       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
11280   Type *Tys[] = {AlignedAddr->getType()};
11281   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
11282       AI->getModule(),
11283       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
11284 
11285   if (XLen == 64) {
11286     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
11287     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11288     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
11289   }
11290 
11291   Value *Result;
11292 
11293   // Must pass the shift amount needed to sign extend the loaded value prior
11294   // to performing a signed comparison for min/max. ShiftAmt is the number of
11295   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
11296   // is the number of bits to left+right shift the value in order to
11297   // sign-extend.
11298   if (AI->getOperation() == AtomicRMWInst::Min ||
11299       AI->getOperation() == AtomicRMWInst::Max) {
11300     const DataLayout &DL = AI->getModule()->getDataLayout();
11301     unsigned ValWidth =
11302         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
11303     Value *SextShamt =
11304         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
11305     Result = Builder.CreateCall(LrwOpScwLoop,
11306                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
11307   } else {
11308     Result =
11309         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
11310   }
11311 
11312   if (XLen == 64)
11313     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11314   return Result;
11315 }
11316 
11317 TargetLowering::AtomicExpansionKind
11318 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
11319     AtomicCmpXchgInst *CI) const {
11320   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
11321   if (Size == 8 || Size == 16)
11322     return AtomicExpansionKind::MaskedIntrinsic;
11323   return AtomicExpansionKind::None;
11324 }
11325 
11326 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
11327     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
11328     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
11329   unsigned XLen = Subtarget.getXLen();
11330   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
11331   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
11332   if (XLen == 64) {
11333     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
11334     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
11335     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11336     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
11337   }
11338   Type *Tys[] = {AlignedAddr->getType()};
11339   Function *MaskedCmpXchg =
11340       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
11341   Value *Result = Builder.CreateCall(
11342       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
11343   if (XLen == 64)
11344     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11345   return Result;
11346 }
11347 
11348 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
11349   return false;
11350 }
11351 
11352 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
11353                                                EVT VT) const {
11354   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
11355     return false;
11356 
11357   switch (FPVT.getSimpleVT().SimpleTy) {
11358   case MVT::f16:
11359     return Subtarget.hasStdExtZfh();
11360   case MVT::f32:
11361     return Subtarget.hasStdExtF();
11362   case MVT::f64:
11363     return Subtarget.hasStdExtD();
11364   default:
11365     return false;
11366   }
11367 }
11368 
11369 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
11370   // If we are using the small code model, we can reduce size of jump table
11371   // entry to 4 bytes.
11372   if (Subtarget.is64Bit() && !isPositionIndependent() &&
11373       getTargetMachine().getCodeModel() == CodeModel::Small) {
11374     return MachineJumpTableInfo::EK_Custom32;
11375   }
11376   return TargetLowering::getJumpTableEncoding();
11377 }
11378 
11379 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
11380     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
11381     unsigned uid, MCContext &Ctx) const {
11382   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
11383          getTargetMachine().getCodeModel() == CodeModel::Small);
11384   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
11385 }
11386 
11387 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
11388                                                      EVT VT) const {
11389   VT = VT.getScalarType();
11390 
11391   if (!VT.isSimple())
11392     return false;
11393 
11394   switch (VT.getSimpleVT().SimpleTy) {
11395   case MVT::f16:
11396     return Subtarget.hasStdExtZfh();
11397   case MVT::f32:
11398     return Subtarget.hasStdExtF();
11399   case MVT::f64:
11400     return Subtarget.hasStdExtD();
11401   default:
11402     break;
11403   }
11404 
11405   return false;
11406 }
11407 
11408 Register RISCVTargetLowering::getExceptionPointerRegister(
11409     const Constant *PersonalityFn) const {
11410   return RISCV::X10;
11411 }
11412 
11413 Register RISCVTargetLowering::getExceptionSelectorRegister(
11414     const Constant *PersonalityFn) const {
11415   return RISCV::X11;
11416 }
11417 
11418 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
11419   // Return false to suppress the unnecessary extensions if the LibCall
11420   // arguments or return value is f32 type for LP64 ABI.
11421   RISCVABI::ABI ABI = Subtarget.getTargetABI();
11422   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
11423     return false;
11424 
11425   return true;
11426 }
11427 
11428 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
11429   if (Subtarget.is64Bit() && Type == MVT::i32)
11430     return true;
11431 
11432   return IsSigned;
11433 }
11434 
11435 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
11436                                                  SDValue C) const {
11437   // Check integral scalar types.
11438   if (VT.isScalarInteger()) {
11439     // Omit the optimization if the sub target has the M extension and the data
11440     // size exceeds XLen.
11441     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
11442       return false;
11443     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
11444       // Break the MUL to a SLLI and an ADD/SUB.
11445       const APInt &Imm = ConstNode->getAPIntValue();
11446       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
11447           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
11448         return true;
11449       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
11450       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
11451           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
11452            (Imm - 8).isPowerOf2()))
11453         return true;
11454       // Omit the following optimization if the sub target has the M extension
11455       // and the data size >= XLen.
11456       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
11457         return false;
11458       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
11459       // a pair of LUI/ADDI.
11460       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
11461         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
11462         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
11463             (1 - ImmS).isPowerOf2())
11464         return true;
11465       }
11466     }
11467   }
11468 
11469   return false;
11470 }
11471 
11472 bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
11473                                                       SDValue ConstNode) const {
11474   // Let the DAGCombiner decide for vectors.
11475   EVT VT = AddNode.getValueType();
11476   if (VT.isVector())
11477     return true;
11478 
11479   // Let the DAGCombiner decide for larger types.
11480   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
11481     return true;
11482 
11483   // It is worse if c1 is simm12 while c1*c2 is not.
11484   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
11485   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
11486   const APInt &C1 = C1Node->getAPIntValue();
11487   const APInt &C2 = C2Node->getAPIntValue();
11488   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
11489     return false;
11490 
11491   // Default to true and let the DAGCombiner decide.
11492   return true;
11493 }
11494 
11495 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
11496     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
11497     bool *Fast) const {
11498   if (!VT.isVector())
11499     return false;
11500 
11501   EVT ElemVT = VT.getVectorElementType();
11502   if (Alignment >= ElemVT.getStoreSize()) {
11503     if (Fast)
11504       *Fast = true;
11505     return true;
11506   }
11507 
11508   return false;
11509 }
11510 
11511 bool RISCVTargetLowering::splitValueIntoRegisterParts(
11512     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
11513     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
11514   bool IsABIRegCopy = CC.hasValue();
11515   EVT ValueVT = Val.getValueType();
11516   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11517     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
11518     // and cast to f32.
11519     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
11520     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
11521     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
11522                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
11523     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
11524     Parts[0] = Val;
11525     return true;
11526   }
11527 
11528   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11529     LLVMContext &Context = *DAG.getContext();
11530     EVT ValueEltVT = ValueVT.getVectorElementType();
11531     EVT PartEltVT = PartVT.getVectorElementType();
11532     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11533     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11534     if (PartVTBitSize % ValueVTBitSize == 0) {
11535       assert(PartVTBitSize >= ValueVTBitSize);
11536       // If the element types are different, bitcast to the same element type of
11537       // PartVT first.
11538       // Give an example here, we want copy a <vscale x 1 x i8> value to
11539       // <vscale x 4 x i16>.
11540       // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
11541       // subvector, then we can bitcast to <vscale x 4 x i16>.
11542       if (ValueEltVT != PartEltVT) {
11543         if (PartVTBitSize > ValueVTBitSize) {
11544           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11545           assert(Count != 0 && "The number of element should not be zero.");
11546           EVT SameEltTypeVT =
11547               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11548           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
11549                             DAG.getUNDEF(SameEltTypeVT), Val,
11550                             DAG.getVectorIdxConstant(0, DL));
11551         }
11552         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
11553       } else {
11554         Val =
11555             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
11556                         Val, DAG.getVectorIdxConstant(0, DL));
11557       }
11558       Parts[0] = Val;
11559       return true;
11560     }
11561   }
11562   return false;
11563 }
11564 
11565 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
11566     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
11567     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
11568   bool IsABIRegCopy = CC.hasValue();
11569   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11570     SDValue Val = Parts[0];
11571 
11572     // Cast the f32 to i32, truncate to i16, and cast back to f16.
11573     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
11574     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
11575     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
11576     return Val;
11577   }
11578 
11579   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11580     LLVMContext &Context = *DAG.getContext();
11581     SDValue Val = Parts[0];
11582     EVT ValueEltVT = ValueVT.getVectorElementType();
11583     EVT PartEltVT = PartVT.getVectorElementType();
11584     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11585     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11586     if (PartVTBitSize % ValueVTBitSize == 0) {
11587       assert(PartVTBitSize >= ValueVTBitSize);
11588       EVT SameEltTypeVT = ValueVT;
11589       // If the element types are different, convert it to the same element type
11590       // of PartVT.
11591       // Give an example here, we want copy a <vscale x 1 x i8> value from
11592       // <vscale x 4 x i16>.
11593       // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
11594       // then we can extract <vscale x 1 x i8>.
11595       if (ValueEltVT != PartEltVT) {
11596         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11597         assert(Count != 0 && "The number of element should not be zero.");
11598         SameEltTypeVT =
11599             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11600         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
11601       }
11602       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
11603                         DAG.getVectorIdxConstant(0, DL));
11604       return Val;
11605     }
11606   }
11607   return SDValue();
11608 }
11609 
11610 SDValue
11611 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
11612                                    SelectionDAG &DAG,
11613                                    SmallVectorImpl<SDNode *> &Created) const {
11614   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
11615   if (isIntDivCheap(N->getValueType(0), Attr))
11616     return SDValue(N, 0); // Lower SDIV as SDIV
11617 
11618   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
11619          "Unexpected divisor!");
11620 
11621   // Conditional move is needed, so do the transformation iff Zbt is enabled.
11622   if (!Subtarget.hasStdExtZbt())
11623     return SDValue();
11624 
11625   // When |Divisor| >= 2 ^ 12, it isn't profitable to do such transformation.
11626   // Besides, more critical path instructions will be generated when dividing
11627   // by 2. So we keep using the original DAGs for these cases.
11628   unsigned Lg2 = Divisor.countTrailingZeros();
11629   if (Lg2 == 1 || Lg2 >= 12)
11630     return SDValue();
11631 
11632   // fold (sdiv X, pow2)
11633   EVT VT = N->getValueType(0);
11634   if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
11635     return SDValue();
11636 
11637   SDLoc DL(N);
11638   SDValue N0 = N->getOperand(0);
11639   SDValue Zero = DAG.getConstant(0, DL, VT);
11640   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
11641 
11642   // Add (N0 < 0) ? Pow2 - 1 : 0;
11643   SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
11644   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
11645   SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
11646 
11647   Created.push_back(Cmp.getNode());
11648   Created.push_back(Add.getNode());
11649   Created.push_back(Sel.getNode());
11650 
11651   // Divide by pow2.
11652   SDValue SRA =
11653       DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
11654 
11655   // If we're dividing by a positive value, we're done.  Otherwise, we must
11656   // negate the result.
11657   if (Divisor.isNonNegative())
11658     return SRA;
11659 
11660   Created.push_back(SRA.getNode());
11661   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
11662 }
11663 
11664 #define GET_REGISTER_MATCHER
11665 #include "RISCVGenAsmMatcher.inc"
11666 
11667 Register
11668 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
11669                                        const MachineFunction &MF) const {
11670   Register Reg = MatchRegisterAltName(RegName);
11671   if (Reg == RISCV::NoRegister)
11672     Reg = MatchRegisterName(RegName);
11673   if (Reg == RISCV::NoRegister)
11674     report_fatal_error(
11675         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
11676   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
11677   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
11678     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
11679                              StringRef(RegName) + "\"."));
11680   return Reg;
11681 }
11682 
11683 namespace llvm {
11684 namespace RISCVVIntrinsicsTable {
11685 
11686 #define GET_RISCVVIntrinsicsTable_IMPL
11687 #include "RISCVGenSearchableTables.inc"
11688 
11689 } // namespace RISCVVIntrinsicsTable
11690 
11691 } // namespace llvm
11692