1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineJumpTableInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/CodeGen/ValueTypes.h"
31 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/IR/DiagnosticPrinter.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/IntrinsicsRISCV.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/KnownBits.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/raw_ostream.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "riscv-lower"
45 
46 STATISTIC(NumTailCalls, "Number of tail calls");
47 
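// Configure lowering for the enabled subtarget features: register the legal
// register classes, mark operations Legal/Custom/Expand/Promote per
// extension, and request the DAG combines this target wants to see.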
48 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
49                                          const RISCVSubtarget &STI)
50     : TargetLowering(TM), Subtarget(STI) {
51 
52   if (Subtarget.isRV32E())
53     report_fatal_error("Codegen not yet implemented for RV32E");
54 
55   RISCVABI::ABI ABI = Subtarget.getTargetABI();
56   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
57 
58   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
59       !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
63     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
64   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
65              !Subtarget.hasStdExtD()) {
66     errs() << "Hard-float 'd' ABI can't be used for a target that "
67               "doesn't support the D instruction set extension (ignoring "
68               "target-abi)\n";
69     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
70   }
71 
72   switch (ABI) {
73   default:
74     report_fatal_error("Don't know how to lower this ABI");
75   case RISCVABI::ABI_ILP32:
76   case RISCVABI::ABI_ILP32F:
77   case RISCVABI::ABI_ILP32D:
78   case RISCVABI::ABI_LP64:
79   case RISCVABI::ABI_LP64F:
80   case RISCVABI::ABI_LP64D:
81     break;
82   }
83 
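  // XLenVT is the native integer register width: i32 on RV32, i64 on RV64.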
84   MVT XLenVT = Subtarget.getXLenVT();
85 
86   // Set up the register classes.
87   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
88 
89   if (Subtarget.hasStdExtZfh())
90     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
91   if (Subtarget.hasStdExtF())
92     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
93   if (Subtarget.hasStdExtD())
94     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
95 
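  // Scalable vector types usable with the V extension, grouped by element
  // type. An nxv<N><ty> type holds N x vscale elements of type <ty>.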
96   static const MVT::SimpleValueType BoolVecVTs[] = {
97       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
98       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
99   static const MVT::SimpleValueType IntVecVTs[] = {
100       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
101       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
102       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
103       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
104       MVT::nxv4i64, MVT::nxv8i64};
105   static const MVT::SimpleValueType F16VecVTs[] = {
106       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
107       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
108   static const MVT::SimpleValueType F32VecVTs[] = {
109       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
110   static const MVT::SimpleValueType F64VecVTs[] = {
111       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
112 
113   if (Subtarget.hasVInstructions()) {
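    // Map a scalable vector type to a register class by its known minimum
    // size: up to 64 bits fits in a single vector register (VR); 128, 256 and
    // 512 bits use the LMUL=2/4/8 register groups (VRM2/VRM4/VRM8).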
114     auto addRegClassForRVV = [this](MVT VT) {
115       unsigned Size = VT.getSizeInBits().getKnownMinValue();
116       assert(Size <= 512 && isPowerOf2_32(Size));
117       const TargetRegisterClass *RC;
118       if (Size <= 64)
119         RC = &RISCV::VRRegClass;
120       else if (Size == 128)
121         RC = &RISCV::VRM2RegClass;
122       else if (Size == 256)
123         RC = &RISCV::VRM4RegClass;
124       else
125         RC = &RISCV::VRM8RegClass;
126 
127       addRegisterClass(VT, RC);
128     };
129 
130     for (MVT VT : BoolVecVTs)
131       addRegClassForRVV(VT);
132     for (MVT VT : IntVecVTs) {
133       if (VT.getVectorElementType() == MVT::i64 &&
134           !Subtarget.hasVInstructionsI64())
135         continue;
136       addRegClassForRVV(VT);
137     }
138 
139     if (Subtarget.hasVInstructionsF16())
140       for (MVT VT : F16VecVTs)
141         addRegClassForRVV(VT);
142 
143     if (Subtarget.hasVInstructionsF32())
144       for (MVT VT : F32VecVTs)
145         addRegClassForRVV(VT);
146 
147     if (Subtarget.hasVInstructionsF64())
148       for (MVT VT : F64VecVTs)
149         addRegClassForRVV(VT);
150 
151     if (Subtarget.useRVVForFixedLengthVectors()) {
152       auto addRegClassForFixedVectors = [this](MVT VT) {
153         MVT ContainerVT = getContainerForFixedLengthVector(VT);
154         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
155         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
156         addRegisterClass(VT, TRI.getRegClass(RCID));
157       };
158       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
159         if (useRVVForFixedLengthVectorVT(VT))
160           addRegClassForFixedVectors(VT);
161 
162       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
163         if (useRVVForFixedLengthVectorVT(VT))
164           addRegClassForFixedVectors(VT);
165     }
166   }
167 
168   // Compute derived properties from the register classes.
169   computeRegisterProperties(STI.getRegisterInfo());
170 
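  // X2 is the RISC-V stack pointer (sp).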
171   setStackPointerRegisterToSaveRestore(RISCV::X2);
172 
173   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
174     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
175 
176   // TODO: add all necessary setOperationAction calls.
177   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
178 
179   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
180   setOperationAction(ISD::BR_CC, XLenVT, Expand);
181   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
182   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
183 
184   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
185   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
186 
187   setOperationAction(ISD::VASTART, MVT::Other, Custom);
188   setOperationAction(ISD::VAARG, MVT::Other, Expand);
189   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
190   setOperationAction(ISD::VAEND, MVT::Other, Expand);
191 
192   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
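  // Zbb provides sext.b/sext.h, so i8/i16 SIGN_EXTEND_INREG only needs to be
  // expanded when that extension is absent.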
193   if (!Subtarget.hasStdExtZbb()) {
194     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
195     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
196   }
197 
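  // On RV64, i32 is not a legal type; custom-lower common i32 arithmetic so
  // it can be selected to the *W instruction forms.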
198   if (Subtarget.is64Bit()) {
199     setOperationAction(ISD::ADD, MVT::i32, Custom);
200     setOperationAction(ISD::SUB, MVT::i32, Custom);
201     setOperationAction(ISD::SHL, MVT::i32, Custom);
202     setOperationAction(ISD::SRA, MVT::i32, Custom);
203     setOperationAction(ISD::SRL, MVT::i32, Custom);
204 
205     setOperationAction(ISD::UADDO, MVT::i32, Custom);
206     setOperationAction(ISD::USUBO, MVT::i32, Custom);
207     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
208     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
209   } else {
210     setLibcallName(RTLIB::SHL_I128, nullptr);
211     setLibcallName(RTLIB::SRL_I128, nullptr);
212     setLibcallName(RTLIB::SRA_I128, nullptr);
213     setLibcallName(RTLIB::MUL_I128, nullptr);
214     setLibcallName(RTLIB::MULO_I64, nullptr);
215   }
216 
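  // Without the M extension there is no hardware multiply/divide; expand
  // these operations to libcalls.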
217   if (!Subtarget.hasStdExtM()) {
218     setOperationAction(ISD::MUL, XLenVT, Expand);
219     setOperationAction(ISD::MULHS, XLenVT, Expand);
220     setOperationAction(ISD::MULHU, XLenVT, Expand);
221     setOperationAction(ISD::SDIV, XLenVT, Expand);
222     setOperationAction(ISD::UDIV, XLenVT, Expand);
223     setOperationAction(ISD::SREM, XLenVT, Expand);
224     setOperationAction(ISD::UREM, XLenVT, Expand);
225   } else {
226     if (Subtarget.is64Bit()) {
227       setOperationAction(ISD::MUL, MVT::i32, Custom);
228       setOperationAction(ISD::MUL, MVT::i128, Custom);
229 
230       setOperationAction(ISD::SDIV, MVT::i8, Custom);
231       setOperationAction(ISD::UDIV, MVT::i8, Custom);
232       setOperationAction(ISD::UREM, MVT::i8, Custom);
233       setOperationAction(ISD::SDIV, MVT::i16, Custom);
234       setOperationAction(ISD::UDIV, MVT::i16, Custom);
235       setOperationAction(ISD::UREM, MVT::i16, Custom);
236       setOperationAction(ISD::SDIV, MVT::i32, Custom);
237       setOperationAction(ISD::UDIV, MVT::i32, Custom);
238       setOperationAction(ISD::UREM, MVT::i32, Custom);
239     } else {
240       setOperationAction(ISD::MUL, MVT::i64, Custom);
241     }
242   }
243 
244   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
245   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
246   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
247   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
248 
249   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
250   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
251   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
252 
253   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
254       Subtarget.hasStdExtZbkb()) {
255     if (Subtarget.is64Bit()) {
256       setOperationAction(ISD::ROTL, MVT::i32, Custom);
257       setOperationAction(ISD::ROTR, MVT::i32, Custom);
258     }
259   } else {
260     setOperationAction(ISD::ROTL, XLenVT, Expand);
261     setOperationAction(ISD::ROTR, XLenVT, Expand);
262   }
263 
264   if (Subtarget.hasStdExtZbp()) {
265     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
266     // more combining.
267     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
268     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
269     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
270     // BSWAP i8 doesn't exist.
271     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
272     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
273 
274     if (Subtarget.is64Bit()) {
275       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
276       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
277     }
278   } else {
279     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
280     // pattern match it directly in isel.
281     setOperationAction(ISD::BSWAP, XLenVT,
282                        (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
283                            ? Legal
284                            : Expand);
285     // Zbkb can use rev8+brev8 to implement bitreverse.
286     setOperationAction(ISD::BITREVERSE, XLenVT,
287                        Subtarget.hasStdExtZbkb() ? Custom : Expand);
288   }
289 
290   if (Subtarget.hasStdExtZbb()) {
291     setOperationAction(ISD::SMIN, XLenVT, Legal);
292     setOperationAction(ISD::SMAX, XLenVT, Legal);
293     setOperationAction(ISD::UMIN, XLenVT, Legal);
294     setOperationAction(ISD::UMAX, XLenVT, Legal);
295 
296     if (Subtarget.is64Bit()) {
297       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
298       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
299       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
300       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
301     }
302   } else {
303     setOperationAction(ISD::CTTZ, XLenVT, Expand);
304     setOperationAction(ISD::CTLZ, XLenVT, Expand);
305     setOperationAction(ISD::CTPOP, XLenVT, Expand);
306   }
307 
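  // The draft Zbt extension provides funnel shifts (fsl/fsr) and a
  // conditional move (cmov), so SELECT stays legal and FSHL/FSHR are
  // custom-lowered; without it, SELECT itself is custom-lowered.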
308   if (Subtarget.hasStdExtZbt()) {
309     setOperationAction(ISD::FSHL, XLenVT, Custom);
310     setOperationAction(ISD::FSHR, XLenVT, Custom);
311     setOperationAction(ISD::SELECT, XLenVT, Legal);
312 
313     if (Subtarget.is64Bit()) {
314       setOperationAction(ISD::FSHL, MVT::i32, Custom);
315       setOperationAction(ISD::FSHR, MVT::i32, Custom);
316     }
317   } else {
318     setOperationAction(ISD::SELECT, XLenVT, Custom);
319   }
320 
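  // FP condition codes and operations without direct hardware support; these
  // are expanded below into supported comparisons or libcalls.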
321   static const ISD::CondCode FPCCToExpand[] = {
322       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
323       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
324       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
325 
326   static const ISD::NodeType FPOpToExpand[] = {
327       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
328       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
329 
330   if (Subtarget.hasStdExtZfh())
331     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
332 
333   if (Subtarget.hasStdExtZfh()) {
334     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
335     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
336     setOperationAction(ISD::LRINT, MVT::f16, Legal);
337     setOperationAction(ISD::LLRINT, MVT::f16, Legal);
338     setOperationAction(ISD::LROUND, MVT::f16, Legal);
339     setOperationAction(ISD::LLROUND, MVT::f16, Legal);
340     setOperationAction(ISD::STRICT_LRINT, MVT::f16, Legal);
341     setOperationAction(ISD::STRICT_LLRINT, MVT::f16, Legal);
342     setOperationAction(ISD::STRICT_LROUND, MVT::f16, Legal);
343     setOperationAction(ISD::STRICT_LLROUND, MVT::f16, Legal);
344     setOperationAction(ISD::STRICT_FADD, MVT::f16, Legal);
345     setOperationAction(ISD::STRICT_FMA, MVT::f16, Legal);
346     setOperationAction(ISD::STRICT_FSUB, MVT::f16, Legal);
347     setOperationAction(ISD::STRICT_FMUL, MVT::f16, Legal);
348     setOperationAction(ISD::STRICT_FDIV, MVT::f16, Legal);
349     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
350     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
351     setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Legal);
352     setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Legal);
353     setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Legal);
354     for (auto CC : FPCCToExpand)
355       setCondCodeAction(CC, MVT::f16, Expand);
356     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
357     setOperationAction(ISD::SELECT, MVT::f16, Custom);
358     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
359 
360     setOperationAction(ISD::FREM,       MVT::f16, Promote);
361     setOperationAction(ISD::FCEIL,      MVT::f16, Promote);
362     setOperationAction(ISD::FFLOOR,     MVT::f16, Promote);
363     setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
364     setOperationAction(ISD::FRINT,      MVT::f16, Promote);
365     setOperationAction(ISD::FROUND,     MVT::f16, Promote);
366     setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote);
367     setOperationAction(ISD::FTRUNC,     MVT::f16, Promote);
368     setOperationAction(ISD::FPOW,       MVT::f16, Promote);
369     setOperationAction(ISD::FPOWI,      MVT::f16, Promote);
370     setOperationAction(ISD::FCOS,       MVT::f16, Promote);
371     setOperationAction(ISD::FSIN,       MVT::f16, Promote);
372     setOperationAction(ISD::FSINCOS,    MVT::f16, Promote);
373     setOperationAction(ISD::FEXP,       MVT::f16, Promote);
374     setOperationAction(ISD::FEXP2,      MVT::f16, Promote);
375     setOperationAction(ISD::FLOG,       MVT::f16, Promote);
376     setOperationAction(ISD::FLOG2,      MVT::f16, Promote);
377     setOperationAction(ISD::FLOG10,     MVT::f16, Promote);
378 
379     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
380     // complete support for all operations in LegalizeDAG.
381 
382     // We need to custom promote this.
383     if (Subtarget.is64Bit())
384       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
385   }
386 
387   if (Subtarget.hasStdExtF()) {
388     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
389     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
390     setOperationAction(ISD::LRINT, MVT::f32, Legal);
391     setOperationAction(ISD::LLRINT, MVT::f32, Legal);
392     setOperationAction(ISD::LROUND, MVT::f32, Legal);
393     setOperationAction(ISD::LLROUND, MVT::f32, Legal);
394     setOperationAction(ISD::STRICT_LRINT, MVT::f32, Legal);
395     setOperationAction(ISD::STRICT_LLRINT, MVT::f32, Legal);
396     setOperationAction(ISD::STRICT_LROUND, MVT::f32, Legal);
397     setOperationAction(ISD::STRICT_LLROUND, MVT::f32, Legal);
398     setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
399     setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
400     setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
401     setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
402     setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
403     setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
404     setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
405     setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
406     for (auto CC : FPCCToExpand)
407       setCondCodeAction(CC, MVT::f32, Expand);
408     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
409     setOperationAction(ISD::SELECT, MVT::f32, Custom);
410     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
411     for (auto Op : FPOpToExpand)
412       setOperationAction(Op, MVT::f32, Expand);
413     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
414     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
415   }
416 
417   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
418     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
419 
420   if (Subtarget.hasStdExtD()) {
421     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
422     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
423     setOperationAction(ISD::LRINT, MVT::f64, Legal);
424     setOperationAction(ISD::LLRINT, MVT::f64, Legal);
425     setOperationAction(ISD::LROUND, MVT::f64, Legal);
426     setOperationAction(ISD::LLROUND, MVT::f64, Legal);
427     setOperationAction(ISD::STRICT_LRINT, MVT::f64, Legal);
428     setOperationAction(ISD::STRICT_LLRINT, MVT::f64, Legal);
429     setOperationAction(ISD::STRICT_LROUND, MVT::f64, Legal);
430     setOperationAction(ISD::STRICT_LLROUND, MVT::f64, Legal);
431     setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
432     setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
433     setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
434     setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
435     setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
436     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
437     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
438     setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
439     setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
440     setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
441     for (auto CC : FPCCToExpand)
442       setCondCodeAction(CC, MVT::f64, Expand);
443     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
444     setOperationAction(ISD::SELECT, MVT::f64, Custom);
445     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
446     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
447     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
448     for (auto Op : FPOpToExpand)
449       setOperationAction(Op, MVT::f64, Expand);
450     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
451     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
452   }
453 
454   if (Subtarget.is64Bit()) {
455     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
456     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
457     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
458     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
459   }
460 
461   if (Subtarget.hasStdExtF()) {
462     setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
463     setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);
464 
465     setOperationAction(ISD::STRICT_FP_TO_UINT, XLenVT, Legal);
466     setOperationAction(ISD::STRICT_FP_TO_SINT, XLenVT, Legal);
467     setOperationAction(ISD::STRICT_UINT_TO_FP, XLenVT, Legal);
468     setOperationAction(ISD::STRICT_SINT_TO_FP, XLenVT, Legal);
469 
470     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
471     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
472   }
473 
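  // Addresses of globals, blocks, constant pools, jump tables and TLS symbols
  // are materialized with custom code sequences chosen by the code model.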
474   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
475   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
476   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
477   setOperationAction(ISD::JumpTable, XLenVT, Custom);
478 
479   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
480 
481   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
482   // Unfortunately this can't be determined just from the ISA naming string.
483   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
484                      Subtarget.is64Bit() ? Legal : Custom);
485 
486   setOperationAction(ISD::TRAP, MVT::Other, Legal);
487   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
488   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
489   if (Subtarget.is64Bit())
490     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
491 
492   if (Subtarget.hasStdExtA()) {
493     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
494     setMinCmpXchgSizeInBits(32);
495   } else {
496     setMaxAtomicSizeInBitsSupported(0);
497   }
498 
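  // Scalar comparison results are 0 or 1 in a GPR.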
499   setBooleanContents(ZeroOrOneBooleanContent);
500 
501   if (Subtarget.hasVInstructions()) {
502     setBooleanVectorContents(ZeroOrOneBooleanContent);
503 
504     setOperationAction(ISD::VSCALE, XLenVT, Custom);
505 
506     // RVV intrinsics may have illegal operands.
    // We also need to custom-legalize vmv.x.s.
508     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
509     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
510     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
511     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
512     if (Subtarget.is64Bit()) {
513       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
514     } else {
515       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
516       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
517     }
518 
519     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
520     setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
521 
522     static const unsigned IntegerVPOps[] = {
523         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
524         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
525         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
526         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
527         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
528         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
529         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
530         ISD::VP_MERGE,       ISD::VP_SELECT};
531 
532     static const unsigned FloatingPointVPOps[] = {
533         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
534         ISD::VP_FDIV,        ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
535         ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,
536         ISD::VP_SELECT};
537 
538     if (!Subtarget.is64Bit()) {
539       // We must custom-lower certain vXi64 operations on RV32 due to the vector
540       // element type being illegal.
541       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
542       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
543 
544       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
545       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
546       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
547       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
548       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
549       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
550       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
551       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
552 
553       setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom);
554       setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom);
555       setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom);
556       setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom);
557       setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom);
558       setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom);
559       setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom);
560       setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom);
561     }
562 
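    // Mask (i1) vector types: RVV mask registers natively support only
    // logical operations, so most other operations are custom-lowered or
    // expanded.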
563     for (MVT VT : BoolVecVTs) {
564       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
565 
566       // Mask VTs are custom-expanded into a series of standard nodes
567       setOperationAction(ISD::TRUNCATE, VT, Custom);
568       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
569       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
570       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
571 
572       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
573       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
574 
575       setOperationAction(ISD::SELECT, VT, Custom);
576       setOperationAction(ISD::SELECT_CC, VT, Expand);
577       setOperationAction(ISD::VSELECT, VT, Expand);
578       setOperationAction(ISD::VP_MERGE, VT, Expand);
579       setOperationAction(ISD::VP_SELECT, VT, Expand);
580 
581       setOperationAction(ISD::VP_AND, VT, Custom);
582       setOperationAction(ISD::VP_OR, VT, Custom);
583       setOperationAction(ISD::VP_XOR, VT, Custom);
584 
585       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
586       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
587       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
588 
589       setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
590       setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
591       setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
592 
593       // RVV has native int->float & float->int conversions where the
594       // element type sizes are within one power-of-two of each other. Any
595       // wider distances between type sizes have to be lowered as sequences
596       // which progressively narrow the gap in stages.
597       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
598       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
599       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
600       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
601 
602       // Expand all extending loads to types larger than this, and truncating
603       // stores from types larger than this.
604       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
605         setTruncStoreAction(OtherVT, VT, Expand);
606         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
607         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
608         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
609       }
610     }
611 
612     for (MVT VT : IntVecVTs) {
613       if (VT.getVectorElementType() == MVT::i64 &&
614           !Subtarget.hasVInstructionsI64())
615         continue;
616 
617       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
618       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
619 
620       // Vectors implement MULHS/MULHU.
621       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
622       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
623 
      // nxvXi64 MULHS/MULHU require the V extension instead of Zve64*.
625       if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV()) {
626         setOperationAction(ISD::MULHU, VT, Expand);
627         setOperationAction(ISD::MULHS, VT, Expand);
628       }
629 
630       setOperationAction(ISD::SMIN, VT, Legal);
631       setOperationAction(ISD::SMAX, VT, Legal);
632       setOperationAction(ISD::UMIN, VT, Legal);
633       setOperationAction(ISD::UMAX, VT, Legal);
634 
635       setOperationAction(ISD::ROTL, VT, Expand);
636       setOperationAction(ISD::ROTR, VT, Expand);
637 
638       setOperationAction(ISD::CTTZ, VT, Expand);
639       setOperationAction(ISD::CTLZ, VT, Expand);
640       setOperationAction(ISD::CTPOP, VT, Expand);
641 
642       setOperationAction(ISD::BSWAP, VT, Expand);
643 
644       // Custom-lower extensions and truncations from/to mask types.
645       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
646       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
647       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
648 
649       // RVV has native int->float & float->int conversions where the
650       // element type sizes are within one power-of-two of each other. Any
651       // wider distances between type sizes have to be lowered as sequences
652       // which progressively narrow the gap in stages.
653       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
654       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
655       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
656       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
657 
658       setOperationAction(ISD::SADDSAT, VT, Legal);
659       setOperationAction(ISD::UADDSAT, VT, Legal);
660       setOperationAction(ISD::SSUBSAT, VT, Legal);
661       setOperationAction(ISD::USUBSAT, VT, Legal);
662 
663       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
664       // nodes which truncate by one power of two at a time.
665       setOperationAction(ISD::TRUNCATE, VT, Custom);
666 
667       // Custom-lower insert/extract operations to simplify patterns.
668       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
669       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
670 
671       // Custom-lower reduction operations to set up the corresponding custom
672       // nodes' operands.
673       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
674       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
675       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
676       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
677       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
678       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
679       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
680       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
681 
682       for (unsigned VPOpc : IntegerVPOps)
683         setOperationAction(VPOpc, VT, Custom);
684 
685       setOperationAction(ISD::LOAD, VT, Custom);
686       setOperationAction(ISD::STORE, VT, Custom);
687 
688       setOperationAction(ISD::MLOAD, VT, Custom);
689       setOperationAction(ISD::MSTORE, VT, Custom);
690       setOperationAction(ISD::MGATHER, VT, Custom);
691       setOperationAction(ISD::MSCATTER, VT, Custom);
692 
693       setOperationAction(ISD::VP_LOAD, VT, Custom);
694       setOperationAction(ISD::VP_STORE, VT, Custom);
695       setOperationAction(ISD::VP_GATHER, VT, Custom);
696       setOperationAction(ISD::VP_SCATTER, VT, Custom);
697 
698       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
699       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
700       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
701 
702       setOperationAction(ISD::SELECT, VT, Custom);
703       setOperationAction(ISD::SELECT_CC, VT, Expand);
704 
705       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
706       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
707 
708       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
709         setTruncStoreAction(VT, OtherVT, Expand);
710         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
711         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
712         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
713       }
714 
715       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
716       // type that can represent the value exactly.
717       if (VT.getVectorElementType() != MVT::i64) {
718         MVT FloatEltVT =
719             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
720         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
721         if (isTypeLegal(FloatVT)) {
722           setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
723           setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
724         }
725       }
726     }
727 
728     // Expand various CCs to best match the RVV ISA, which natively supports UNE
729     // but no other unordered comparisons, and supports all ordered comparisons
730     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
731     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
732     // and we pattern-match those back to the "original", swapping operands once
733     // more. This way we catch both operations and both "vf" and "fv" forms with
734     // fewer patterns.
735     static const ISD::CondCode VFPCCToExpand[] = {
736         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
737         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
738         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
739     };
740 
741     // Sets common operation actions on RVV floating-point vector types.
742     const auto SetCommonVFPActions = [&](MVT VT) {
743       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
744       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
745       // sizes are within one power-of-two of each other. Therefore conversions
746       // between vXf16 and vXf64 must be lowered as sequences which convert via
747       // vXf32.
748       setOperationAction(ISD::FP_ROUND, VT, Custom);
749       setOperationAction(ISD::FP_EXTEND, VT, Custom);
750       // Custom-lower insert/extract operations to simplify patterns.
751       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
752       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
753       // Expand various condition codes (explained above).
754       for (auto CC : VFPCCToExpand)
755         setCondCodeAction(CC, VT, Expand);
756 
757       setOperationAction(ISD::FMINNUM, VT, Legal);
758       setOperationAction(ISD::FMAXNUM, VT, Legal);
759 
760       setOperationAction(ISD::FTRUNC, VT, Custom);
761       setOperationAction(ISD::FCEIL, VT, Custom);
762       setOperationAction(ISD::FFLOOR, VT, Custom);
763       setOperationAction(ISD::FROUND, VT, Custom);
764 
765       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
766       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
767       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
768       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
769 
770       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
771 
772       setOperationAction(ISD::LOAD, VT, Custom);
773       setOperationAction(ISD::STORE, VT, Custom);
774 
775       setOperationAction(ISD::MLOAD, VT, Custom);
776       setOperationAction(ISD::MSTORE, VT, Custom);
777       setOperationAction(ISD::MGATHER, VT, Custom);
778       setOperationAction(ISD::MSCATTER, VT, Custom);
779 
780       setOperationAction(ISD::VP_LOAD, VT, Custom);
781       setOperationAction(ISD::VP_STORE, VT, Custom);
782       setOperationAction(ISD::VP_GATHER, VT, Custom);
783       setOperationAction(ISD::VP_SCATTER, VT, Custom);
784 
785       setOperationAction(ISD::SELECT, VT, Custom);
786       setOperationAction(ISD::SELECT_CC, VT, Expand);
787 
788       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
789       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
790       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
791 
792       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
793 
794       for (unsigned VPOpc : FloatingPointVPOps)
795         setOperationAction(VPOpc, VT, Custom);
796     };
797 
798     // Sets common extload/truncstore actions on RVV floating-point vector
799     // types.
800     const auto SetCommonVFPExtLoadTruncStoreActions =
801         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
802           for (auto SmallVT : SmallerVTs) {
803             setTruncStoreAction(VT, SmallVT, Expand);
804             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
805           }
806         };
807 
808     if (Subtarget.hasVInstructionsF16())
809       for (MVT VT : F16VecVTs)
810         SetCommonVFPActions(VT);
811 
812     for (MVT VT : F32VecVTs) {
813       if (Subtarget.hasVInstructionsF32())
814         SetCommonVFPActions(VT);
815       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
816     }
817 
818     for (MVT VT : F64VecVTs) {
819       if (Subtarget.hasVInstructionsF64())
820         SetCommonVFPActions(VT);
821       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
822       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
823     }
824 
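    // Fixed-length vectors are lowered by operating on a scalable "container"
    // type. By default every operation on them is Expanded; only those we can
    // map onto RVV are marked Custom below.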
825     if (Subtarget.useRVVForFixedLengthVectors()) {
826       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
827         if (!useRVVForFixedLengthVectorVT(VT))
828           continue;
829 
830         // By default everything must be expanded.
831         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
832           setOperationAction(Op, VT, Expand);
833         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
834           setTruncStoreAction(VT, OtherVT, Expand);
835           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
836           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
837           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
838         }
839 
840         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
841         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
842         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
843 
844         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
845         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
846 
847         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
848         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
849 
850         setOperationAction(ISD::LOAD, VT, Custom);
851         setOperationAction(ISD::STORE, VT, Custom);
852 
853         setOperationAction(ISD::SETCC, VT, Custom);
854 
855         setOperationAction(ISD::SELECT, VT, Custom);
856 
857         setOperationAction(ISD::TRUNCATE, VT, Custom);
858 
859         setOperationAction(ISD::BITCAST, VT, Custom);
860 
861         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
862         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
863         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
864 
865         setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
866         setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
867         setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
868 
869         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
870         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
871         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
872         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
873 
        // Operations below differ between mask vectors and other vectors.
875         if (VT.getVectorElementType() == MVT::i1) {
876           setOperationAction(ISD::VP_AND, VT, Custom);
877           setOperationAction(ISD::VP_OR, VT, Custom);
878           setOperationAction(ISD::VP_XOR, VT, Custom);
879           setOperationAction(ISD::AND, VT, Custom);
880           setOperationAction(ISD::OR, VT, Custom);
881           setOperationAction(ISD::XOR, VT, Custom);
882           continue;
883         }
884 
        // Use SPLAT_VECTOR to prevent type legalization from destroying the
        // splats when type-legalizing i64 scalars on RV32.
887         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
888         // improvements first.
889         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
890           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
891           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
892         }
893 
894         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
895         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
896 
897         setOperationAction(ISD::MLOAD, VT, Custom);
898         setOperationAction(ISD::MSTORE, VT, Custom);
899         setOperationAction(ISD::MGATHER, VT, Custom);
900         setOperationAction(ISD::MSCATTER, VT, Custom);
901 
902         setOperationAction(ISD::VP_LOAD, VT, Custom);
903         setOperationAction(ISD::VP_STORE, VT, Custom);
904         setOperationAction(ISD::VP_GATHER, VT, Custom);
905         setOperationAction(ISD::VP_SCATTER, VT, Custom);
906 
907         setOperationAction(ISD::ADD, VT, Custom);
908         setOperationAction(ISD::MUL, VT, Custom);
909         setOperationAction(ISD::SUB, VT, Custom);
910         setOperationAction(ISD::AND, VT, Custom);
911         setOperationAction(ISD::OR, VT, Custom);
912         setOperationAction(ISD::XOR, VT, Custom);
913         setOperationAction(ISD::SDIV, VT, Custom);
914         setOperationAction(ISD::SREM, VT, Custom);
915         setOperationAction(ISD::UDIV, VT, Custom);
916         setOperationAction(ISD::UREM, VT, Custom);
917         setOperationAction(ISD::SHL, VT, Custom);
918         setOperationAction(ISD::SRA, VT, Custom);
919         setOperationAction(ISD::SRL, VT, Custom);
920 
921         setOperationAction(ISD::SMIN, VT, Custom);
922         setOperationAction(ISD::SMAX, VT, Custom);
923         setOperationAction(ISD::UMIN, VT, Custom);
924         setOperationAction(ISD::UMAX, VT, Custom);
925         setOperationAction(ISD::ABS,  VT, Custom);
926 
        // vXi64 MULHS/MULHU require the V extension instead of Zve64*.
928         if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV()) {
929           setOperationAction(ISD::MULHS, VT, Custom);
930           setOperationAction(ISD::MULHU, VT, Custom);
931         }
932 
933         setOperationAction(ISD::SADDSAT, VT, Custom);
934         setOperationAction(ISD::UADDSAT, VT, Custom);
935         setOperationAction(ISD::SSUBSAT, VT, Custom);
936         setOperationAction(ISD::USUBSAT, VT, Custom);
937 
938         setOperationAction(ISD::VSELECT, VT, Custom);
939         setOperationAction(ISD::SELECT_CC, VT, Expand);
940 
941         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
942         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
943         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
944 
945         // Custom-lower reduction operations to set up the corresponding custom
946         // nodes' operands.
947         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
948         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
949         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
950         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
951         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
952 
953         for (unsigned VPOpc : IntegerVPOps)
954           setOperationAction(VPOpc, VT, Custom);
955 
956         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
957         // type that can represent the value exactly.
958         if (VT.getVectorElementType() != MVT::i64) {
959           MVT FloatEltVT =
960               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
961           EVT FloatVT =
962               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
963           if (isTypeLegal(FloatVT)) {
964             setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
965             setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
966           }
967         }
968       }
969 
970       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
971         if (!useRVVForFixedLengthVectorVT(VT))
972           continue;
973 
974         // By default everything must be expanded.
975         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
976           setOperationAction(Op, VT, Expand);
977         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
978           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
979           setTruncStoreAction(VT, OtherVT, Expand);
980         }
981 
982         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
983         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
984         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
985 
986         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
987         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
988         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
989         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
990         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
991 
992         setOperationAction(ISD::LOAD, VT, Custom);
993         setOperationAction(ISD::STORE, VT, Custom);
994         setOperationAction(ISD::MLOAD, VT, Custom);
995         setOperationAction(ISD::MSTORE, VT, Custom);
996         setOperationAction(ISD::MGATHER, VT, Custom);
997         setOperationAction(ISD::MSCATTER, VT, Custom);
998 
999         setOperationAction(ISD::VP_LOAD, VT, Custom);
1000         setOperationAction(ISD::VP_STORE, VT, Custom);
1001         setOperationAction(ISD::VP_GATHER, VT, Custom);
1002         setOperationAction(ISD::VP_SCATTER, VT, Custom);
1003 
1004         setOperationAction(ISD::FADD, VT, Custom);
1005         setOperationAction(ISD::FSUB, VT, Custom);
1006         setOperationAction(ISD::FMUL, VT, Custom);
1007         setOperationAction(ISD::FDIV, VT, Custom);
1008         setOperationAction(ISD::FNEG, VT, Custom);
1009         setOperationAction(ISD::FABS, VT, Custom);
1010         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1011         setOperationAction(ISD::FSQRT, VT, Custom);
1012         setOperationAction(ISD::FMA, VT, Custom);
1013         setOperationAction(ISD::FMINNUM, VT, Custom);
1014         setOperationAction(ISD::FMAXNUM, VT, Custom);
1015 
1016         setOperationAction(ISD::FP_ROUND, VT, Custom);
1017         setOperationAction(ISD::FP_EXTEND, VT, Custom);
1018 
1019         setOperationAction(ISD::FTRUNC, VT, Custom);
1020         setOperationAction(ISD::FCEIL, VT, Custom);
1021         setOperationAction(ISD::FFLOOR, VT, Custom);
1022         setOperationAction(ISD::FROUND, VT, Custom);
1023 
1024         for (auto CC : VFPCCToExpand)
1025           setCondCodeAction(CC, VT, Expand);
1026 
1027         setOperationAction(ISD::VSELECT, VT, Custom);
1028         setOperationAction(ISD::SELECT, VT, Custom);
1029         setOperationAction(ISD::SELECT_CC, VT, Expand);
1030 
1031         setOperationAction(ISD::BITCAST, VT, Custom);
1032 
1033         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1034         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1035         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1036         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1037 
1038         for (unsigned VPOpc : FloatingPointVPOps)
1039           setOperationAction(VPOpc, VT, Custom);
1040       }
1041 
1042       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
1043       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
1044       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
1045       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
1046       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1047       if (Subtarget.hasStdExtZfh())
1048         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
1049       if (Subtarget.hasStdExtF())
1050         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
1051       if (Subtarget.hasStdExtD())
1052         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
1053     }
1054   }
1055 
1056   // Function alignments.
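  // With the compressed (C) extension, instructions only need 2-byte
  // alignment; otherwise functions are aligned to 4 bytes.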
1057   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
1058   setMinFunctionAlignment(FunctionAlignment);
1059   setPrefFunctionAlignment(FunctionAlignment);
1060 
1061   setMinimumJumpTableEntries(5);
1062 
  // Jumps are expensive compared to logic operations.
1064   setJumpIsExpensive();
1065 
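  // Node types we want routed to PerformDAGCombine.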
1066   setTargetDAGCombine(ISD::ADD);
1067   setTargetDAGCombine(ISD::SUB);
1068   setTargetDAGCombine(ISD::AND);
1069   setTargetDAGCombine(ISD::OR);
1070   setTargetDAGCombine(ISD::XOR);
1071   setTargetDAGCombine(ISD::ANY_EXTEND);
1072   if (Subtarget.hasStdExtF()) {
1073     setTargetDAGCombine(ISD::ZERO_EXTEND);
1074     setTargetDAGCombine(ISD::FP_TO_SINT);
1075     setTargetDAGCombine(ISD::FP_TO_UINT);
1076     setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
1077     setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
1078   }
1079   if (Subtarget.hasVInstructions()) {
1080     setTargetDAGCombine(ISD::FCOPYSIGN);
1081     setTargetDAGCombine(ISD::MGATHER);
1082     setTargetDAGCombine(ISD::MSCATTER);
1083     setTargetDAGCombine(ISD::VP_GATHER);
1084     setTargetDAGCombine(ISD::VP_SCATTER);
1085     setTargetDAGCombine(ISD::SRA);
1086     setTargetDAGCombine(ISD::SRL);
1087     setTargetDAGCombine(ISD::SHL);
1088     setTargetDAGCombine(ISD::STORE);
1089     setTargetDAGCombine(ISD::SPLAT_VECTOR);
1090   }
1091 
1092   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
1093   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
1094 }
1095 
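// Scalar setcc results use XLenVT (the pointer type). With RVV available,
// vector comparisons produce an i1 mask vector; other vectors fall back to
// the default integer element type.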
1096 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
1097                                             LLVMContext &Context,
1098                                             EVT VT) const {
1099   if (!VT.isVector())
1100     return getPointerTy(DL);
1101   if (Subtarget.hasVInstructions() &&
1102       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
1103     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
1104   return VT.changeVectorElementTypeToInteger();
1105 }
1106 
1107 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
1108   return Subtarget.getXLenVT();
1109 }
1110 
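// Describe the memory accessed by RISC-V memory intrinsics so the DAG can
// attach accurate MachineMemOperands to the resulting nodes.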
1111 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1112                                              const CallInst &I,
1113                                              MachineFunction &MF,
1114                                              unsigned Intrinsic) const {
1115   auto &DL = I.getModule()->getDataLayout();
1116   switch (Intrinsic) {
1117   default:
1118     return false;
1119   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
1120   case Intrinsic::riscv_masked_atomicrmw_add_i32:
1121   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
1122   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
1123   case Intrinsic::riscv_masked_atomicrmw_max_i32:
1124   case Intrinsic::riscv_masked_atomicrmw_min_i32:
1125   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
1126   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
1127   case Intrinsic::riscv_masked_cmpxchg_i32:
1128     Info.opc = ISD::INTRINSIC_W_CHAIN;
1129     Info.memVT = MVT::i32;
1130     Info.ptrVal = I.getArgOperand(0);
1131     Info.offset = 0;
1132     Info.align = Align(4);
1133     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
1134                  MachineMemOperand::MOVolatile;
1135     return true;
1136   case Intrinsic::riscv_masked_strided_load:
1137     Info.opc = ISD::INTRINSIC_W_CHAIN;
1138     Info.ptrVal = I.getArgOperand(1);
1139     Info.memVT = getValueType(DL, I.getType()->getScalarType());
1140     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
1141     Info.size = MemoryLocation::UnknownSize;
1142     Info.flags |= MachineMemOperand::MOLoad;
1143     return true;
1144   case Intrinsic::riscv_masked_strided_store:
1145     Info.opc = ISD::INTRINSIC_VOID;
1146     Info.ptrVal = I.getArgOperand(1);
1147     Info.memVT =
1148         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
1149     Info.align = Align(
1150         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
1151         8);
1152     Info.size = MemoryLocation::UnknownSize;
1153     Info.flags |= MachineMemOperand::MOStore;
1154     return true;
1155   }
1156 }
1157 
1158 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1159                                                 const AddrMode &AM, Type *Ty,
1160                                                 unsigned AS,
1161                                                 Instruction *I) const {
1162   // No global is ever allowed as a base.
1163   if (AM.BaseGV)
1164     return false;
1165 
1166   // Require a 12-bit signed offset.
1167   if (!isInt<12>(AM.BaseOffs))
1168     return false;
1169 
1170   switch (AM.Scale) {
1171   case 0: // "r+i" or just "i", depending on HasBaseReg.
1172     break;
1173   case 1:
1174     if (!AM.HasBaseReg) // allow "r+i".
1175       break;
1176     return false; // disallow "r+r" or "r+r+i".
1177   default:
1178     return false;
1179   }
1180 
1181   return true;
1182 }
1183 
1184 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1185   return isInt<12>(Imm);
1186 }
1187 
1188 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1189   return isInt<12>(Imm);
1190 }
1191 
1192 // On RV32, 64-bit integers are split into their high and low parts and held
1193 // in two different registers, so the trunc is free since the low register can
1194 // just be used.
1195 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1196   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1197     return false;
1198   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1199   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1200   return (SrcBits == 64 && DestBits == 32);
1201 }
1202 
1203 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1204   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1205       !SrcVT.isInteger() || !DstVT.isInteger())
1206     return false;
1207   unsigned SrcBits = SrcVT.getSizeInBits();
1208   unsigned DestBits = DstVT.getSizeInBits();
1209   return (SrcBits == 64 && DestBits == 32);
1210 }
1211 
1212 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1213   // Zexts are free if they can be combined with a load.
1214   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1215   // poorly with type legalization of compares preferring sext.
1216   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1217     EVT MemVT = LD->getMemoryVT();
1218     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1219         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1220          LD->getExtensionType() == ISD::ZEXTLOAD))
1221       return true;
1222   }
1223 
1224   return TargetLowering::isZExtFree(Val, VT2);
1225 }
1226 
1227 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1228   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1229 }
1230 
1231 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1232   return Subtarget.hasStdExtZbb();
1233 }
1234 
1235 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1236   return Subtarget.hasStdExtZbb();
1237 }
1238 
1239 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1240   EVT VT = Y.getValueType();
1241 
1242   // FIXME: Support vectors once we have tests.
1243   if (VT.isVector())
1244     return false;
1245 
1246   return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
1247           Subtarget.hasStdExtZbkb()) &&
1248          !isa<ConstantSDNode>(Y);
1249 }
1250 
1251 /// Check if sinking \p I's operands to I's basic block is profitable, because
1252 /// the operands can be folded into a target instruction, e.g.
1253 /// splats of scalars can fold into vector instructions.
1254 bool RISCVTargetLowering::shouldSinkOperands(
1255     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1256   using namespace llvm::PatternMatch;
1257 
1258   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1259     return false;
1260 
1261   auto IsSinker = [&](Instruction *I, int Operand) {
1262     switch (I->getOpcode()) {
1263     case Instruction::Add:
1264     case Instruction::Sub:
1265     case Instruction::Mul:
1266     case Instruction::And:
1267     case Instruction::Or:
1268     case Instruction::Xor:
1269     case Instruction::FAdd:
1270     case Instruction::FSub:
1271     case Instruction::FMul:
1272     case Instruction::FDiv:
1273     case Instruction::ICmp:
1274     case Instruction::FCmp:
1275       return true;
1276     case Instruction::Shl:
1277     case Instruction::LShr:
1278     case Instruction::AShr:
1279     case Instruction::UDiv:
1280     case Instruction::SDiv:
1281     case Instruction::URem:
1282     case Instruction::SRem:
1283       return Operand == 1;
1284     case Instruction::Call:
1285       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1286         switch (II->getIntrinsicID()) {
1287         case Intrinsic::fma:
1288           return Operand == 0 || Operand == 1;
1289         // FIXME: Our patterns can only match vx/vf instructions when the splat
1290         // is on the RHS, because TableGen doesn't recognize our VP operations
1291         // as commutative.
1292         case Intrinsic::vp_add:
1293         case Intrinsic::vp_mul:
1294         case Intrinsic::vp_and:
1295         case Intrinsic::vp_or:
1296         case Intrinsic::vp_xor:
1297         case Intrinsic::vp_fadd:
1298         case Intrinsic::vp_fmul:
1299         case Intrinsic::vp_shl:
1300         case Intrinsic::vp_lshr:
1301         case Intrinsic::vp_ashr:
1302         case Intrinsic::vp_udiv:
1303         case Intrinsic::vp_sdiv:
1304         case Intrinsic::vp_urem:
1305         case Intrinsic::vp_srem:
1306           return Operand == 1;
1307         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1308         // explicit patterns for both LHS and RHS (as 'vr' versions).
1309         case Intrinsic::vp_sub:
1310         case Intrinsic::vp_fsub:
1311         case Intrinsic::vp_fdiv:
1312           return Operand == 0 || Operand == 1;
1313         default:
1314           return false;
1315         }
1316       }
1317       return false;
1318     default:
1319       return false;
1320     }
1321   };
1322 
1323   for (auto OpIdx : enumerate(I->operands())) {
1324     if (!IsSinker(I, OpIdx.index()))
1325       continue;
1326 
1327     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1328     // Make sure we are not already sinking this operand
1329     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1330       continue;
1331 
1332     // We are looking for a splat that can be sunk.
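    // i.e. shufflevector (insertelement undef, X, 0), undef, zeroinitializer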
1333     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1334                              m_Undef(), m_ZeroMask())))
1335       continue;
1336 
1337     // All uses of the shuffle should be sunk to avoid duplicating it across
1338     // GPR and vector registers.
1339     for (Use &U : Op->uses()) {
1340       Instruction *Insn = cast<Instruction>(U.getUser());
1341       if (!IsSinker(Insn, U.getOperandNo()))
1342         return false;
1343     }
1344 
1345     Ops.push_back(&Op->getOperandUse(0));
1346     Ops.push_back(&OpIdx.value());
1347   }
1348   return true;
1349 }
1350 
1351 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1352                                        bool ForCodeSize) const {
1353   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1354   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1355     return false;
1356   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1357     return false;
1358   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1359     return false;
1360   return Imm.isZero();
1361 }
1362 
1363 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1364   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1365          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1366          (VT == MVT::f64 && Subtarget.hasStdExtD());
1367 }
1368 
1369 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1370                                                       CallingConv::ID CC,
1371                                                       EVT VT) const {
1372   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1373   // We might still end up using a GPR but that will be decided based on ABI.
1374   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1375   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1376     return MVT::f32;
1377 
1378   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1379 }
1380 
1381 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1382                                                            CallingConv::ID CC,
1383                                                            EVT VT) const {
1384   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1385   // We might still end up using a GPR but that will be decided based on ABI.
1386   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1387   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1388     return 1;
1389 
1390   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1391 }
1392 
1393 // Changes the condition code and swaps operands if necessary, so the SetCC
1394 // operation matches one of the comparisons supported directly by branches
1395 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1396 // with 1/-1.
1397 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1398                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1399   // Convert X > -1 to X >= 0.
1400   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1401     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1402     CC = ISD::SETGE;
1403     return;
1404   }
1405   // Convert X < 1 to 0 >= X.
1406   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1407     RHS = LHS;
1408     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1409     CC = ISD::SETGE;
1410     return;
1411   }
1412 
1413   switch (CC) {
1414   default:
1415     break;
1416   case ISD::SETGT:
1417   case ISD::SETLE:
1418   case ISD::SETUGT:
1419   case ISD::SETULE:
1420     CC = ISD::getSetCCSwappedOperands(CC);
1421     std::swap(LHS, RHS);
1422     break;
1423   }
1424 }
1425 
1426 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1427   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1428   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
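  // An i1 vector's minimum size in bits equals its element count; scale by 8
  // so the switch below gives it the same LMUL as an i8 vector with the same
  // number of elements.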
1429   if (VT.getVectorElementType() == MVT::i1)
1430     KnownSize *= 8;
1431 
1432   switch (KnownSize) {
1433   default:
1434     llvm_unreachable("Invalid LMUL.");
1435   case 8:
1436     return RISCVII::VLMUL::LMUL_F8;
1437   case 16:
1438     return RISCVII::VLMUL::LMUL_F4;
1439   case 32:
1440     return RISCVII::VLMUL::LMUL_F2;
1441   case 64:
1442     return RISCVII::VLMUL::LMUL_1;
1443   case 128:
1444     return RISCVII::VLMUL::LMUL_2;
1445   case 256:
1446     return RISCVII::VLMUL::LMUL_4;
1447   case 512:
1448     return RISCVII::VLMUL::LMUL_8;
1449   }
1450 }
1451 
1452 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1453   switch (LMul) {
1454   default:
1455     llvm_unreachable("Invalid LMUL.");
1456   case RISCVII::VLMUL::LMUL_F8:
1457   case RISCVII::VLMUL::LMUL_F4:
1458   case RISCVII::VLMUL::LMUL_F2:
1459   case RISCVII::VLMUL::LMUL_1:
1460     return RISCV::VRRegClassID;
1461   case RISCVII::VLMUL::LMUL_2:
1462     return RISCV::VRM2RegClassID;
1463   case RISCVII::VLMUL::LMUL_4:
1464     return RISCV::VRM4RegClassID;
1465   case RISCVII::VLMUL::LMUL_8:
1466     return RISCV::VRM8RegClassID;
1467   }
1468 }
1469 
1470 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1471   RISCVII::VLMUL LMUL = getLMUL(VT);
1472   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1473       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1474       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1475       LMUL == RISCVII::VLMUL::LMUL_1) {
1476     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1477                   "Unexpected subreg numbering");
1478     return RISCV::sub_vrm1_0 + Index;
1479   }
1480   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1481     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1482                   "Unexpected subreg numbering");
1483     return RISCV::sub_vrm2_0 + Index;
1484   }
1485   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1486     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1487                   "Unexpected subreg numbering");
1488     return RISCV::sub_vrm4_0 + Index;
1489   }
1490   llvm_unreachable("Invalid vector type.");
1491 }
1492 
1493 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1494   if (VT.getVectorElementType() == MVT::i1)
1495     return RISCV::VRRegClassID;
1496   return getRegClassIDForLMUL(getLMUL(VT));
1497 }
1498 
1499 // Attempt to decompose a subvector insert/extract between VecVT and
1500 // SubVecVT via subregister indices. Returns the subregister index that
1501 // can perform the subvector insert/extract with the given element index, as
1502 // well as the index corresponding to any leftover subvectors that must be
1503 // further inserted/extracted within the register class for SubVecVT.
1504 std::pair<unsigned, unsigned>
1505 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1506     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1507     const RISCVRegisterInfo *TRI) {
1508   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1509                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1510                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1511                 "Register classes not ordered");
1512   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1513   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1514   // Try to compose a subregister index that takes us from the incoming
1515   // LMUL>1 register class down to the outgoing one. At each step we halve
1516   // the LMUL:
1517   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1518   // Note that this is not guaranteed to find a subregister index, such as
1519   // when we are extracting from one VR type to another.
1520   unsigned SubRegIdx = RISCV::NoSubRegister;
1521   for (const unsigned RCID :
1522        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1523     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1524       VecVT = VecVT.getHalfNumVectorElementsVT();
1525       bool IsHi =
1526           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1527       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1528                                             getSubregIndexByMVT(VecVT, IsHi));
1529       if (IsHi)
1530         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1531     }
1532   return {SubRegIdx, InsertExtractIdx};
1533 }
1534 
1535 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1536 // stores for those types.
1537 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1538   return !Subtarget.useRVVForFixedLengthVectors() ||
1539          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1540 }
1541 
1542 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1543   if (ScalarTy->isPointerTy())
1544     return true;
1545 
1546   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1547       ScalarTy->isIntegerTy(32))
1548     return true;
1549 
1550   if (ScalarTy->isIntegerTy(64))
1551     return Subtarget.hasVInstructionsI64();
1552 
1553   if (ScalarTy->isHalfTy())
1554     return Subtarget.hasVInstructionsF16();
1555   if (ScalarTy->isFloatTy())
1556     return Subtarget.hasVInstructionsF32();
1557   if (ScalarTy->isDoubleTy())
1558     return Subtarget.hasVInstructionsF64();
1559 
1560   return false;
1561 }
1562 
1563 static SDValue getVLOperand(SDValue Op) {
1564   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1565           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
1566          "Unexpected opcode");
1567   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
1568   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
1569   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1570       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
1571   if (!II)
1572     return SDValue();
1573   return Op.getOperand(II->VLOperand + 1 + HasChain);
1574 }
1575 
1576 static bool useRVVForFixedLengthVectorVT(MVT VT,
1577                                          const RISCVSubtarget &Subtarget) {
1578   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1579   if (!Subtarget.useRVVForFixedLengthVectors())
1580     return false;
1581 
1582   // We only support a set of vector types with a consistent maximum fixed size
1583   // across all supported vector element types to avoid legalization issues.
1584   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1585   // fixed-length vector type we support is 1024 bytes.
1586   if (VT.getFixedSizeInBits() > 1024 * 8)
1587     return false;
1588 
1589   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1590 
1591   MVT EltVT = VT.getVectorElementType();
1592 
1593   // Don't use RVV for vectors we cannot scalarize if required.
1594   switch (EltVT.SimpleTy) {
1595   // i1 is supported but has different rules.
1596   default:
1597     return false;
1598   case MVT::i1:
1599     // Masks can only use a single register.
1600     if (VT.getVectorNumElements() > MinVLen)
1601       return false;
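    // Scale MinVLen down to the number of i8 elements per vector register so
    // the LMUL computation below treats each mask element like an i8 element.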
1602     MinVLen /= 8;
1603     break;
1604   case MVT::i8:
1605   case MVT::i16:
1606   case MVT::i32:
1607     break;
1608   case MVT::i64:
1609     if (!Subtarget.hasVInstructionsI64())
1610       return false;
1611     break;
1612   case MVT::f16:
1613     if (!Subtarget.hasVInstructionsF16())
1614       return false;
1615     break;
1616   case MVT::f32:
1617     if (!Subtarget.hasVInstructionsF32())
1618       return false;
1619     break;
1620   case MVT::f64:
1621     if (!Subtarget.hasVInstructionsF64())
1622       return false;
1623     break;
1624   }
1625 
1626   // Reject elements larger than ELEN.
1627   if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
1628     return false;
1629 
1630   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1631   // Don't use RVV for types that don't fit.
1632   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1633     return false;
1634 
1635   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1636   // the base fixed length RVV support in place.
1637   if (!VT.isPow2VectorType())
1638     return false;
1639 
1640   return true;
1641 }
1642 
1643 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1644   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1645 }
1646 
1647 // Return the scalable RVV container type used to lower fixed-length vector VT.
1648 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1649                                             const RISCVSubtarget &Subtarget) {
1650   // This may be called before legal types are set up.
1651   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1652           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1653          "Expected legal fixed length vector!");
1654 
1655   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1656   unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();
1657 
1658   MVT EltVT = VT.getVectorElementType();
1659   switch (EltVT.SimpleTy) {
1660   default:
1661     llvm_unreachable("unexpected element type for RVV container");
1662   case MVT::i1:
1663   case MVT::i8:
1664   case MVT::i16:
1665   case MVT::i32:
1666   case MVT::i64:
1667   case MVT::f16:
1668   case MVT::f32:
1669   case MVT::f64: {
1670     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1671     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1672     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
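    // For example, with a 128-bit minimum VLEN and 64-bit ELEN, v8i32 maps to
    // nxv4i32 and v2i32 maps to nxv1i32.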
1673     unsigned NumElts =
1674         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1675     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1676     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1677     return MVT::getScalableVectorVT(EltVT, NumElts);
1678   }
1679   }
1680 }
1681 
1682 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1683                                             const RISCVSubtarget &Subtarget) {
1684   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1685                                           Subtarget);
1686 }
1687 
1688 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1689   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1690 }
1691 
1692 // Grow V to consume an entire RVV register.
1693 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1694                                        const RISCVSubtarget &Subtarget) {
1695   assert(VT.isScalableVector() &&
1696          "Expected to convert into a scalable vector!");
1697   assert(V.getValueType().isFixedLengthVector() &&
1698          "Expected a fixed length vector operand!");
1699   SDLoc DL(V);
1700   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1701   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1702 }
1703 
1704 // Shrink V so it's just big enough to maintain a VT's worth of data.
1705 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1706                                          const RISCVSubtarget &Subtarget) {
1707   assert(VT.isFixedLengthVector() &&
1708          "Expected to convert into a fixed length vector!");
1709   assert(V.getValueType().isScalableVector() &&
1710          "Expected a scalable vector operand!");
1711   SDLoc DL(V);
1712   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1713   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1714 }
1715 
1716 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1717 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1718 // the vector type that it is contained in.
1719 static std::pair<SDValue, SDValue>
1720 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1721                 const RISCVSubtarget &Subtarget) {
1722   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1723   MVT XLenVT = Subtarget.getXLenVT();
1724   SDValue VL = VecVT.isFixedLengthVector()
1725                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1726                    : DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1727   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1728   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1729   return {Mask, VL};
1730 }
1731 
1732 // As above but assuming the given type is a scalable vector type.
1733 static std::pair<SDValue, SDValue>
1734 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1735                         const RISCVSubtarget &Subtarget) {
1736   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1737   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1738 }
1739 
1740 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1741 // of either are (currently) supported. This can get us into an infinite loop
1742 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1743 // as a ..., etc.
1744 // Until either (or both) of these can reliably lower any node, reporting that
1745 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1746 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1747 // which is not desirable.
1748 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1749     EVT VT, unsigned DefinedValues) const {
1750   return false;
1751 }
1752 
1753 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1754                                   const RISCVSubtarget &Subtarget) {
1755   // RISCV FP-to-int conversions saturate to the destination register size, but
1756   // don't produce 0 for nan. We can use a conversion instruction and fix the
1757   // nan case with a compare and a select.
1758   SDValue Src = Op.getOperand(0);
1759 
1760   EVT DstVT = Op.getValueType();
1761   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1762 
1763   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1764   unsigned Opc;
1765   if (SatVT == DstVT)
1766     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1767   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1768     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1769   else
1770     return SDValue();
1771   // FIXME: Support other SatVTs by clamping before or after the conversion.
1772 
1773   SDLoc DL(Op);
1774   SDValue FpToInt = DAG.getNode(
1775       Opc, DL, DstVT, Src,
1776       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1777 
1778   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1779   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1780 }
1781 
1782 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1783 // and back. Taking care to avoid converting values that are nan or already
1784 // correct.
1785 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1786 // have FRM dependencies modeled yet.
1787 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1788   MVT VT = Op.getSimpleValueType();
1789   assert(VT.isVector() && "Unexpected type");
1790 
1791   SDLoc DL(Op);
1792 
1793   // Freeze the source since we are increasing the number of uses.
1794   SDValue Src = DAG.getNode(ISD::FREEZE, DL, VT, Op.getOperand(0));
1795 
1796   // Truncate to integer and convert back to FP.
1797   MVT IntVT = VT.changeVectorElementTypeToInteger();
1798   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1799   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1800 
1801   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1802 
1803   if (Op.getOpcode() == ISD::FCEIL) {
1804     // If the truncated value is greater than or equal to the original
1805     // value, we've computed the ceil. Otherwise, we went the wrong way and
1806     // need to increase by 1.
1807     // FIXME: This should use a masked operation. Handle here or in isel?
1808     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1809                                  DAG.getConstantFP(1.0, DL, VT));
1810     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1811     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1812   } else if (Op.getOpcode() == ISD::FFLOOR) {
1813     // If the truncated value is less than or equal to the original value,
1814     // we've computed the floor. Otherwise, we went the wrong way and need to
1815     // decrease by 1.
1816     // FIXME: This should use a masked operation. Handle here or in isel?
1817     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1818                                  DAG.getConstantFP(1.0, DL, VT));
1819     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1820     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1821   }
1822 
1823   // Restore the original sign so that -0.0 is preserved.
1824   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1825 
1826   // Determine the largest integer that can be represented exactly. This and
1827   // values larger than it don't have any fractional bits so don't need to
1828   // be converted.
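  // (This is 2^10 for f16, 2^23 for f32 and 2^52 for f64.)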
1829   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1830   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1831   APFloat MaxVal = APFloat(FltSem);
1832   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1833                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1834   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1835 
1836   // If abs(Src) was larger than MaxVal or nan, keep it.
1837   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1838   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1839   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1840 }
1841 
1842 // ISD::FROUND is defined to round to nearest with ties rounding away from 0.
1843 // This mode isn't supported in vector hardware on RISCV. But as long as we
1844 // aren't compiling with trapping math, we can emulate this with
1845 // floor(X + copysign(nextafter(0.5, 0.0), X)).
1846 // FIXME: Could be shorter by changing rounding mode, but we don't have FRM
1847 // dependencies modeled yet.
1848 // FIXME: Use masked operations to avoid final merge.
1849 static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
1850   MVT VT = Op.getSimpleValueType();
1851   assert(VT.isVector() && "Unexpected type");
1852 
1853   SDLoc DL(Op);
1854 
1855   // Freeze the source since we are increasing the number of uses.
1856   SDValue Src = DAG.getNode(ISD::FREEZE, DL, VT, Op.getOperand(0));
1857 
1858   // We do the conversion on the absolute value and fix the sign at the end.
1859   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1860 
1861   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1862   bool Ignored;
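  // Build nextafter(0.5, 0.0) in VT's floating-point semantics: convert 0.5
  // and then step to the next representable value below it.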
1863   APFloat Point5Pred = APFloat(0.5f);
1864   Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
1865   Point5Pred.next(/*nextDown*/ true);
1866 
1867   // Add the adjustment.
1868   SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
1869                                DAG.getConstantFP(Point5Pred, DL, VT));
1870 
1871   // Truncate to integer and convert back to fp.
1872   MVT IntVT = VT.changeVectorElementTypeToInteger();
1873   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
1874   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1875 
1876   // Restore the original sign.
1877   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1878 
1879   // Determine the largest integer that can be represented exactly. This and
1880   // values larger than it don't have any fractional bits so don't need to
1881   // be converted.
1882   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1883   APFloat MaxVal = APFloat(FltSem);
1884   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1885                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1886   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1887 
1888   // If abs(Src) was larger than MaxVal or nan, keep it.
1889   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1890   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1891   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1892 }
1893 
1894 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1895                                  const RISCVSubtarget &Subtarget) {
1896   MVT VT = Op.getSimpleValueType();
1897   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1898 
1899   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1900 
1901   SDLoc DL(Op);
1902   SDValue Mask, VL;
1903   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1904 
1905   unsigned Opc =
1906       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1907   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1908   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1909 }
1910 
1911 struct VIDSequence {
1912   int64_t StepNumerator;
1913   unsigned StepDenominator;
1914   int64_t Addend;
1915 };
1916 
1917 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1918 // to the (non-zero) step S and start value X. This can then be lowered as the
1919 // RVV sequence (VID * S) + X, for example.
1920 // The step S is represented as an integer numerator divided by a positive
1921 // denominator. Note that the implementation currently only identifies
1922 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1923 // cannot detect 2/3, for example.
1924 // Note that this method will also match potentially unappealing index
1925 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1926 // determine whether this is worth generating code for.
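// For example, the sequence <2, 4, 6, 8> yields StepNumerator=2,
// StepDenominator=1 and Addend=2, i.e. (VID * 2) + 2.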
1927 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1928   unsigned NumElts = Op.getNumOperands();
1929   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1930   if (!Op.getValueType().isInteger())
1931     return None;
1932 
1933   Optional<unsigned> SeqStepDenom;
1934   Optional<int64_t> SeqStepNum, SeqAddend;
1935   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1936   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1937   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1938     // Assume undef elements match the sequence; we just have to be careful
1939     // when interpolating across them.
1940     if (Op.getOperand(Idx).isUndef())
1941       continue;
1942     // The BUILD_VECTOR must be all constants.
1943     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1944       return None;
1945 
1946     uint64_t Val = Op.getConstantOperandVal(Idx) &
1947                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1948 
1949     if (PrevElt) {
1950       // Calculate the step since the last non-undef element, and ensure
1951       // it's consistent across the entire sequence.
1952       unsigned IdxDiff = Idx - PrevElt->second;
1953       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1954 
1955       // A zero value difference means that we're somewhere in the middle
1956       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1957       // step change before evaluating the sequence.
1958       if (ValDiff != 0) {
1959         int64_t Remainder = ValDiff % IdxDiff;
1960         // Normalize the step if it's greater than 1.
1961         if (Remainder != ValDiff) {
1962           // The difference must cleanly divide the element span.
1963           if (Remainder != 0)
1964             return None;
1965           ValDiff /= IdxDiff;
1966           IdxDiff = 1;
1967         }
1968 
1969         if (!SeqStepNum)
1970           SeqStepNum = ValDiff;
1971         else if (ValDiff != SeqStepNum)
1972           return None;
1973 
1974         if (!SeqStepDenom)
1975           SeqStepDenom = IdxDiff;
1976         else if (IdxDiff != *SeqStepDenom)
1977           return None;
1978       }
1979     }
1980 
1981     // Record and/or check any addend.
1982     if (SeqStepNum && SeqStepDenom) {
1983       uint64_t ExpectedVal =
1984           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1985       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1986       if (!SeqAddend)
1987         SeqAddend = Addend;
1988       else if (SeqAddend != Addend)
1989         return None;
1990     }
1991 
1992     // Record this non-undef element for later.
1993     if (!PrevElt || PrevElt->first != Val)
1994       PrevElt = std::make_pair(Val, Idx);
1995   }
1996   // We need to have logged both a step and an addend for this to count as
1997   // a legal index sequence.
1998   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
1999     return None;
2000 
2001   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
2002 }
2003 
2004 // Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
2005 // and lower it as a VRGATHER_VX_VL from the source vector.
2006 static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
2007                                   SelectionDAG &DAG,
2008                                   const RISCVSubtarget &Subtarget) {
2009   if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2010     return SDValue();
2011   SDValue Vec = SplatVal.getOperand(0);
2012   // Only perform this optimization on vectors of the same size for simplicity.
2013   if (Vec.getValueType() != VT)
2014     return SDValue();
2015   SDValue Idx = SplatVal.getOperand(1);
2016   // The index must be a legal type.
2017   if (Idx.getValueType() != Subtarget.getXLenVT())
2018     return SDValue();
2019 
2020   MVT ContainerVT = VT;
2021   if (VT.isFixedLengthVector()) {
2022     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2023     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2024   }
2025 
2026   SDValue Mask, VL;
2027   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2028 
2029   SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
2030                                Idx, Mask, VL);
2031 
2032   if (!VT.isFixedLengthVector())
2033     return Gather;
2034 
2035   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2036 }
2037 
2038 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
2039                                  const RISCVSubtarget &Subtarget) {
2040   MVT VT = Op.getSimpleValueType();
2041   assert(VT.isFixedLengthVector() && "Unexpected vector!");
2042 
2043   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2044 
2045   SDLoc DL(Op);
2046   SDValue Mask, VL;
2047   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2048 
2049   MVT XLenVT = Subtarget.getXLenVT();
2050   unsigned NumElts = Op.getNumOperands();
2051 
2052   if (VT.getVectorElementType() == MVT::i1) {
2053     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
2054       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
2055       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
2056     }
2057 
2058     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
2059       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
2060       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
2061     }
2062 
2063     // Lower constant mask BUILD_VECTORs via an integer vector type, in
2064     // scalar integer chunks whose bit-width depends on the number of mask
2065     // bits and XLEN.
2066     // First, determine the most appropriate scalar integer type to use. This
2067     // is at most XLenVT, but may be shrunk to a smaller vector element type
2068     // according to the size of the final vector - use i8 chunks rather than
2069     // XLenVT if we're producing a v8i1. This results in more consistent
2070     // codegen across RV32 and RV64.
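    // For example, a v16i1 constant mask is materialized as a single i16
    // element (a v1i16 vector) and then bitcast back to v16i1.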
2071     unsigned NumViaIntegerBits =
2072         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
2073     NumViaIntegerBits = std::min(NumViaIntegerBits,
2074                                  Subtarget.getMaxELENForFixedLengthVectors());
2075     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
2076       // If we have to use more than one INSERT_VECTOR_ELT then this
2077       // optimization is likely to increase code size; avoid performing it in
2078       // such a case. We can use a load from a constant pool in this case.
2079       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
2080         return SDValue();
2081       // Now we can create our integer vector type. Note that it may be larger
2082       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
2083       MVT IntegerViaVecVT =
2084           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
2085                            divideCeil(NumElts, NumViaIntegerBits));
2086 
2087       uint64_t Bits = 0;
2088       unsigned BitPos = 0, IntegerEltIdx = 0;
2089       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
2090 
2091       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
2092         // Once we accumulate enough bits to fill our scalar type, insert into
2093         // our vector and clear our accumulated data.
2094         if (I != 0 && I % NumViaIntegerBits == 0) {
2095           if (NumViaIntegerBits <= 32)
2096             Bits = SignExtend64(Bits, 32);
2097           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2098           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
2099                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2100           Bits = 0;
2101           BitPos = 0;
2102           IntegerEltIdx++;
2103         }
2104         SDValue V = Op.getOperand(I);
2105         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
2106         Bits |= ((uint64_t)BitValue << BitPos);
2107       }
2108 
2109       // Insert the (remaining) scalar value into position in our integer
2110       // vector type.
2111       if (NumViaIntegerBits <= 32)
2112         Bits = SignExtend64(Bits, 32);
2113       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2114       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
2115                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2116 
2117       if (NumElts < NumViaIntegerBits) {
2118         // If we're producing a smaller vector than our minimum legal integer
2119         // type, bitcast to the equivalent (known-legal) mask type, and extract
2120         // our final mask.
2121         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
2122         Vec = DAG.getBitcast(MVT::v8i1, Vec);
2123         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
2124                           DAG.getConstant(0, DL, XLenVT));
2125       } else {
2126         // Else we must have produced an integer type with the same size as the
2127         // mask type; bitcast for the final result.
2128         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
2129         Vec = DAG.getBitcast(VT, Vec);
2130       }
2131 
2132       return Vec;
2133     }
2134 
2135     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
2136     // vector type, we have a legal equivalently-sized i8 type, so we can use
2137     // that.
2138     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
2139     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
2140 
2141     SDValue WideVec;
2142     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2143       // For a splat, perform a scalar truncate before creating the wider
2144       // vector.
2145       assert(Splat.getValueType() == XLenVT &&
2146              "Unexpected type for i1 splat value");
2147       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2148                           DAG.getConstant(1, DL, XLenVT));
2149       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2150     } else {
2151       SmallVector<SDValue, 8> Ops(Op->op_values());
2152       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2153       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2154       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2155     }
2156 
2157     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2158   }
2159 
2160   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2161     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
2162       return Gather;
2163     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2164                                         : RISCVISD::VMV_V_X_VL;
2165     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
2166     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2167   }
2168 
2169   // Try and match index sequences, which we can lower to the vid instruction
2170   // with optional modifications. An all-undef vector is matched by
2171   // getSplatValue, above.
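  // For example, <0, 3, 6, 9> is lowered as vid.v multiplied by a splat of 3.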
2172   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2173     int64_t StepNumerator = SimpleVID->StepNumerator;
2174     unsigned StepDenominator = SimpleVID->StepDenominator;
2175     int64_t Addend = SimpleVID->Addend;
2176 
2177     assert(StepNumerator != 0 && "Invalid step");
2178     bool Negate = false;
2179     int64_t SplatStepVal = StepNumerator;
2180     unsigned StepOpcode = ISD::MUL;
2181     if (StepNumerator != 1) {
2182       if (isPowerOf2_64(std::abs(StepNumerator))) {
2183         Negate = StepNumerator < 0;
2184         StepOpcode = ISD::SHL;
2185         SplatStepVal = Log2_64(std::abs(StepNumerator));
2186       }
2187     }
2188 
2189     // Only emit VIDs with suitably-small steps/addends. We use imm5 as a
2190     // threshold since it's the immediate value many RVV instructions accept.
2191     // There is no vmul.vi instruction so ensure the multiply constant can fit
2192     // in a single addi instruction.
2193     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2194          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2195         isPowerOf2_32(StepDenominator) && isInt<5>(Addend)) {
2196       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2197       // Convert right out of the scalable type so we can use standard ISD
2198       // nodes for the rest of the computation. If we used scalable types with
2199       // these, we'd lose the fixed-length vector info and generate worse
2200       // vsetvli code.
2201       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2202       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2203           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2204         SDValue SplatStep = DAG.getSplatVector(
2205             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2206         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2207       }
2208       if (StepDenominator != 1) {
2209         SDValue SplatStep = DAG.getSplatVector(
2210             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2211         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2212       }
2213       if (Addend != 0 || Negate) {
2214         SDValue SplatAddend =
2215             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
2216         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
2217       }
2218       return VID;
2219     }
2220   }
2221 
2222   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2223   // when re-interpreted as a vector with a larger element type. For example,
2224   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2225   // could be instead splat as
2226   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2227   // TODO: This optimization could also work on non-constant splats, but it
2228   // would require bit-manipulation instructions to construct the splat value.
2229   SmallVector<SDValue> Sequence;
2230   unsigned EltBitSize = VT.getScalarSizeInBits();
2231   const auto *BV = cast<BuildVectorSDNode>(Op);
2232   if (VT.isInteger() && EltBitSize < 64 &&
2233       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2234       BV->getRepeatedSequence(Sequence) &&
2235       (Sequence.size() * EltBitSize) <= 64) {
2236     unsigned SeqLen = Sequence.size();
2237     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2238     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2239     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2240             ViaIntVT == MVT::i64) &&
2241            "Unexpected sequence type");
2242 
2243     unsigned EltIdx = 0;
2244     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2245     uint64_t SplatValue = 0;
2246     // Construct the amalgamated value which can be splatted as this larger
2247     // vector type.
2248     for (const auto &SeqV : Sequence) {
2249       if (!SeqV.isUndef())
2250         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2251                        << (EltIdx * EltBitSize));
2252       EltIdx++;
2253     }
2254 
2255     // On RV64, sign-extend from 32 to 64 bits where possible in order to
2256     // achieve better constant materialization.
2257     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2258       SplatValue = SignExtend64(SplatValue, 32);
2259 
2260     // Since we can't introduce illegal i64 types at this stage, we can only
2261     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2262     // way we can use RVV instructions to splat.
2263     assert((ViaIntVT.bitsLE(XLenVT) ||
2264             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2265            "Unexpected bitcast sequence");
2266     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2267       SDValue ViaVL =
2268           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2269       MVT ViaContainerVT =
2270           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2271       SDValue Splat =
2272           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2273                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2274       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2275       return DAG.getBitcast(VT, Splat);
2276     }
2277   }
2278 
2279   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2280   // which constitute a large proportion of the elements. In such cases we can
2281   // splat a vector with the dominant element and make up the shortfall with
2282   // INSERT_VECTOR_ELTs.
2283   // Note that this includes vectors of 2 elements by association. The
2284   // upper-most element is the "dominant" one, allowing us to use a splat to
2285   // "insert" the upper element, and an insert of the lower element at position
2286   // 0, which improves codegen.
2287   SDValue DominantValue;
2288   unsigned MostCommonCount = 0;
2289   DenseMap<SDValue, unsigned> ValueCounts;
2290   unsigned NumUndefElts =
2291       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2292 
2293   // Track the number of scalar loads we know we'd be inserting, estimated as
2294   // any non-zero floating-point constant. Other kinds of element are either
2295   // already in registers or are materialized on demand. The threshold at which
2296   // a vector load is more desirable than several scalar materialization and
2297   // vector-insertion instructions is not known.
2298   unsigned NumScalarLoads = 0;
2299 
2300   for (SDValue V : Op->op_values()) {
2301     if (V.isUndef())
2302       continue;
2303 
2304     ValueCounts.insert(std::make_pair(V, 0));
2305     unsigned &Count = ValueCounts[V];
2306 
2307     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2308       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2309 
2310     // Is this value dominant? In case of a tie, prefer the highest element as
2311     // it's cheaper to insert near the beginning of a vector than it is at the
2312     // end.
2313     if (++Count >= MostCommonCount) {
2314       DominantValue = V;
2315       MostCommonCount = Count;
2316     }
2317   }
2318 
2319   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2320   unsigned NumDefElts = NumElts - NumUndefElts;
2321   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2322 
2323   // Don't perform this optimization when optimizing for size, since
2324   // materializing elements and inserting them tends to cause code bloat.
2325   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2326       ((MostCommonCount > DominantValueCountThreshold) ||
2327        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2328     // Start by splatting the most common element.
2329     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2330 
2331     DenseSet<SDValue> Processed{DominantValue};
2332     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2333     for (const auto &OpIdx : enumerate(Op->ops())) {
2334       const SDValue &V = OpIdx.value();
2335       if (V.isUndef() || !Processed.insert(V).second)
2336         continue;
2337       if (ValueCounts[V] == 1) {
2338         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2339                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2340       } else {
2341         // Blend in all instances of this value using a VSELECT, using a
2342         // mask where each bit signals whether that element is the one
2343         // we're after.
2344         SmallVector<SDValue> Ops;
2345         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2346           return DAG.getConstant(V == V1, DL, XLenVT);
2347         });
2348         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2349                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2350                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2351       }
2352     }
2353 
2354     return Vec;
2355   }
2356 
2357   return SDValue();
2358 }
2359 
2360 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
2361                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
2362   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2363     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2364     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2365     // If Hi is just the sign-extension of Lo (all of Hi's bits equal Lo's sign
2366     // bit), lower this as a custom node to try and match RVV vector/scalar
2366     // instructions.
2367     if ((LoC >> 31) == HiC)
2368       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
2369 
2370     // If vl is equal to XLEN_MAX and Hi constant is equal to Lo, we could use
2371     // vmv.v.x whose EEW = 32 to lower it.
2372     auto *Const = dyn_cast<ConstantSDNode>(VL);
2373     if (LoC == HiC && Const && Const->isAllOnesValue() &&
2374         Const->getOpcode() != ISD::TargetConstant) {
2375       MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
2376       // TODO: if vl <= min(VLMAX), we can also do this. But we could not
2377       // access the subtarget here now.
2378       auto InterVec = DAG.getNode(
2379           RISCVISD::VMV_V_X_VL, DL, InterVT, Lo,
2380           DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i32));
2381       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2382     }
2383   }
2384 
2385   // Fall back to a stack store and stride x0 vector load.
2386   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
2387 }
2388 
2389 // Called by type legalization to handle splat of i64 on RV32.
2390 // FIXME: We can optimize this when the type has sign or zero bits in one
2391 // of the halves.
2392 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
2393                                    SDValue VL, SelectionDAG &DAG) {
2394   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2395   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2396                            DAG.getConstant(0, DL, MVT::i32));
2397   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2398                            DAG.getConstant(1, DL, MVT::i32));
2399   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
2400 }
2401 
2402 // This function lowers a splat of the scalar operand Scalar with the vector
2403 // length VL. It ensures the final sequence is type legal, which is useful when
2404 // lowering a splat after type legalization.
2405 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
2406                                 SelectionDAG &DAG,
2407                                 const RISCVSubtarget &Subtarget) {
2408   if (VT.isFloatingPoint()) {
2409     // If VL is 1, we could use vfmv.s.f.
2410     if (isOneConstant(VL))
2411       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, DAG.getUNDEF(VT),
2412                          Scalar, VL);
2413     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
2414   }
2415 
2416   MVT XLenVT = Subtarget.getXLenVT();
2417 
2418   // Simplest case is that the operand needs to be promoted to XLenVT.
2419   if (Scalar.getValueType().bitsLE(XLenVT)) {
2420     // If the operand is a constant, sign extend to increase our chances
2421     // of being able to use a .vi instruction. ANY_EXTEND would become a
2422     // zero extend and the simm5 check in isel would fail.
2423     // FIXME: Should we ignore the upper bits in isel instead?
2424     unsigned ExtOpc =
2425         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2426     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2427     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2428     // If VL is 1 and the scalar value won't benefit from immediate, we could
2429     // use vmv.s.x.
2430     if (isOneConstant(VL) &&
2431         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2432       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, DAG.getUNDEF(VT), Scalar,
2433                          VL);
2434     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
2435   }
2436 
2437   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2438          "Unexpected scalar for splat lowering!");
2439 
2440   if (isOneConstant(VL) && isNullConstant(Scalar))
2441     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, DAG.getUNDEF(VT),
2442                        DAG.getConstant(0, DL, XLenVT), VL);
2443 
2444   // Otherwise use the more complicated splatting algorithm.
2445   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
2446 }
2447 
2448 // Is the mask a slidedown that shifts in undefs?
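// For example, a 4-element mask of <2, 3, -1, -1> is a slidedown by 2.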
2449 static int matchShuffleAsSlideDown(ArrayRef<int> Mask) {
2450   int Size = Mask.size();
2451 
2452   // Elements shifted in should be undef.
2453   auto CheckUndefs = [&](int Shift) {
2454     for (int i = Size - Shift; i != Size; ++i)
2455       if (Mask[i] >= 0)
2456         return false;
2457     return true;
2458   };
2459 
2460   // Elements should be shifted or undef.
2461   auto MatchShift = [&](int Shift) {
2462     for (int i = 0; i != Size - Shift; ++i)
2463       if (Mask[i] >= 0 && Mask[i] != Shift + i)
2464         return false;
2465     return true;
2466   };
2467 
2468   // Try all possible shifts.
2469   for (int Shift = 1; Shift != Size; ++Shift)
2470     if (CheckUndefs(Shift) && MatchShift(Shift))
2471       return Shift;
2472 
2473   // No match.
2474   return -1;
2475 }
2476 
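// Match a shuffle that interleaves elements from the low halves of two source
// vectors, e.g. for two v4i32 sources the mask <0, 4, 1, 5> zips together
// elements 0 and 1 of each source. SwapSources is set to true when the even
// destination elements come from the second source operand.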
2477 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
2478                                 const RISCVSubtarget &Subtarget) {
2479   // We need to be able to widen elements to the next larger integer type.
2480   if (VT.getScalarSizeInBits() >= Subtarget.getMaxELENForFixedLengthVectors())
2481     return false;
2482 
2483   int Size = Mask.size();
2484   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
2485 
2486   int Srcs[] = {-1, -1};
2487   for (int i = 0; i != Size; ++i) {
2488     // Ignore undef elements.
2489     if (Mask[i] < 0)
2490       continue;
2491 
2492     // Is this an even or odd element.
2493     int Pol = i % 2;
2494 
2495     // Ensure we consistently use the same source for this element polarity.
2496     int Src = Mask[i] / Size;
2497     if (Srcs[Pol] < 0)
2498       Srcs[Pol] = Src;
2499     if (Srcs[Pol] != Src)
2500       return false;
2501 
2502     // Make sure the element within the source is appropriate for this element
2503     // in the destination.
2504     int Elt = Mask[i] % Size;
2505     if (Elt != i / 2)
2506       return false;
2507   }
2508 
2509   // We need to find a source for each polarity and they can't be the same.
2510   if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
2511     return false;
2512 
2513   // Swap the sources if the second source was in the even polarity.
2514   SwapSources = Srcs[0] > Srcs[1];
2515 
2516   return true;
2517 }
2518 
2519 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2520                                    const RISCVSubtarget &Subtarget) {
2521   SDValue V1 = Op.getOperand(0);
2522   SDValue V2 = Op.getOperand(1);
2523   SDLoc DL(Op);
2524   MVT XLenVT = Subtarget.getXLenVT();
2525   MVT VT = Op.getSimpleValueType();
2526   unsigned NumElts = VT.getVectorNumElements();
2527   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2528 
2529   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2530 
2531   SDValue TrueMask, VL;
2532   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2533 
2534   if (SVN->isSplat()) {
2535     const int Lane = SVN->getSplatIndex();
2536     if (Lane >= 0) {
2537       MVT SVT = VT.getVectorElementType();
2538 
2539       // Turn a splatted vector load into a strided load with an X0 stride.
2540       SDValue V = V1;
2541       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2542       // with undef.
2543       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2544       int Offset = Lane;
2545       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2546         int OpElements =
2547             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2548         V = V.getOperand(Offset / OpElements);
2549         Offset %= OpElements;
2550       }
2551 
2552       // We need to ensure the load isn't atomic or volatile.
2553       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2554         auto *Ld = cast<LoadSDNode>(V);
2555         Offset *= SVT.getStoreSize();
2556         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2557                                                    TypeSize::Fixed(Offset), DL);
2558 
2559         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
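        // A scalar load followed by a splat isn't usable here, since the i64
        // element doesn't fit in an XLEN=32 GPR.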
2560         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2561           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2562           SDValue IntID =
2563               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2564           SDValue Ops[] = {Ld->getChain(),
2565                            IntID,
2566                            DAG.getUNDEF(ContainerVT),
2567                            NewAddr,
2568                            DAG.getRegister(RISCV::X0, XLenVT),
2569                            VL};
2570           SDValue NewLoad = DAG.getMemIntrinsicNode(
2571               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2572               DAG.getMachineFunction().getMachineMemOperand(
2573                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2574           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2575           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2576         }
2577 
2578         // Otherwise use a scalar load and splat. This will give the best
2579         // opportunity to fold a splat into the operation. ISel can turn it into
2580         // the x0 strided load if we aren't able to fold away the select.
2581         if (SVT.isFloatingPoint())
2582           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2583                           Ld->getPointerInfo().getWithOffset(Offset),
2584                           Ld->getOriginalAlign(),
2585                           Ld->getMemOperand()->getFlags());
2586         else
2587           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2588                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2589                              Ld->getOriginalAlign(),
2590                              Ld->getMemOperand()->getFlags());
2591         DAG.makeEquivalentMemoryOrdering(Ld, V);
2592 
2593         unsigned Opc =
2594             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2595         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
2596         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2597       }
2598 
2599       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2600       assert(Lane < (int)NumElts && "Unexpected lane!");
2601       SDValue Gather =
2602           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2603                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2604       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2605     }
2606   }
2607 
2608   ArrayRef<int> Mask = SVN->getMask();
2609 
2610   // Try to match as a slidedown.
2611   int SlideAmt = matchShuffleAsSlideDown(Mask);
2612   if (SlideAmt >= 0) {
2613     // TODO: Should we reduce the VL to account for the upper undef elements?
2614     // Requires additional vsetvlis, but might be faster to execute.
2615     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2616     SDValue SlideDown =
2617         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
2618                     DAG.getUNDEF(ContainerVT), V1,
2619                     DAG.getConstant(SlideAmt, DL, XLenVT),
2620                     TrueMask, VL);
2621     return convertFromScalableVector(VT, SlideDown, DAG, Subtarget);
2622   }
2623 
2624   // Detect an interleave shuffle and lower to
2625   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
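  // That is, lohalf(V1) + (2^eltbits) * lohalf(V2): each widened element ends
  // up with a V1 element in its low half and a V2 element in its high half,
  // which is the interleaved result once bitcast back to the narrow type.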
2626   bool SwapSources;
2627   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2628     // Swap sources if needed.
2629     if (SwapSources)
2630       std::swap(V1, V2);
2631 
2632     // Extract the lower half of the vectors.
2633     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2634     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2635                      DAG.getConstant(0, DL, XLenVT));
2636     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2637                      DAG.getConstant(0, DL, XLenVT));
2638 
2639     // Double the element width and halve the number of elements in an int type.
2640     unsigned EltBits = VT.getScalarSizeInBits();
2641     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2642     MVT WideIntVT =
2643         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2644     // Convert this to a scalable vector. We need to base this on the
2645     // destination size to ensure there's always a type with a smaller LMUL.
2646     MVT WideIntContainerVT =
2647         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2648 
2649     // Convert sources to scalable vectors with the same element count as the
2650     // larger type.
2651     MVT HalfContainerVT = MVT::getVectorVT(
2652         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2653     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2654     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2655 
2656     // Cast sources to integer.
2657     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2658     MVT IntHalfVT =
2659         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2660     V1 = DAG.getBitcast(IntHalfVT, V1);
2661     V2 = DAG.getBitcast(IntHalfVT, V2);
2662 
2663     // Freeze V2 since we use it twice and we need to be sure that the add and
2664     // multiply see the same value.
2665     V2 = DAG.getNode(ISD::FREEZE, DL, IntHalfVT, V2);
2666 
2667     // Recreate TrueMask using the widened type's element count.
2668     MVT MaskVT =
2669         MVT::getVectorVT(MVT::i1, HalfContainerVT.getVectorElementCount());
2670     TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2671 
2672     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2673     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2674                               V2, TrueMask, VL);
2675     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2676     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2677                                      DAG.getAllOnesConstant(DL, XLenVT));
2678     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2679                                    V2, Multiplier, TrueMask, VL);
2680     // Add the new copies to our previous addition giving us 2^eltbits copies of
2681     // V2. This is equivalent to shifting V2 left by eltbits. This should
2682     // combine with the vwmulu.vv above to form vwmaccu.vv.
2683     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2684                       TrueMask, VL);
2685     // Cast back to ContainerVT. We need to recompute ContainerVT in case
2686     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2687     // vector VT.
2688     ContainerVT =
2689         MVT::getVectorVT(VT.getVectorElementType(),
2690                          WideIntContainerVT.getVectorElementCount() * 2);
2691     Add = DAG.getBitcast(ContainerVT, Add);
2692     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2693   }
2694 
2695   // Detect shuffles which can be re-expressed as vector selects; these are
2696   // shuffles in which each element in the destination is taken from an element
2697   // at the corresponding index in either source vector.
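  // For example, with 4-element sources the mask <0, 5, 2, 7> takes element i
  // from either V1 or V2 at that same index i, so it can be lowered to a
  // vselect.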
2698   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2699     int MaskIndex = MaskIdx.value();
2700     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2701   });
2702 
2703   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2704 
2705   SmallVector<SDValue> MaskVals;
2706   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2707   // merged with a second vrgather.
2708   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2709 
2710   // By default we preserve the original operand order, and use a mask to
2711   // select LHS as true and RHS as false. However, since RVV vector selects may
2712   // feature splats but only on the LHS, we may choose to invert our mask and
2713   // instead select between RHS and LHS.
2714   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2715   bool InvertMask = IsSelect == SwapOps;
2716 
2717   // Keep track of which non-undef indices are used by each LHS/RHS shuffle
2718   // half.
2719   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2720 
2721   // Now construct the mask that will be used by the vselect or blended
2722   // vrgather operation. For vrgathers, construct the appropriate indices into
2723   // each vector.
2724   for (int MaskIndex : Mask) {
2725     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2726     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2727     if (!IsSelect) {
2728       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2729       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2730                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2731                                      : DAG.getUNDEF(XLenVT));
2732       GatherIndicesRHS.push_back(
2733           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2734                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2735       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2736         ++LHSIndexCounts[MaskIndex];
2737       if (!IsLHSOrUndefIndex)
2738         ++RHSIndexCounts[MaskIndex - NumElts];
2739     }
2740   }
2741 
2742   if (SwapOps) {
2743     std::swap(V1, V2);
2744     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2745   }
2746 
2747   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2748   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2749   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2750 
2751   if (IsSelect)
2752     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2753 
2754   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2755     // On such a large vector we're unable to use i8 as the index type.
2756     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2757     // may involve vector splitting if we're already at LMUL=8, or our
2758     // user-supplied maximum fixed-length LMUL.
2759     return SDValue();
2760   }
2761 
2762   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2763   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2764   MVT IndexVT = VT.changeTypeToInteger();
2765   // Since we can't introduce illegal index types at this stage, use i16 and
2766   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2767   // than XLenVT.
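  // This happens for shuffles of vectors with 64-bit elements (i64 or f64) on
  // RV32, where the natural i64 index type is not legal.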
2768   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2769     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2770     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2771   }
2772 
2773   MVT IndexContainerVT =
2774       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2775 
2776   SDValue Gather;
2777   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2778   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2779   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2780     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
2781   } else {
2782     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2783     // If only one index is used, we can use a "splat" vrgather.
2784     // TODO: We can splat the most-common index and fix-up any stragglers, if
2785     // that's beneficial.
2786     if (LHSIndexCounts.size() == 1) {
2787       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2788       Gather =
2789           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2790                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2791     } else {
2792       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2793       LHSIndices =
2794           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2795 
2796       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2797                            TrueMask, VL);
2798     }
2799   }
2800 
2801   // If a second vector operand is used by this shuffle, blend it in with an
2802   // additional vrgather.
2803   if (!V2.isUndef()) {
2804     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2805     // If only one index is used, we can use a "splat" vrgather.
2806     // TODO: We can splat the most-common index and fix-up any stragglers, if
2807     // that's beneficial.
2808     if (RHSIndexCounts.size() == 1) {
2809       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2810       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2811                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2812     } else {
2813       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2814       RHSIndices =
2815           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2816       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2817                        VL);
2818     }
2819 
2820     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2821     SelectMask =
2822         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2823 
2824     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2825                          Gather, VL);
2826   }
2827 
2828   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2829 }
2830 
2831 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
2832   // Support splats for any type. These should type legalize well.
2833   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
2834     return true;
2835 
2836   // Only support legal VTs for other shuffles for now.
2837   if (!isTypeLegal(VT))
2838     return false;
2839 
2840   MVT SVT = VT.getSimpleVT();
2841 
2842   bool SwapSources;
2843   return (matchShuffleAsSlideDown(M) >= 0) ||
2844          isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
2845 }
2846 
2847 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2848                                      SDLoc DL, SelectionDAG &DAG,
2849                                      const RISCVSubtarget &Subtarget) {
2850   if (VT.isScalableVector())
2851     return DAG.getFPExtendOrRound(Op, DL, VT);
2852   assert(VT.isFixedLengthVector() &&
2853          "Unexpected value type for RVV FP extend/round lowering");
2854   SDValue Mask, VL;
2855   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2856   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2857                         ? RISCVISD::FP_EXTEND_VL
2858                         : RISCVISD::FP_ROUND_VL;
2859   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2860 }
2861 
2862 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2863 // the exponent.
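// For example, ctlz of the i32 value 16 converts to the f64 value 2^4, whose
// biased exponent field is 1023 + 4 = 1027; subtracting that from 1023 + 31
// gives the expected 27 leading zeros.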
2864 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2865   MVT VT = Op.getSimpleValueType();
2866   unsigned EltSize = VT.getScalarSizeInBits();
2867   SDValue Src = Op.getOperand(0);
2868   SDLoc DL(Op);
2869 
2870   // We need a FP type that can represent the value.
2871   // TODO: Use f16 for i8 when possible?
2872   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2873   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2874 
2875   // Legal types should have been checked in the RISCVTargetLowering
2876   // constructor.
2877   // TODO: Splitting may make sense in some cases.
2878   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2879          "Expected legal float type!");
2880 
2881   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2882   // The trailing zero count is equal to log2 of this single bit value.
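  // For example, X = 12 (0b1100) gives X & -X = 4 = 2^2, so cttz(X) = 2.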
2883   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2884     SDValue Neg =
2885         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2886     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2887   }
2888 
2889   // We have a legal FP type, convert to it.
2890   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2891   // Bitcast to integer and shift the exponent to the LSB.
2892   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2893   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2894   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2895   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2896                               DAG.getConstant(ShiftAmt, DL, IntVT));
2897   // Truncate back to original type to allow vnsrl.
2898   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2899   // The exponent contains log2 of the value in biased form.
2900   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2901 
2902   // For trailing zeros, we just need to subtract the bias.
2903   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2904     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2905                        DAG.getConstant(ExponentBias, DL, VT));
2906 
2907   // For leading zeros, we need to remove the bias and convert from log2 to
2908   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2909   unsigned Adjust = ExponentBias + (EltSize - 1);
2910   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2911 }
2912 
2913 // While RVV has alignment restrictions, we should always be able to load as a
2914 // legal equivalently-sized byte-typed vector instead. This method is
2915 // responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
2916 // the load is already correctly aligned, it returns SDValue().
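// For example, an underaligned v4i32 load is re-expressed as a v16i8 load of
// the same number of bytes followed by a bitcast back to v4i32.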
2917 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2918                                                     SelectionDAG &DAG) const {
2919   auto *Load = cast<LoadSDNode>(Op);
2920   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2921 
2922   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2923                                      Load->getMemoryVT(),
2924                                      *Load->getMemOperand()))
2925     return SDValue();
2926 
2927   SDLoc DL(Op);
2928   MVT VT = Op.getSimpleValueType();
2929   unsigned EltSizeBits = VT.getScalarSizeInBits();
2930   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2931          "Unexpected unaligned RVV load type");
2932   MVT NewVT =
2933       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2934   assert(NewVT.isValid() &&
2935          "Expecting equally-sized RVV vector types to be legal");
2936   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2937                           Load->getPointerInfo(), Load->getOriginalAlign(),
2938                           Load->getMemOperand()->getFlags());
2939   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2940 }
2941 
2942 // While RVV has alignment restrictions, we should always be able to store as a
2943 // legal equivalently-sized byte-typed vector instead. This method is
2944 // responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
2945 // returns SDValue() if the store is already correctly aligned.
2946 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2947                                                      SelectionDAG &DAG) const {
2948   auto *Store = cast<StoreSDNode>(Op);
2949   assert(Store && Store->getValue().getValueType().isVector() &&
2950          "Expected vector store");
2951 
2952   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2953                                      Store->getMemoryVT(),
2954                                      *Store->getMemOperand()))
2955     return SDValue();
2956 
2957   SDLoc DL(Op);
2958   SDValue StoredVal = Store->getValue();
2959   MVT VT = StoredVal.getSimpleValueType();
2960   unsigned EltSizeBits = VT.getScalarSizeInBits();
2961   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2962          "Unexpected unaligned RVV store type");
2963   MVT NewVT =
2964       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2965   assert(NewVT.isValid() &&
2966          "Expecting equally-sized RVV vector types to be legal");
2967   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2968   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2969                       Store->getPointerInfo(), Store->getOriginalAlign(),
2970                       Store->getMemOperand()->getFlags());
2971 }
2972 
2973 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2974                                             SelectionDAG &DAG) const {
2975   switch (Op.getOpcode()) {
2976   default:
2977     report_fatal_error("unimplemented operand");
2978   case ISD::GlobalAddress:
2979     return lowerGlobalAddress(Op, DAG);
2980   case ISD::BlockAddress:
2981     return lowerBlockAddress(Op, DAG);
2982   case ISD::ConstantPool:
2983     return lowerConstantPool(Op, DAG);
2984   case ISD::JumpTable:
2985     return lowerJumpTable(Op, DAG);
2986   case ISD::GlobalTLSAddress:
2987     return lowerGlobalTLSAddress(Op, DAG);
2988   case ISD::SELECT:
2989     return lowerSELECT(Op, DAG);
2990   case ISD::BRCOND:
2991     return lowerBRCOND(Op, DAG);
2992   case ISD::VASTART:
2993     return lowerVASTART(Op, DAG);
2994   case ISD::FRAMEADDR:
2995     return lowerFRAMEADDR(Op, DAG);
2996   case ISD::RETURNADDR:
2997     return lowerRETURNADDR(Op, DAG);
2998   case ISD::SHL_PARTS:
2999     return lowerShiftLeftParts(Op, DAG);
3000   case ISD::SRA_PARTS:
3001     return lowerShiftRightParts(Op, DAG, true);
3002   case ISD::SRL_PARTS:
3003     return lowerShiftRightParts(Op, DAG, false);
3004   case ISD::BITCAST: {
3005     SDLoc DL(Op);
3006     EVT VT = Op.getValueType();
3007     SDValue Op0 = Op.getOperand(0);
3008     EVT Op0VT = Op0.getValueType();
3009     MVT XLenVT = Subtarget.getXLenVT();
3010     if (VT.isFixedLengthVector()) {
3011       // We can handle fixed length vector bitcasts with a simple replacement
3012       // in isel.
3013       if (Op0VT.isFixedLengthVector())
3014         return Op;
3015       // When bitcasting from scalar to fixed-length vector, insert the scalar
3016       // into a one-element vector of the result type, and perform a vector
3017       // bitcast.
3018       if (!Op0VT.isVector()) {
3019         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
3020         if (!isTypeLegal(BVT))
3021           return SDValue();
3022         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
3023                                               DAG.getUNDEF(BVT), Op0,
3024                                               DAG.getConstant(0, DL, XLenVT)));
3025       }
3026       return SDValue();
3027     }
3028     // Custom-legalize bitcasts from fixed-length vector types to scalar types
3029     // thus: bitcast the vector to a one-element vector type whose element type
3030     // is the same as the result type, and extract the first element.
3031     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
3032       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
3033       if (!isTypeLegal(BVT))
3034         return SDValue();
3035       SDValue BVec = DAG.getBitcast(BVT, Op0);
3036       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
3037                          DAG.getConstant(0, DL, XLenVT));
3038     }
3039     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
3040       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
3041       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
3042       return FPConv;
3043     }
3044     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
3045         Subtarget.hasStdExtF()) {
3046       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
3047       SDValue FPConv =
3048           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
3049       return FPConv;
3050     }
3051     return SDValue();
3052   }
3053   case ISD::INTRINSIC_WO_CHAIN:
3054     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3055   case ISD::INTRINSIC_W_CHAIN:
3056     return LowerINTRINSIC_W_CHAIN(Op, DAG);
3057   case ISD::INTRINSIC_VOID:
3058     return LowerINTRINSIC_VOID(Op, DAG);
3059   case ISD::BSWAP:
3060   case ISD::BITREVERSE: {
3061     MVT VT = Op.getSimpleValueType();
3062     SDLoc DL(Op);
3063     if (Subtarget.hasStdExtZbp()) {
3064       // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
3065       // Start with the maximum immediate value which is the bitwidth - 1.
3066       unsigned Imm = VT.getSizeInBits() - 1;
3067       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
3068       if (Op.getOpcode() == ISD::BSWAP)
3069         Imm &= ~0x7U;
3070       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
3071                          DAG.getConstant(Imm, DL, VT));
3072     }
3073     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
3074     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
3075     // Expand bitreverse to a bswap(rev8) followed by brev8.
3076     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
3077     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
3078     // as brev8 by an isel pattern.
3079     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
3080                        DAG.getConstant(7, DL, VT));
3081   }
3082   case ISD::FSHL:
3083   case ISD::FSHR: {
3084     MVT VT = Op.getSimpleValueType();
3085     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
3086     SDLoc DL(Op);
3087     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
3088     // use log2(XLen) bits. Mask the shift amount accordingly to prevent
3089     // accidentally setting the extra bit.
3090     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
3091     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
3092                                 DAG.getConstant(ShAmtWidth, DL, VT));
3093     // fshl and fshr concatenate their operands in the same order. fsr and fsl
3094     // instructions use different orders. fshl will return its first operand for
3095     // a shift of zero, fshr will return its second operand. fsl and fsr both
3096     // return rs1 so the ISD nodes need to have different operand orders.
3097     // Shift amount is in rs2.
3098     SDValue Op0 = Op.getOperand(0);
3099     SDValue Op1 = Op.getOperand(1);
3100     unsigned Opc = RISCVISD::FSL;
3101     if (Op.getOpcode() == ISD::FSHR) {
3102       std::swap(Op0, Op1);
3103       Opc = RISCVISD::FSR;
3104     }
3105     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3106   }
3107   case ISD::TRUNCATE: {
3108     SDLoc DL(Op);
3109     MVT VT = Op.getSimpleValueType();
3110     // Only custom-lower vector truncates
3111     if (!VT.isVector())
3112       return Op;
3113 
3114     // Truncates to mask types are handled differently
3115     if (VT.getVectorElementType() == MVT::i1)
3116       return lowerVectorMaskTrunc(Op, DAG);
3117 
3118     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
3119     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
3120     // truncate by one power of two at a time.
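    // For example, an i64->i8 element truncate is emitted as three such nodes:
    // i64->i32, i32->i16 and i16->i8.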
3121     MVT DstEltVT = VT.getVectorElementType();
3122 
3123     SDValue Src = Op.getOperand(0);
3124     MVT SrcVT = Src.getSimpleValueType();
3125     MVT SrcEltVT = SrcVT.getVectorElementType();
3126 
3127     assert(DstEltVT.bitsLT(SrcEltVT) &&
3128            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
3129            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
3130            "Unexpected vector truncate lowering");
3131 
3132     MVT ContainerVT = SrcVT;
3133     if (SrcVT.isFixedLengthVector()) {
3134       ContainerVT = getContainerForFixedLengthVector(SrcVT);
3135       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3136     }
3137 
3138     SDValue Result = Src;
3139     SDValue Mask, VL;
3140     std::tie(Mask, VL) =
3141         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
3142     LLVMContext &Context = *DAG.getContext();
3143     const ElementCount Count = ContainerVT.getVectorElementCount();
3144     do {
3145       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
3146       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
3147       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
3148                            Mask, VL);
3149     } while (SrcEltVT != DstEltVT);
3150 
3151     if (SrcVT.isFixedLengthVector())
3152       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
3153 
3154     return Result;
3155   }
3156   case ISD::ANY_EXTEND:
3157   case ISD::ZERO_EXTEND:
3158     if (Op.getOperand(0).getValueType().isVector() &&
3159         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3160       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3161     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3162   case ISD::SIGN_EXTEND:
3163     if (Op.getOperand(0).getValueType().isVector() &&
3164         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3165       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3166     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3167   case ISD::SPLAT_VECTOR_PARTS:
3168     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3169   case ISD::INSERT_VECTOR_ELT:
3170     return lowerINSERT_VECTOR_ELT(Op, DAG);
3171   case ISD::EXTRACT_VECTOR_ELT:
3172     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3173   case ISD::VSCALE: {
3174     MVT VT = Op.getSimpleValueType();
3175     SDLoc DL(Op);
3176     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
3177     // We define our scalable vector types for lmul=1 to use a 64-bit known
3178     // minimum size, e.g. <vscale x 2 x i32>. VLENB is in bytes, so we calculate
3179     // vscale as VLENB / 8.
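    // For example, a VSCALE with a constant operand of 4 lowers to
    // (srl VLENB, 1), and one with a constant operand of 16 to (shl VLENB, 1).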
3180     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3181     if (Subtarget.getMinVLen() < RISCV::RVVBitsPerBlock)
3182       report_fatal_error("Support for VLEN==32 is incomplete.");
3183     if (isa<ConstantSDNode>(Op.getOperand(0))) {
3184       // We assume VLENB is a multiple of 8. We manually choose the best shift
3185       // here because SimplifyDemandedBits isn't always able to simplify it.
3186       uint64_t Val = Op.getConstantOperandVal(0);
3187       if (isPowerOf2_64(Val)) {
3188         uint64_t Log2 = Log2_64(Val);
3189         if (Log2 < 3)
3190           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3191                              DAG.getConstant(3 - Log2, DL, VT));
3192         if (Log2 > 3)
3193           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3194                              DAG.getConstant(Log2 - 3, DL, VT));
3195         return VLENB;
3196       }
3197       // If the multiplier is a multiple of 8, scale it down to avoid needing
3198       // to shift the VLENB value.
3199       if ((Val % 8) == 0)
3200         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3201                            DAG.getConstant(Val / 8, DL, VT));
3202     }
3203 
3204     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3205                                  DAG.getConstant(3, DL, VT));
3206     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3207   }
3208   case ISD::FPOWI: {
3209     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
3210     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
3211     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3212         Op.getOperand(1).getValueType() == MVT::i32) {
3213       SDLoc DL(Op);
3214       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3215       SDValue Powi =
3216           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3217       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3218                          DAG.getIntPtrConstant(0, DL));
3219     }
3220     return SDValue();
3221   }
3222   case ISD::FP_EXTEND: {
3223     // RVV can only do fp_extend to types double the size of the source. We
3224     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
3225     // via f32.
3226     SDLoc DL(Op);
3227     MVT VT = Op.getSimpleValueType();
3228     SDValue Src = Op.getOperand(0);
3229     MVT SrcVT = Src.getSimpleValueType();
3230 
3231     // Prepare any fixed-length vector operands.
3232     MVT ContainerVT = VT;
3233     if (SrcVT.isFixedLengthVector()) {
3234       ContainerVT = getContainerForFixedLengthVector(VT);
3235       MVT SrcContainerVT =
3236           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
3237       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3238     }
3239 
3240     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
3241         SrcVT.getVectorElementType() != MVT::f16) {
3242       // For scalable vectors, the only gap we need to close is
3243       // vXf16->vXf64.
3244       if (!VT.isFixedLengthVector())
3245         return Op;
3246       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
3247       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3248       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3249     }
3250 
3251     MVT InterVT = VT.changeVectorElementType(MVT::f32);
3252     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
3253     SDValue IntermediateExtend = getRVVFPExtendOrRound(
3254         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
3255 
3256     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
3257                                            DL, DAG, Subtarget);
3258     if (VT.isFixedLengthVector())
3259       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
3260     return Extend;
3261   }
3262   case ISD::FP_ROUND: {
3263     // RVV can only do fp_round to types half the size of the source. We
3264     // custom-lower f64->f16 rounds via RVV's round-to-odd float
3265     // conversion instruction.
3266     SDLoc DL(Op);
3267     MVT VT = Op.getSimpleValueType();
3268     SDValue Src = Op.getOperand(0);
3269     MVT SrcVT = Src.getSimpleValueType();
3270 
3271     // Prepare any fixed-length vector operands.
3272     MVT ContainerVT = VT;
3273     if (VT.isFixedLengthVector()) {
3274       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3275       ContainerVT =
3276           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3277       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3278     }
3279 
3280     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
3281         SrcVT.getVectorElementType() != MVT::f64) {
3282       // For scalable vectors, the only gap we need to close is
3283       // vXf64->vXf16.
3284       if (!VT.isFixedLengthVector())
3285         return Op;
3286       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
3287       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3288       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3289     }
3290 
3291     SDValue Mask, VL;
3292     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3293 
3294     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
3295     SDValue IntermediateRound =
3296         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
3297     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
3298                                           DL, DAG, Subtarget);
3299 
3300     if (VT.isFixedLengthVector())
3301       return convertFromScalableVector(VT, Round, DAG, Subtarget);
3302     return Round;
3303   }
3304   case ISD::FP_TO_SINT:
3305   case ISD::FP_TO_UINT:
3306   case ISD::SINT_TO_FP:
3307   case ISD::UINT_TO_FP: {
3308     // RVV can only do fp<->int conversions to types half/double the size of
3309     // the source. We custom-lower any conversions that do two hops into
3310     // sequences.
3311     MVT VT = Op.getSimpleValueType();
3312     if (!VT.isVector())
3313       return Op;
3314     SDLoc DL(Op);
3315     SDValue Src = Op.getOperand(0);
3316     MVT EltVT = VT.getVectorElementType();
3317     MVT SrcVT = Src.getSimpleValueType();
3318     MVT SrcEltVT = SrcVT.getVectorElementType();
3319     unsigned EltSize = EltVT.getSizeInBits();
3320     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3321     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3322            "Unexpected vector element types");
3323 
3324     bool IsInt2FP = SrcEltVT.isInteger();
3325     // Widening conversions
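    // For example, i8->f32 is lowered as an i8->i32 extend followed by an
    // i32->f32 conversion, and f16->i64 as an f16->f32 extend followed by an
    // f32->i64 conversion.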
3326     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
3327       if (IsInt2FP) {
3328         // Do a regular integer sign/zero extension then convert to float.
3329         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
3330                                       VT.getVectorElementCount());
3331         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3332                                  ? ISD::ZERO_EXTEND
3333                                  : ISD::SIGN_EXTEND;
3334         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3335         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3336       }
3337       // FP2Int
3338       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3339       // Do one doubling fp_extend then complete the operation by converting
3340       // to int.
3341       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3342       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3343       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3344     }
3345 
3346     // Narrowing conversions
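    // For example, i64->f16 is lowered as an i64->f32 conversion followed by
    // an f32->f16 round, and f32->i8 as an f32->i16 conversion followed by an
    // i16->i8 truncate.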
3347     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
3348       if (IsInt2FP) {
3349         // One narrowing int_to_fp, then an fp_round.
3350         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3351         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3352         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3353         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3354       }
3355       // FP2Int
3356       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3357       // representable by the integer, the result is poison.
3358       MVT IVecVT =
3359           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
3360                            VT.getVectorElementCount());
3361       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3362       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3363     }
3364 
3365     // Scalable vectors can exit here; isel patterns handle equally-sized
3366     // conversions as well as single halving/doubling steps.
3367     if (!VT.isFixedLengthVector())
3368       return Op;
3369 
3370     // For fixed-length vectors we lower to a custom "VL" node.
3371     unsigned RVVOpc = 0;
3372     switch (Op.getOpcode()) {
3373     default:
3374       llvm_unreachable("Impossible opcode");
3375     case ISD::FP_TO_SINT:
3376       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3377       break;
3378     case ISD::FP_TO_UINT:
3379       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3380       break;
3381     case ISD::SINT_TO_FP:
3382       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3383       break;
3384     case ISD::UINT_TO_FP:
3385       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3386       break;
3387     }
3388 
3389     MVT ContainerVT, SrcContainerVT;
3390     // Derive the reference container type from the larger vector type.
3391     if (SrcEltSize > EltSize) {
3392       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3393       ContainerVT =
3394           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3395     } else {
3396       ContainerVT = getContainerForFixedLengthVector(VT);
3397       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3398     }
3399 
3400     SDValue Mask, VL;
3401     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3402 
3403     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3404     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3405     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3406   }
3407   case ISD::FP_TO_SINT_SAT:
3408   case ISD::FP_TO_UINT_SAT:
3409     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3410   case ISD::FTRUNC:
3411   case ISD::FCEIL:
3412   case ISD::FFLOOR:
3413     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3414   case ISD::FROUND:
3415     return lowerFROUND(Op, DAG);
3416   case ISD::VECREDUCE_ADD:
3417   case ISD::VECREDUCE_UMAX:
3418   case ISD::VECREDUCE_SMAX:
3419   case ISD::VECREDUCE_UMIN:
3420   case ISD::VECREDUCE_SMIN:
3421     return lowerVECREDUCE(Op, DAG);
3422   case ISD::VECREDUCE_AND:
3423   case ISD::VECREDUCE_OR:
3424   case ISD::VECREDUCE_XOR:
3425     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3426       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3427     return lowerVECREDUCE(Op, DAG);
3428   case ISD::VECREDUCE_FADD:
3429   case ISD::VECREDUCE_SEQ_FADD:
3430   case ISD::VECREDUCE_FMIN:
3431   case ISD::VECREDUCE_FMAX:
3432     return lowerFPVECREDUCE(Op, DAG);
3433   case ISD::VP_REDUCE_ADD:
3434   case ISD::VP_REDUCE_UMAX:
3435   case ISD::VP_REDUCE_SMAX:
3436   case ISD::VP_REDUCE_UMIN:
3437   case ISD::VP_REDUCE_SMIN:
3438   case ISD::VP_REDUCE_FADD:
3439   case ISD::VP_REDUCE_SEQ_FADD:
3440   case ISD::VP_REDUCE_FMIN:
3441   case ISD::VP_REDUCE_FMAX:
3442     return lowerVPREDUCE(Op, DAG);
3443   case ISD::VP_REDUCE_AND:
3444   case ISD::VP_REDUCE_OR:
3445   case ISD::VP_REDUCE_XOR:
3446     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3447       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3448     return lowerVPREDUCE(Op, DAG);
3449   case ISD::INSERT_SUBVECTOR:
3450     return lowerINSERT_SUBVECTOR(Op, DAG);
3451   case ISD::EXTRACT_SUBVECTOR:
3452     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3453   case ISD::STEP_VECTOR:
3454     return lowerSTEP_VECTOR(Op, DAG);
3455   case ISD::VECTOR_REVERSE:
3456     return lowerVECTOR_REVERSE(Op, DAG);
3457   case ISD::BUILD_VECTOR:
3458     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3459   case ISD::SPLAT_VECTOR:
3460     if (Op.getValueType().getVectorElementType() == MVT::i1)
3461       return lowerVectorMaskSplat(Op, DAG);
3462     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
3463   case ISD::VECTOR_SHUFFLE:
3464     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3465   case ISD::CONCAT_VECTORS: {
3466     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3467     // better than going through the stack, as the default expansion does.
3468     SDLoc DL(Op);
3469     MVT VT = Op.getSimpleValueType();
3470     unsigned NumOpElts =
3471         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3472     SDValue Vec = DAG.getUNDEF(VT);
3473     for (const auto &OpIdx : enumerate(Op->ops())) {
3474       SDValue SubVec = OpIdx.value();
3475       // Don't insert undef subvectors.
3476       if (SubVec.isUndef())
3477         continue;
3478       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3479                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3480     }
3481     return Vec;
3482   }
3483   case ISD::LOAD:
3484     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3485       return V;
3486     if (Op.getValueType().isFixedLengthVector())
3487       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3488     return Op;
3489   case ISD::STORE:
3490     if (auto V = expandUnalignedRVVStore(Op, DAG))
3491       return V;
3492     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3493       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3494     return Op;
3495   case ISD::MLOAD:
3496   case ISD::VP_LOAD:
3497     return lowerMaskedLoad(Op, DAG);
3498   case ISD::MSTORE:
3499   case ISD::VP_STORE:
3500     return lowerMaskedStore(Op, DAG);
3501   case ISD::SETCC:
3502     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3503   case ISD::ADD:
3504     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3505   case ISD::SUB:
3506     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3507   case ISD::MUL:
3508     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3509   case ISD::MULHS:
3510     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3511   case ISD::MULHU:
3512     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3513   case ISD::AND:
3514     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3515                                               RISCVISD::AND_VL);
3516   case ISD::OR:
3517     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3518                                               RISCVISD::OR_VL);
3519   case ISD::XOR:
3520     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3521                                               RISCVISD::XOR_VL);
3522   case ISD::SDIV:
3523     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3524   case ISD::SREM:
3525     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3526   case ISD::UDIV:
3527     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3528   case ISD::UREM:
3529     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3530   case ISD::SHL:
3531   case ISD::SRA:
3532   case ISD::SRL:
3533     if (Op.getSimpleValueType().isFixedLengthVector())
3534       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3535     // This can be called for an i32 shift amount that needs to be promoted.
3536     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3537            "Unexpected custom legalisation");
3538     return SDValue();
3539   case ISD::SADDSAT:
3540     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3541   case ISD::UADDSAT:
3542     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3543   case ISD::SSUBSAT:
3544     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3545   case ISD::USUBSAT:
3546     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3547   case ISD::FADD:
3548     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3549   case ISD::FSUB:
3550     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3551   case ISD::FMUL:
3552     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3553   case ISD::FDIV:
3554     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3555   case ISD::FNEG:
3556     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3557   case ISD::FABS:
3558     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3559   case ISD::FSQRT:
3560     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3561   case ISD::FMA:
3562     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3563   case ISD::SMIN:
3564     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3565   case ISD::SMAX:
3566     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3567   case ISD::UMIN:
3568     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3569   case ISD::UMAX:
3570     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3571   case ISD::FMINNUM:
3572     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3573   case ISD::FMAXNUM:
3574     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3575   case ISD::ABS:
3576     return lowerABS(Op, DAG);
3577   case ISD::CTLZ_ZERO_UNDEF:
3578   case ISD::CTTZ_ZERO_UNDEF:
3579     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3580   case ISD::VSELECT:
3581     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3582   case ISD::FCOPYSIGN:
3583     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3584   case ISD::MGATHER:
3585   case ISD::VP_GATHER:
3586     return lowerMaskedGather(Op, DAG);
3587   case ISD::MSCATTER:
3588   case ISD::VP_SCATTER:
3589     return lowerMaskedScatter(Op, DAG);
3590   case ISD::FLT_ROUNDS_:
3591     return lowerGET_ROUNDING(Op, DAG);
3592   case ISD::SET_ROUNDING:
3593     return lowerSET_ROUNDING(Op, DAG);
3594   case ISD::VP_SELECT:
3595     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3596   case ISD::VP_MERGE:
3597     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3598   case ISD::VP_ADD:
3599     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3600   case ISD::VP_SUB:
3601     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3602   case ISD::VP_MUL:
3603     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3604   case ISD::VP_SDIV:
3605     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3606   case ISD::VP_UDIV:
3607     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3608   case ISD::VP_SREM:
3609     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3610   case ISD::VP_UREM:
3611     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3612   case ISD::VP_AND:
3613     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3614   case ISD::VP_OR:
3615     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3616   case ISD::VP_XOR:
3617     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3618   case ISD::VP_ASHR:
3619     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3620   case ISD::VP_LSHR:
3621     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3622   case ISD::VP_SHL:
3623     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3624   case ISD::VP_FADD:
3625     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3626   case ISD::VP_FSUB:
3627     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3628   case ISD::VP_FMUL:
3629     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3630   case ISD::VP_FDIV:
3631     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3632   }
3633 }
3634 
3635 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3636                              SelectionDAG &DAG, unsigned Flags) {
3637   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3638 }
3639 
3640 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3641                              SelectionDAG &DAG, unsigned Flags) {
3642   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3643                                    Flags);
3644 }
3645 
3646 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3647                              SelectionDAG &DAG, unsigned Flags) {
3648   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3649                                    N->getOffset(), Flags);
3650 }
3651 
3652 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3653                              SelectionDAG &DAG, unsigned Flags) {
3654   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3655 }
3656 
3657 template <class NodeTy>
3658 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3659                                      bool IsLocal) const {
3660   SDLoc DL(N);
3661   EVT Ty = getPointerTy(DAG.getDataLayout());
3662 
3663   if (isPositionIndependent()) {
3664     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3665     if (IsLocal)
3666       // Use PC-relative addressing to access the symbol. This generates the
3667       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3668       // %pcrel_lo(auipc)).
3669       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3670 
3671     // Use PC-relative addressing to access the GOT for this symbol, then load
3672     // the address from the GOT. This generates the pattern (PseudoLA sym),
3673     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3674     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3675   }
3676 
3677   switch (getTargetMachine().getCodeModel()) {
3678   default:
3679     report_fatal_error("Unsupported code model for lowering");
3680   case CodeModel::Small: {
3681     // Generate a sequence for accessing addresses within the first 2 GiB of
3682     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3683     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3684     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3685     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3686     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3687   }
3688   case CodeModel::Medium: {
3689     // Generate a sequence for accessing addresses within any 2 GiB range of
3690     // the address space. This generates the pattern (PseudoLLA sym), which
3691     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3692     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3693     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3694   }
3695   }
3696 }
3697 
3698 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3699                                                 SelectionDAG &DAG) const {
3700   SDLoc DL(Op);
3701   EVT Ty = Op.getValueType();
3702   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3703   int64_t Offset = N->getOffset();
3704   MVT XLenVT = Subtarget.getXLenVT();
3705 
3706   const GlobalValue *GV = N->getGlobal();
3707   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3708   SDValue Addr = getAddr(N, DAG, IsLocal);
3709 
3710   // In order to maximise the opportunity for common subexpression elimination,
3711   // emit a separate ADD node for the global address offset instead of folding
3712   // it in the global address node. Later peephole optimisations may choose to
3713   // fold it back in when profitable.
3714   if (Offset != 0)
3715     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3716                        DAG.getConstant(Offset, DL, XLenVT));
3717   return Addr;
3718 }
3719 
3720 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3721                                                SelectionDAG &DAG) const {
3722   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3723 
3724   return getAddr(N, DAG);
3725 }
3726 
3727 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3728                                                SelectionDAG &DAG) const {
3729   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3730 
3731   return getAddr(N, DAG);
3732 }
3733 
3734 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3735                                             SelectionDAG &DAG) const {
3736   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3737 
3738   return getAddr(N, DAG);
3739 }
3740 
3741 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3742                                               SelectionDAG &DAG,
3743                                               bool UseGOT) const {
3744   SDLoc DL(N);
3745   EVT Ty = getPointerTy(DAG.getDataLayout());
3746   const GlobalValue *GV = N->getGlobal();
3747   MVT XLenVT = Subtarget.getXLenVT();
3748 
3749   if (UseGOT) {
3750     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3751     // load the address from the GOT and add the thread pointer. This generates
3752     // the pattern (PseudoLA_TLS_IE sym), which expands to
3753     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
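    // The emitted code is typically of the form (lw instead of ld on RV32;
    // names illustrative):
    //   .Lpcrel_hi0:
    //     auipc a0, %tls_ie_pcrel_hi(sym)
    //     ld    a0, %pcrel_lo(.Lpcrel_hi0)(a0)
    //     add   a0, a0, tp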
3754     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3755     SDValue Load =
3756         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3757 
3758     // Add the thread pointer.
3759     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3760     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3761   }
3762 
3763   // Generate a sequence for accessing the address relative to the thread
3764   // pointer, with the appropriate adjustment for the thread pointer offset.
3765   // This generates the pattern
3766   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
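  // which typically assembles to (register choice illustrative):
  //   lui   a0, %tprel_hi(sym)
  //   add   a0, a0, tp, %tprel_add(sym)
  //   addi  a0, a0, %tprel_lo(sym)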
3767   SDValue AddrHi =
3768       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3769   SDValue AddrAdd =
3770       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3771   SDValue AddrLo =
3772       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3773 
3774   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3775   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3776   SDValue MNAdd = SDValue(
3777       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3778       0);
3779   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3780 }
3781 
3782 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3783                                                SelectionDAG &DAG) const {
3784   SDLoc DL(N);
3785   EVT Ty = getPointerTy(DAG.getDataLayout());
3786   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3787   const GlobalValue *GV = N->getGlobal();
3788 
3789   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3790   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3791   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
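  // A typical expansion (names illustrative) is
  //   .Lpcrel_hi0:
  //     auipc a0, %tls_gd_pcrel_hi(sym)
  //     addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)
  // followed by the __tls_get_addr call built below.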
3792   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3793   SDValue Load =
3794       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3795 
3796   // Prepare argument list to generate call.
3797   ArgListTy Args;
3798   ArgListEntry Entry;
3799   Entry.Node = Load;
3800   Entry.Ty = CallTy;
3801   Args.push_back(Entry);
3802 
3803   // Setup call to __tls_get_addr.
3804   TargetLowering::CallLoweringInfo CLI(DAG);
3805   CLI.setDebugLoc(DL)
3806       .setChain(DAG.getEntryNode())
3807       .setLibCallee(CallingConv::C, CallTy,
3808                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3809                     std::move(Args));
3810 
3811   return LowerCallTo(CLI).first;
3812 }
3813 
3814 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3815                                                    SelectionDAG &DAG) const {
3816   SDLoc DL(Op);
3817   EVT Ty = Op.getValueType();
3818   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3819   int64_t Offset = N->getOffset();
3820   MVT XLenVT = Subtarget.getXLenVT();
3821 
3822   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3823 
3824   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3825       CallingConv::GHC)
3826     report_fatal_error("In GHC calling convention TLS is not supported");
3827 
3828   SDValue Addr;
3829   switch (Model) {
3830   case TLSModel::LocalExec:
3831     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3832     break;
3833   case TLSModel::InitialExec:
3834     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3835     break;
3836   case TLSModel::LocalDynamic:
3837   case TLSModel::GeneralDynamic:
3838     Addr = getDynamicTLSAddr(N, DAG);
3839     break;
3840   }
3841 
3842   // In order to maximise the opportunity for common subexpression elimination,
3843   // emit a separate ADD node for the global address offset instead of folding
3844   // it into the global address node. Later peephole optimisations may choose
3845   // to fold it back in when profitable.
3846   if (Offset != 0)
3847     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3848                        DAG.getConstant(Offset, DL, XLenVT));
3849   return Addr;
3850 }
3851 
3852 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3853   SDValue CondV = Op.getOperand(0);
3854   SDValue TrueV = Op.getOperand(1);
3855   SDValue FalseV = Op.getOperand(2);
3856   SDLoc DL(Op);
3857   MVT VT = Op.getSimpleValueType();
3858   MVT XLenVT = Subtarget.getXLenVT();
3859 
3860   // Lower vector SELECTs to VSELECTs by splatting the condition.
3861   if (VT.isVector()) {
3862     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3863     SDValue CondSplat = VT.isScalableVector()
3864                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3865                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3866     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3867   }
3868 
3869   // If the result type is XLenVT and CondV is the output of a SETCC node
3870   // which also operated on XLenVT inputs, then merge the SETCC node into the
3871   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3872   // compare+branch instructions. i.e.:
3873   // (select (setcc lhs, rhs, cc), truev, falsev)
3874   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3875   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3876       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3877     SDValue LHS = CondV.getOperand(0);
3878     SDValue RHS = CondV.getOperand(1);
3879     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3880     ISD::CondCode CCVal = CC->get();
3881 
3882     // Special case for a select of 2 constants that have a difference of 1.
3883     // Normally this is done by DAGCombine, but if the select is introduced by
3884     // type legalization or op legalization, we miss it. Restricting to SETLT
3885     // case for now because that is what signed saturating add/sub need.
3886     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3887     // but we would probably want to swap the true/false values if the condition
3888     // is SETGE/SETLE to avoid an XORI.
3889     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3890         CCVal == ISD::SETLT) {
3891       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3892       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3893       if (TrueVal - 1 == FalseVal)
3894         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3895       if (TrueVal + 1 == FalseVal)
3896         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3897     }
3898 
3899     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3900 
3901     SDValue TargetCC = DAG.getCondCode(CCVal);
3902     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3903     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3904   }
3905 
3906   // Otherwise:
3907   // (select condv, truev, falsev)
3908   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3909   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3910   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3911 
3912   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3913 
3914   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3915 }
3916 
3917 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3918   SDValue CondV = Op.getOperand(1);
3919   SDLoc DL(Op);
3920   MVT XLenVT = Subtarget.getXLenVT();
3921 
3922   if (CondV.getOpcode() == ISD::SETCC &&
3923       CondV.getOperand(0).getValueType() == XLenVT) {
3924     SDValue LHS = CondV.getOperand(0);
3925     SDValue RHS = CondV.getOperand(1);
3926     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3927 
3928     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3929 
3930     SDValue TargetCC = DAG.getCondCode(CCVal);
3931     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3932                        LHS, RHS, TargetCC, Op.getOperand(2));
3933   }
3934 
3935   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3936                      CondV, DAG.getConstant(0, DL, XLenVT),
3937                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3938 }
3939 
3940 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3941   MachineFunction &MF = DAG.getMachineFunction();
3942   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3943 
3944   SDLoc DL(Op);
3945   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3946                                  getPointerTy(MF.getDataLayout()));
3947 
3948   // vastart just stores the address of the VarArgsFrameIndex slot into the
3949   // memory location argument.
3950   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3951   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3952                       MachinePointerInfo(SV));
3953 }
3954 
3955 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3956                                             SelectionDAG &DAG) const {
3957   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3958   MachineFunction &MF = DAG.getMachineFunction();
3959   MachineFrameInfo &MFI = MF.getFrameInfo();
3960   MFI.setFrameAddressIsTaken(true);
3961   Register FrameReg = RI.getFrameRegister(MF);
3962   int XLenInBytes = Subtarget.getXLen() / 8;
3963 
3964   EVT VT = Op.getValueType();
3965   SDLoc DL(Op);
3966   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3967   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
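  // Walk up Depth frames. The caller's frame pointer is assumed to have been
  // spilled at offset -2*XLEN/8 from the current frame pointer (next to the
  // saved return address), matching the frame layout emitted by the RISC-V
  // prologue.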
3968   while (Depth--) {
3969     int Offset = -(XLenInBytes * 2);
3970     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3971                               DAG.getIntPtrConstant(Offset, DL));
3972     FrameAddr =
3973         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3974   }
3975   return FrameAddr;
3976 }
3977 
3978 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3979                                              SelectionDAG &DAG) const {
3980   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3981   MachineFunction &MF = DAG.getMachineFunction();
3982   MachineFrameInfo &MFI = MF.getFrameInfo();
3983   MFI.setReturnAddressIsTaken(true);
3984   MVT XLenVT = Subtarget.getXLenVT();
3985   int XLenInBytes = Subtarget.getXLen() / 8;
3986 
3987   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3988     return SDValue();
3989 
3990   EVT VT = Op.getValueType();
3991   SDLoc DL(Op);
3992   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3993   if (Depth) {
3994     int Off = -XLenInBytes;
3995     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3996     SDValue Offset = DAG.getConstant(Off, DL, VT);
3997     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3998                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3999                        MachinePointerInfo());
4000   }
4001 
4002   // Return the value of the return address register, marking it an implicit
4003   // live-in.
4004   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
4005   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
4006 }
4007 
4008 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
4009                                                  SelectionDAG &DAG) const {
4010   SDLoc DL(Op);
4011   SDValue Lo = Op.getOperand(0);
4012   SDValue Hi = Op.getOperand(1);
4013   SDValue Shamt = Op.getOperand(2);
4014   EVT VT = Lo.getValueType();
4015 
4016   // if Shamt-XLEN < 0: // Shamt < XLEN
4017   //   Lo = Lo << Shamt
4018   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
4019   // else:
4020   //   Hi = Lo << (Shamt-XLEN)
4021   //   Lo = 0
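  //
  // For example, with XLEN=32 and Shamt=4 this yields Lo = Lo << 4 and
  // Hi = (Hi << 4) | (Lo >>u 28), while Shamt=36 yields Lo = 0 and
  // Hi = (original Lo) << 4.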
4022 
4023   SDValue Zero = DAG.getConstant(0, DL, VT);
4024   SDValue One = DAG.getConstant(1, DL, VT);
4025   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4026   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4027   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4028   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
4029 
4030   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
4031   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
4032   SDValue ShiftRightLo =
4033       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
4034   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
4035   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
4036   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
4037 
4038   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4039 
4040   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
4041   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4042 
4043   SDValue Parts[2] = {Lo, Hi};
4044   return DAG.getMergeValues(Parts, DL);
4045 }
4046 
4047 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
4048                                                   bool IsSRA) const {
4049   SDLoc DL(Op);
4050   SDValue Lo = Op.getOperand(0);
4051   SDValue Hi = Op.getOperand(1);
4052   SDValue Shamt = Op.getOperand(2);
4053   EVT VT = Lo.getValueType();
4054 
4055   // SRA expansion:
4056   //   if Shamt-XLEN < 0: // Shamt < XLEN
4057   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
4058   //     Hi = Hi >>s Shamt
4059   //   else:
4060   //     Lo = Hi >>s (Shamt-XLEN)
4061   //     Hi = Hi >>s (XLEN-1)
4062   //
4063   // SRL expansion:
4064   //   if Shamt-XLEN < 0: // Shamt < XLEN
4065   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
4066   //     Hi = Hi >>u Shamt
4067   //   else:
4068   //     Lo = Hi >>u (Shamt-XLEN)
4069   //     Hi = 0
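  //
  // For example (SRA, XLEN=32): Shamt=4 yields Lo = (Lo >>u 4) | (Hi << 28)
  // and Hi = Hi >>s 4, while Shamt=36 yields Lo = Hi >>s 4 and Hi = Hi >>s 31.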
4070 
4071   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
4072 
4073   SDValue Zero = DAG.getConstant(0, DL, VT);
4074   SDValue One = DAG.getConstant(1, DL, VT);
4075   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4076   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4077   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4078   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
4079 
4080   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
4081   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
4082   SDValue ShiftLeftHi =
4083       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
4084   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
4085   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
4086   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
4087   SDValue HiFalse =
4088       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
4089 
4090   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4091 
4092   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
4093   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4094 
4095   SDValue Parts[2] = {Lo, Hi};
4096   return DAG.getMergeValues(Parts, DL);
4097 }
4098 
4099 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
4100 // legal equivalently-sized i8 type, so we can use that as a go-between.
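// For a non-constant scalar the net effect is roughly (registers
// illustrative):
//   andi     a0, a0, 1
//   vmv.v.x  v8, a0
//   vmsne.vi v0, v8, 0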
4101 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
4102                                                   SelectionDAG &DAG) const {
4103   SDLoc DL(Op);
4104   MVT VT = Op.getSimpleValueType();
4105   SDValue SplatVal = Op.getOperand(0);
4106   // All-zeros or all-ones splats are handled specially.
4107   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
4108     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4109     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
4110   }
4111   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
4112     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4113     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
4114   }
4115   MVT XLenVT = Subtarget.getXLenVT();
4116   assert(SplatVal.getValueType() == XLenVT &&
4117          "Unexpected type for i1 splat value");
4118   MVT InterVT = VT.changeVectorElementType(MVT::i8);
4119   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
4120                          DAG.getConstant(1, DL, XLenVT));
4121   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
4122   SDValue Zero = DAG.getConstant(0, DL, InterVT);
4123   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
4124 }
4125 
4126 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
4127 // illegal (currently only vXi64 RV32).
4128 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4129 // them to VMV_V_X_VL.
4130 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4131                                                      SelectionDAG &DAG) const {
4132   SDLoc DL(Op);
4133   MVT VecVT = Op.getSimpleValueType();
4134   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4135          "Unexpected SPLAT_VECTOR_PARTS lowering");
4136 
4137   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4138   SDValue Lo = Op.getOperand(0);
4139   SDValue Hi = Op.getOperand(1);
4140 
4141   if (VecVT.isFixedLengthVector()) {
4142     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4143     SDLoc DL(Op);
4144     SDValue Mask, VL;
4145     std::tie(Mask, VL) =
4146         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4147 
4148     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
4149     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4150   }
4151 
4152   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4153     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4154     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
4155     // If Hi constant is all the same sign bit as Lo, lower this as a custom
4156     // node in order to try and match RVV vector/scalar instructions.
4157     if ((LoC >> 31) == HiC)
4158       return DAG.getNode(
4159           RISCVISD::VMV_V_X_VL, DL, VecVT, Lo,
4160           DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i32));
4161   }
4162 
4163   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
4164   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4165       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4166       Hi.getConstantOperandVal(1) == 31)
4167     return DAG.getNode(
4168         RISCVISD::VMV_V_X_VL, DL, VecVT, Lo,
4169         DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i32));
4170 
4171   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
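  // Conceptually (stack slot and registers illustrative):
  //   sw lo, (slot)
  //   sw hi, 4(slot)
  //   vlse64.v v8, (slot), zero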
4172   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
4173                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i32));
4174 }
4175 
4176 // Custom-lower extensions from mask vectors by using a vselect either with 1
4177 // for zero/any-extension or -1 for sign-extension:
4178 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4179 // Note that any-extension is lowered identically to zero-extension.
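// For a scalable sign-extension this typically selects to something like
// (registers illustrative):
//   vmv.v.i    v8, 0
//   vmerge.vim v8, v8, -1, v0
// with 1 in place of -1 for zero/any-extension.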
4180 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4181                                                 int64_t ExtTrueVal) const {
4182   SDLoc DL(Op);
4183   MVT VecVT = Op.getSimpleValueType();
4184   SDValue Src = Op.getOperand(0);
4185   // Only custom-lower extensions from mask types
4186   assert(Src.getValueType().isVector() &&
4187          Src.getValueType().getVectorElementType() == MVT::i1);
4188 
4189   MVT XLenVT = Subtarget.getXLenVT();
4190   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4191   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4192 
4193   if (VecVT.isScalableVector()) {
4194     // Be careful not to introduce illegal scalar types at this stage, and be
4195     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
4196     // illegal and must be expanded. Since we know that the constants are
4197     // sign-extended 32-bit values, we use VMV_V_X_VL directly.
4198     bool IsRV32E64 =
4199         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
4200 
4201     if (!IsRV32E64) {
4202       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
4203       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
4204     } else {
4205       SplatZero =
4206           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, SplatZero,
4207                       DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
4208       SplatTrueVal =
4209           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, SplatTrueVal,
4210                       DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
4211     }
4212 
4213     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4214   }
4215 
4216   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4217   MVT I1ContainerVT =
4218       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4219 
4220   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4221 
4222   SDValue Mask, VL;
4223   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4224 
4225   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
4226   SplatTrueVal =
4227       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
4228   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4229                                SplatTrueVal, SplatZero, VL);
4230 
4231   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4232 }
4233 
4234 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4235     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4236   MVT ExtVT = Op.getSimpleValueType();
4237   // Only custom-lower extensions from fixed-length vector types.
4238   if (!ExtVT.isFixedLengthVector())
4239     return Op;
4240   MVT VT = Op.getOperand(0).getSimpleValueType();
4241   // Grab the canonical container type for the extended type. Infer the smaller
4242   // type from that to ensure the same number of vector elements, as we know
4243   // the LMUL will be sufficient to hold the smaller type.
4244   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
4245   // Build the narrower container type manually so that the source and the
4246   // extended destination have the same number of vector elements.
4247   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4248                                      ContainerExtVT.getVectorElementCount());
4249 
4250   SDValue Op1 =
4251       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4252 
4253   SDLoc DL(Op);
4254   SDValue Mask, VL;
4255   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4256 
4257   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4258 
4259   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4260 }
4261 
4262 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4263 // setcc operation:
4264 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
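// This typically selects to something like (registers illustrative):
//   vand.vi  v8, v8, 1
//   vmsne.vi v0, v8, 0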
4265 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
4266                                                   SelectionDAG &DAG) const {
4267   SDLoc DL(Op);
4268   EVT MaskVT = Op.getValueType();
4269   // Only expect to custom-lower truncations to mask types
4270   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4271          "Unexpected type for vector mask lowering");
4272   SDValue Src = Op.getOperand(0);
4273   MVT VecVT = Src.getSimpleValueType();
4274 
4275   // If this is a fixed vector, we need to convert it to a scalable vector.
4276   MVT ContainerVT = VecVT;
4277   if (VecVT.isFixedLengthVector()) {
4278     ContainerVT = getContainerForFixedLengthVector(VecVT);
4279     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4280   }
4281 
4282   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4283   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4284 
4285   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
4286   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
4287 
4288   if (VecVT.isScalableVector()) {
4289     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
4290     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
4291   }
4292 
4293   SDValue Mask, VL;
4294   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4295 
4296   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4297   SDValue Trunc =
4298       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4299   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4300                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4301   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4302 }
4303 
4304 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4305 // first position of a vector, and that vector is slid up to the insert index.
4306 // By limiting the active vector length to index+1 and merging with the
4307 // original vector (with an undisturbed tail policy for elements >= VL), we
4308 // achieve the desired result of leaving all elements untouched except the one
4309 // at VL-1, which is replaced with the desired value.
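// For a non-zero index this typically becomes (registers illustrative; use
// vfmv.s.f for floating-point values):
//   vmv.s.x     v9, a0
//   vslideup.vx v8, v9, idx
// with VL limited to idx+1 and a tail-undisturbed policy for the slideup.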
4310 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4311                                                     SelectionDAG &DAG) const {
4312   SDLoc DL(Op);
4313   MVT VecVT = Op.getSimpleValueType();
4314   SDValue Vec = Op.getOperand(0);
4315   SDValue Val = Op.getOperand(1);
4316   SDValue Idx = Op.getOperand(2);
4317 
4318   if (VecVT.getVectorElementType() == MVT::i1) {
4319     // FIXME: For now we just promote to an i8 vector and insert into that,
4320     // but this is probably not optimal.
4321     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4322     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4323     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4324     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4325   }
4326 
4327   MVT ContainerVT = VecVT;
4328   // If the operand is a fixed-length vector, convert to a scalable one.
4329   if (VecVT.isFixedLengthVector()) {
4330     ContainerVT = getContainerForFixedLengthVector(VecVT);
4331     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4332   }
4333 
4334   MVT XLenVT = Subtarget.getXLenVT();
4335 
4336   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4337   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4338   // Even i64-element vectors on RV32 can be lowered without scalar
4339   // legalization if the most-significant 32 bits of the value are not affected
4340   // by the sign-extension of the lower 32 bits.
4341   // TODO: We could also catch sign extensions of a 32-bit value.
4342   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4343     const auto *CVal = cast<ConstantSDNode>(Val);
4344     if (isInt<32>(CVal->getSExtValue())) {
4345       IsLegalInsert = true;
4346       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4347     }
4348   }
4349 
4350   SDValue Mask, VL;
4351   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4352 
4353   SDValue ValInVec;
4354 
4355   if (IsLegalInsert) {
4356     unsigned Opc =
4357         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4358     if (isNullConstant(Idx)) {
4359       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4360       if (!VecVT.isFixedLengthVector())
4361         return Vec;
4362       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4363     }
4364     ValInVec =
4365         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4366   } else {
4367     // On RV32, i64-element vectors must be specially handled to place the
4368     // value at element 0, by using two vslide1up instructions in sequence on
4369     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4370     // this.
4371     SDValue One = DAG.getConstant(1, DL, XLenVT);
4372     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4373     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4374     MVT I32ContainerVT =
4375         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4376     SDValue I32Mask =
4377         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4378     // Limit the active VL to two.
4379     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
4380     // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
4381     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4382     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
4383                            InsertI64VL);
4384     // First slide in the hi value, then the lo in underneath it.
4385     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
4386                            ValHi, I32Mask, InsertI64VL);
4387     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
4388                            ValLo, I32Mask, InsertI64VL);
4389     // Bitcast back to the right container type.
4390     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4391   }
4392 
4393   // Now that the value is in a vector, slide it into position.
4394   SDValue InsertVL =
4395       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4396   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4397                                 ValInVec, Idx, Mask, InsertVL);
4398   if (!VecVT.isFixedLengthVector())
4399     return Slideup;
4400   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4401 }
4402 
4403 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4404 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4405 // types this is done using VMV_X_S to allow us to glean information about the
4406 // sign bits of the result.
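// For a non-zero index this typically becomes (registers illustrative):
//   vslidedown.vx v8, v8, a0
//   vmv.x.s       a0, v8
// with VL limited to 1 for the slidedown.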
4407 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4408                                                      SelectionDAG &DAG) const {
4409   SDLoc DL(Op);
4410   SDValue Idx = Op.getOperand(1);
4411   SDValue Vec = Op.getOperand(0);
4412   EVT EltVT = Op.getValueType();
4413   MVT VecVT = Vec.getSimpleValueType();
4414   MVT XLenVT = Subtarget.getXLenVT();
4415 
4416   if (VecVT.getVectorElementType() == MVT::i1) {
4417     if (VecVT.isFixedLengthVector()) {
4418       unsigned NumElts = VecVT.getVectorNumElements();
4419       if (NumElts >= 8) {
4420         MVT WideEltVT;
4421         unsigned WidenVecLen;
4422         SDValue ExtractElementIdx;
4423         SDValue ExtractBitIdx;
4424         unsigned MaxEEW = Subtarget.getMaxELENForFixedLengthVectors();
4425         MVT LargestEltVT = MVT::getIntegerVT(
4426             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4427         if (NumElts <= LargestEltVT.getSizeInBits()) {
4428           assert(isPowerOf2_32(NumElts) &&
4429                  "the number of elements should be power of 2");
4430           WideEltVT = MVT::getIntegerVT(NumElts);
4431           WidenVecLen = 1;
4432           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4433           ExtractBitIdx = Idx;
4434         } else {
4435           WideEltVT = LargestEltVT;
4436           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4437           // extract element index = index / element width
4438           ExtractElementIdx = DAG.getNode(
4439               ISD::SRL, DL, XLenVT, Idx,
4440               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4441           // mask bit index = index % element width
4442           ExtractBitIdx = DAG.getNode(
4443               ISD::AND, DL, XLenVT, Idx,
4444               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4445         }
4446         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4447         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4448         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4449                                          Vec, ExtractElementIdx);
4450         // Extract the bit from GPR.
4451         SDValue ShiftRight =
4452             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4453         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4454                            DAG.getConstant(1, DL, XLenVT));
4455       }
4456     }
4457     // Otherwise, promote to an i8 vector and extract from that.
4458     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4459     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4460     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4461   }
4462 
4463   // If this is a fixed vector, we need to convert it to a scalable vector.
4464   MVT ContainerVT = VecVT;
4465   if (VecVT.isFixedLengthVector()) {
4466     ContainerVT = getContainerForFixedLengthVector(VecVT);
4467     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4468   }
4469 
4470   // If the index is 0, the vector is already in the right position.
4471   if (!isNullConstant(Idx)) {
4472     // Use a VL of 1 to avoid processing more elements than we need.
4473     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4474     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4475     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4476     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4477                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4478   }
4479 
4480   if (!EltVT.isInteger()) {
4481     // Floating-point extracts are handled in TableGen.
4482     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4483                        DAG.getConstant(0, DL, XLenVT));
4484   }
4485 
4486   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4487   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4488 }
4489 
4490 // Some RVV intrinsics may claim that they want an integer operand to be
4491 // promoted or expanded.
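// For example, on RV64 an i32 scalar operand of a .vx intrinsic is simply
// extended to i64 (XLenVT), whereas on RV32 an i64 scalar operand of an SEW=64
// intrinsic is either truncated (when it is a sign-extended 32-bit constant)
// or rebuilt as a splat from its two 32-bit halves.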
4492 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
4493                                           const RISCVSubtarget &Subtarget) {
4494   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4495           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4496          "Unexpected opcode");
4497 
4498   if (!Subtarget.hasVInstructions())
4499     return SDValue();
4500 
4501   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4502   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4503   SDLoc DL(Op);
4504 
4505   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4506       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4507   if (!II || !II->hasSplatOperand())
4508     return SDValue();
4509 
4510   unsigned SplatOp = II->SplatOperand + 1 + HasChain;
4511   assert(SplatOp < Op.getNumOperands());
4512 
4513   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4514   SDValue &ScalarOp = Operands[SplatOp];
4515   MVT OpVT = ScalarOp.getSimpleValueType();
4516   MVT XLenVT = Subtarget.getXLenVT();
4517 
4518   // If this isn't a scalar, or its type is XLenVT, we're done.
4519   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4520     return SDValue();
4521 
4522   // Simplest case is that the operand needs to be promoted to XLenVT.
4523   if (OpVT.bitsLT(XLenVT)) {
4524     // If the operand is a constant, sign extend to increase our chances
4525     // of being able to use a .vi instruction. ANY_EXTEND would become a
4526     // zero extend and the simm5 check in isel would fail.
4527     // FIXME: Should we ignore the upper bits in isel instead?
4528     unsigned ExtOpc =
4529         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4530     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4531     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4532   }
4533 
4534   // Use the previous operand to get the vXi64 VT. The result might be a mask
4535   // VT for compares. Using the previous operand assumes that the previous
4536   // operand will never have a smaller element size than a scalar operand and
4537   // that a widening operation never uses SEW=64.
4538   // NOTE: If this fails the below assert, we can probably just find the
4539   // element count from any operand or result and use it to construct the VT.
4540   assert(II->SplatOperand > 0 && "Unexpected splat operand!");
4541   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4542 
4543   // The more complex case is when the scalar is larger than XLenVT.
4544   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4545          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4546 
4547   // If this is a sign-extended 32-bit constant, we can truncate it and rely
4548   // on the instruction to sign-extend since SEW>XLEN.
4549   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
4550     if (isInt<32>(CVal->getSExtValue())) {
4551       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4552       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4553     }
4554   }
4555 
4556   // We need to convert the scalar to a splat vector.
4557   // FIXME: Can we implicitly truncate the scalar if it is known to
4558   // be sign extended?
4559   SDValue VL = getVLOperand(Op);
4560   assert(VL.getValueType() == XLenVT);
4561   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
4562   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4563 }
4564 
4565 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4566                                                      SelectionDAG &DAG) const {
4567   unsigned IntNo = Op.getConstantOperandVal(0);
4568   SDLoc DL(Op);
4569   MVT XLenVT = Subtarget.getXLenVT();
4570 
4571   switch (IntNo) {
4572   default:
4573     break; // Don't custom lower most intrinsics.
4574   case Intrinsic::thread_pointer: {
4575     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4576     return DAG.getRegister(RISCV::X4, PtrVT);
4577   }
4578   case Intrinsic::riscv_orc_b:
4579   case Intrinsic::riscv_brev8: {
4580     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
4581     unsigned Opc =
4582         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4583     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4584                        DAG.getConstant(7, DL, XLenVT));
4585   }
4586   case Intrinsic::riscv_grev:
4587   case Intrinsic::riscv_gorc: {
4588     unsigned Opc =
4589         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4590     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4591   }
4592   case Intrinsic::riscv_zip:
4593   case Intrinsic::riscv_unzip: {
4594     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4595     // For i32 the immediate is 15. For i64 the immediate is 31.
4596     unsigned Opc =
4597         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4598     unsigned BitWidth = Op.getValueSizeInBits();
4599     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4600     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4601                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4602   }
4603   case Intrinsic::riscv_shfl:
4604   case Intrinsic::riscv_unshfl: {
4605     unsigned Opc =
4606         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4607     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4608   }
4609   case Intrinsic::riscv_bcompress:
4610   case Intrinsic::riscv_bdecompress: {
4611     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4612                                                        : RISCVISD::BDECOMPRESS;
4613     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4614   }
4615   case Intrinsic::riscv_bfp:
4616     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4617                        Op.getOperand(2));
4618   case Intrinsic::riscv_fsl:
4619     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4620                        Op.getOperand(2), Op.getOperand(3));
4621   case Intrinsic::riscv_fsr:
4622     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4623                        Op.getOperand(2), Op.getOperand(3));
4624   case Intrinsic::riscv_vmv_x_s:
4625     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4626     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4627                        Op.getOperand(1));
4628   case Intrinsic::riscv_vmv_v_x:
4629     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4630                             Op.getSimpleValueType(), DL, DAG, Subtarget);
4631   case Intrinsic::riscv_vfmv_v_f:
4632     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4633                        Op.getOperand(1), Op.getOperand(2));
4634   case Intrinsic::riscv_vmv_s_x: {
4635     SDValue Scalar = Op.getOperand(2);
4636 
4637     if (Scalar.getValueType().bitsLE(XLenVT)) {
4638       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4639       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4640                          Op.getOperand(1), Scalar, Op.getOperand(3));
4641     }
4642 
4643     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4644 
4645     // This is an i64 value that lives in two scalar registers. We have to
4646     // insert this in a convoluted way. First we build a vXi64 splat containing
4647     // the two values, which we assemble using some bit math. Next we'll use
4648     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4649     // to merge element 0 from our splat into the source vector.
4650     // FIXME: This is probably not the best way to do this, but it is
4651     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4652     // point.
4653     //   sw lo, (a0)
4654     //   sw hi, 4(a0)
4655     //   vlse vX, (a0)
4656     //
4657     //   vid.v      vVid
4658     //   vmseq.vx   mMask, vVid, 0
4659     //   vmerge.vvm vDest, vSrc, vVal, mMask
4660     MVT VT = Op.getSimpleValueType();
4661     SDValue Vec = Op.getOperand(1);
4662     SDValue VL = getVLOperand(Op);
4663 
4664     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
4665     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4666                                       DAG.getConstant(0, DL, MVT::i32), VL);
4667 
4668     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
4669     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4670     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4671     SDValue SelectCond =
4672         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4673                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4674     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4675                        Vec, VL);
4676   }
4677   case Intrinsic::riscv_vslide1up:
4678   case Intrinsic::riscv_vslide1down:
4679   case Intrinsic::riscv_vslide1up_mask:
4680   case Intrinsic::riscv_vslide1down_mask: {
4681     // We need to special case these when the scalar is larger than XLen.
4682     unsigned NumOps = Op.getNumOperands();
4683     bool IsMasked = NumOps == 7;
4684     unsigned OpOffset = IsMasked ? 1 : 0;
4685     SDValue Scalar = Op.getOperand(2 + OpOffset);
4686     if (Scalar.getValueType().bitsLE(XLenVT))
4687       break;
4688 
4689     // Splatting a sign extended constant is fine.
4690     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
4691       if (isInt<32>(CVal->getSExtValue()))
4692         break;
4693 
4694     MVT VT = Op.getSimpleValueType();
4695     assert(VT.getVectorElementType() == MVT::i64 &&
4696            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
4697 
4698     // Convert the vector source to the equivalent nxvXi32 vector.
4699     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4700     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
4701 
4702     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4703                                    DAG.getConstant(0, DL, XLenVT));
4704     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4705                                    DAG.getConstant(1, DL, XLenVT));
4706 
4707     // Double the VL since we halved SEW.
4708     SDValue VL = getVLOperand(Op);
4709     SDValue I32VL =
4710         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4711 
4712     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
4713     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
4714 
4715     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4716     // instructions.
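    // For slide1up this is roughly (registers illustrative):
    //   vslide1up.vx v12, v8, hi
    //   vslide1up.vx v16, v12, lo
    // and for slide1down the lo half is inserted first.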
4717     if (IntNo == Intrinsic::riscv_vslide1up ||
4718         IntNo == Intrinsic::riscv_vslide1up_mask) {
4719       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
4720                         I32Mask, I32VL);
4721       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
4722                         I32Mask, I32VL);
4723     } else {
4724       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
4725                         I32Mask, I32VL);
4726       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
4727                         I32Mask, I32VL);
4728     }
4729 
4730     // Convert back to nxvXi64.
4731     Vec = DAG.getBitcast(VT, Vec);
4732 
4733     if (!IsMasked)
4734       return Vec;
4735 
4736     // Apply mask after the operation.
4737     SDValue Mask = Op.getOperand(NumOps - 3);
4738     SDValue MaskedOff = Op.getOperand(1);
4739     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
4740   }
4741   }
4742 
4743   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4744 }
4745 
4746 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4747                                                     SelectionDAG &DAG) const {
4748   unsigned IntNo = Op.getConstantOperandVal(1);
4749   switch (IntNo) {
4750   default:
4751     break;
4752   case Intrinsic::riscv_masked_strided_load: {
4753     SDLoc DL(Op);
4754     MVT XLenVT = Subtarget.getXLenVT();
4755 
4756     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4757     // the selection of the masked intrinsics doesn't do this for us.
4758     SDValue Mask = Op.getOperand(5);
4759     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4760 
4761     MVT VT = Op->getSimpleValueType(0);
4762     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4763 
4764     SDValue PassThru = Op.getOperand(2);
4765     if (!IsUnmasked) {
4766       MVT MaskVT =
4767           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4768       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4769       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4770     }
4771 
4772     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4773 
4774     SDValue IntID = DAG.getTargetConstant(
4775         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4776         XLenVT);
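    // This ultimately selects to a strided load such as (illustrative;
    // vlse<sew> in general, with the trailing mask operand dropped when
    // unmasked):
    //   vlse32.v v8, (a0), a1, v0.t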
4777 
4778     auto *Load = cast<MemIntrinsicSDNode>(Op);
4779     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4780     if (IsUnmasked)
4781       Ops.push_back(DAG.getUNDEF(ContainerVT));
4782     else
4783       Ops.push_back(PassThru);
4784     Ops.push_back(Op.getOperand(3)); // Ptr
4785     Ops.push_back(Op.getOperand(4)); // Stride
4786     if (!IsUnmasked)
4787       Ops.push_back(Mask);
4788     Ops.push_back(VL);
4789     if (!IsUnmasked) {
4790       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4791       Ops.push_back(Policy);
4792     }
4793 
4794     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4795     SDValue Result =
4796         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4797                                 Load->getMemoryVT(), Load->getMemOperand());
4798     SDValue Chain = Result.getValue(1);
4799     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4800     return DAG.getMergeValues({Result, Chain}, DL);
4801   }
4802   }
4803 
4804   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4805 }
4806 
4807 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4808                                                  SelectionDAG &DAG) const {
4809   unsigned IntNo = Op.getConstantOperandVal(1);
4810   switch (IntNo) {
4811   default:
4812     break;
4813   case Intrinsic::riscv_masked_strided_store: {
4814     SDLoc DL(Op);
4815     MVT XLenVT = Subtarget.getXLenVT();
4816 
4817     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4818     // the selection of the masked intrinsics doesn't do this for us.
4819     SDValue Mask = Op.getOperand(5);
4820     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4821 
4822     SDValue Val = Op.getOperand(2);
4823     MVT VT = Val.getSimpleValueType();
4824     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4825 
4826     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4827     if (!IsUnmasked) {
4828       MVT MaskVT =
4829           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4830       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4831     }
4832 
4833     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4834 
4835     SDValue IntID = DAG.getTargetConstant(
4836         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4837         XLenVT);
4838 
4839     auto *Store = cast<MemIntrinsicSDNode>(Op);
4840     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4841     Ops.push_back(Val);
4842     Ops.push_back(Op.getOperand(3)); // Ptr
4843     Ops.push_back(Op.getOperand(4)); // Stride
4844     if (!IsUnmasked)
4845       Ops.push_back(Mask);
4846     Ops.push_back(VL);
4847 
4848     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4849                                    Ops, Store->getMemoryVT(),
4850                                    Store->getMemOperand());
4851   }
4852   }
4853 
4854   return SDValue();
4855 }
4856 
4857 static MVT getLMUL1VT(MVT VT) {
4858   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4859          "Unexpected vector MVT");
4860   return MVT::getScalableVectorVT(
4861       VT.getVectorElementType(),
4862       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4863 }
4864 
4865 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4866   switch (ISDOpcode) {
4867   default:
4868     llvm_unreachable("Unhandled reduction");
4869   case ISD::VECREDUCE_ADD:
4870     return RISCVISD::VECREDUCE_ADD_VL;
4871   case ISD::VECREDUCE_UMAX:
4872     return RISCVISD::VECREDUCE_UMAX_VL;
4873   case ISD::VECREDUCE_SMAX:
4874     return RISCVISD::VECREDUCE_SMAX_VL;
4875   case ISD::VECREDUCE_UMIN:
4876     return RISCVISD::VECREDUCE_UMIN_VL;
4877   case ISD::VECREDUCE_SMIN:
4878     return RISCVISD::VECREDUCE_SMIN_VL;
4879   case ISD::VECREDUCE_AND:
4880     return RISCVISD::VECREDUCE_AND_VL;
4881   case ISD::VECREDUCE_OR:
4882     return RISCVISD::VECREDUCE_OR_VL;
4883   case ISD::VECREDUCE_XOR:
4884     return RISCVISD::VECREDUCE_XOR_VL;
4885   }
4886 }
4887 
4888 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
4889                                                          SelectionDAG &DAG,
4890                                                          bool IsVP) const {
4891   SDLoc DL(Op);
4892   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
4893   MVT VecVT = Vec.getSimpleValueType();
4894   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
4895           Op.getOpcode() == ISD::VECREDUCE_OR ||
4896           Op.getOpcode() == ISD::VECREDUCE_XOR ||
4897           Op.getOpcode() == ISD::VP_REDUCE_AND ||
4898           Op.getOpcode() == ISD::VP_REDUCE_OR ||
4899           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
4900          "Unexpected reduction lowering");
4901 
4902   MVT XLenVT = Subtarget.getXLenVT();
4903   assert(Op.getValueType() == XLenVT &&
4904          "Expected reduction output to be legalized to XLenVT");
4905 
4906   MVT ContainerVT = VecVT;
4907   if (VecVT.isFixedLengthVector()) {
4908     ContainerVT = getContainerForFixedLengthVector(VecVT);
4909     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4910   }
4911 
4912   SDValue Mask, VL;
4913   if (IsVP) {
4914     Mask = Op.getOperand(2);
4915     VL = Op.getOperand(3);
4916   } else {
4917     std::tie(Mask, VL) =
4918         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4919   }
4920 
4921   unsigned BaseOpc;
4922   ISD::CondCode CC;
4923   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4924 
4925   switch (Op.getOpcode()) {
4926   default:
4927     llvm_unreachable("Unhandled reduction");
4928   case ISD::VECREDUCE_AND:
4929   case ISD::VP_REDUCE_AND: {
4930     // vcpop ~x == 0
4931     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
4932     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
4933     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4934     CC = ISD::SETEQ;
4935     BaseOpc = ISD::AND;
4936     break;
4937   }
4938   case ISD::VECREDUCE_OR:
4939   case ISD::VP_REDUCE_OR:
4940     // vcpop x != 0
4941     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4942     CC = ISD::SETNE;
4943     BaseOpc = ISD::OR;
4944     break;
4945   case ISD::VECREDUCE_XOR:
4946   case ISD::VP_REDUCE_XOR: {
4947     // ((vcpop x) & 1) != 0
4948     SDValue One = DAG.getConstant(1, DL, XLenVT);
4949     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4950     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
4951     CC = ISD::SETNE;
4952     BaseOpc = ISD::XOR;
4953     break;
4954   }
4955   }
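  // As a rough illustration of the sequences built above (unmasked forms,
  // registers illustrative):
  //   AND: vmnot.m v8, v0 ; vcpop.m a0, v8 ; seqz a0, a0
  //   OR:  vcpop.m a0, v0 ; snez a0, a0
  //   XOR: vcpop.m a0, v0 ; andi a0, a0, 1 ; snez a0, a0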
4956 
4957   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
4958 
4959   if (!IsVP)
4960     return SetCC;
4961 
4962   // Now include the start value in the operation.
4963   // Note that we must return the start value when no elements are operated
4964   // upon. The vcpop instructions we've emitted in each case above will return
4965   // 0 for an inactive vector, and so we've already received the neutral value:
4966   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
4967   // can simply include the start value.
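  // For example (illustrative): a VP_REDUCE_AND with EVL == 0 sees vcpop
  // return 0, the SETEQ produce 1 (the AND identity), and the final AND with
  // the start value therefore returns the start value unchanged.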
4968   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
4969 }
4970 
4971 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
4972                                             SelectionDAG &DAG) const {
4973   SDLoc DL(Op);
4974   SDValue Vec = Op.getOperand(0);
4975   EVT VecEVT = Vec.getValueType();
4976 
4977   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
4978 
4979   // Due to ordering in legalize types we may have a vector type that needs to
4980   // be split. Do that manually so we can get down to a legal type.
4981   while (getTypeAction(*DAG.getContext(), VecEVT) ==
4982          TargetLowering::TypeSplitVector) {
4983     SDValue Lo, Hi;
4984     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
4985     VecEVT = Lo.getValueType();
4986     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
4987   }
4988 
4989   // TODO: The type may need to be widened rather than split. Or widened before
4990   // it can be split.
4991   if (!isTypeLegal(VecEVT))
4992     return SDValue();
4993 
4994   MVT VecVT = VecEVT.getSimpleVT();
4995   MVT VecEltVT = VecVT.getVectorElementType();
4996   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
4997 
4998   MVT ContainerVT = VecVT;
4999   if (VecVT.isFixedLengthVector()) {
5000     ContainerVT = getContainerForFixedLengthVector(VecVT);
5001     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5002   }
5003 
5004   MVT M1VT = getLMUL1VT(ContainerVT);
5005   MVT XLenVT = Subtarget.getXLenVT();
5006 
5007   SDValue Mask, VL;
5008   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5009 
5010   SDValue NeutralElem =
5011       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
5012   SDValue IdentitySplat = lowerScalarSplat(
5013       NeutralElem, DAG.getConstant(1, DL, XLenVT), M1VT, DL, DAG, Subtarget);
5014   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
5015                                   IdentitySplat, Mask, VL);
5016   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5017                              DAG.getConstant(0, DL, XLenVT));
5018   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5019 }
5020 
5021 // Given a reduction op, this function returns the matching reduction opcode,
5022 // the vector SDValue and the scalar SDValue required to lower this to a
5023 // RISCVISD node.
5024 static std::tuple<unsigned, SDValue, SDValue>
5025 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
5026   SDLoc DL(Op);
5027   auto Flags = Op->getFlags();
5028   unsigned Opcode = Op.getOpcode();
5029   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
5030   switch (Opcode) {
5031   default:
5032     llvm_unreachable("Unhandled reduction");
5033   case ISD::VECREDUCE_FADD: {
5034     // Use positive zero if we can. It is cheaper to materialize.
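    // (Illustrative note: a +0.0 immediate can typically be produced by
    // moving x0 into an FPR, while -0.0 usually needs extra instructions or
    // a constant-pool load.)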
5035     SDValue Zero =
5036         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
5037     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
5038   }
5039   case ISD::VECREDUCE_SEQ_FADD:
5040     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
5041                            Op.getOperand(0));
5042   case ISD::VECREDUCE_FMIN:
5043     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
5044                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5045   case ISD::VECREDUCE_FMAX:
5046     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
5047                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5048   }
5049 }
5050 
5051 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
5052                                               SelectionDAG &DAG) const {
5053   SDLoc DL(Op);
5054   MVT VecEltVT = Op.getSimpleValueType();
5055 
5056   unsigned RVVOpcode;
5057   SDValue VectorVal, ScalarVal;
5058   std::tie(RVVOpcode, VectorVal, ScalarVal) =
5059       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
5060   MVT VecVT = VectorVal.getSimpleValueType();
5061 
5062   MVT ContainerVT = VecVT;
5063   if (VecVT.isFixedLengthVector()) {
5064     ContainerVT = getContainerForFixedLengthVector(VecVT);
5065     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
5066   }
5067 
5068   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
5069   MVT XLenVT = Subtarget.getXLenVT();
5070 
5071   SDValue Mask, VL;
5072   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5073 
5074   SDValue ScalarSplat = lowerScalarSplat(
5075       ScalarVal, DAG.getConstant(1, DL, XLenVT), M1VT, DL, DAG, Subtarget);
5076   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
5077                                   VectorVal, ScalarSplat, Mask, VL);
5078   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5079                      DAG.getConstant(0, DL, XLenVT));
5080 }
5081 
5082 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
5083   switch (ISDOpcode) {
5084   default:
5085     llvm_unreachable("Unhandled reduction");
5086   case ISD::VP_REDUCE_ADD:
5087     return RISCVISD::VECREDUCE_ADD_VL;
5088   case ISD::VP_REDUCE_UMAX:
5089     return RISCVISD::VECREDUCE_UMAX_VL;
5090   case ISD::VP_REDUCE_SMAX:
5091     return RISCVISD::VECREDUCE_SMAX_VL;
5092   case ISD::VP_REDUCE_UMIN:
5093     return RISCVISD::VECREDUCE_UMIN_VL;
5094   case ISD::VP_REDUCE_SMIN:
5095     return RISCVISD::VECREDUCE_SMIN_VL;
5096   case ISD::VP_REDUCE_AND:
5097     return RISCVISD::VECREDUCE_AND_VL;
5098   case ISD::VP_REDUCE_OR:
5099     return RISCVISD::VECREDUCE_OR_VL;
5100   case ISD::VP_REDUCE_XOR:
5101     return RISCVISD::VECREDUCE_XOR_VL;
5102   case ISD::VP_REDUCE_FADD:
5103     return RISCVISD::VECREDUCE_FADD_VL;
5104   case ISD::VP_REDUCE_SEQ_FADD:
5105     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
5106   case ISD::VP_REDUCE_FMAX:
5107     return RISCVISD::VECREDUCE_FMAX_VL;
5108   case ISD::VP_REDUCE_FMIN:
5109     return RISCVISD::VECREDUCE_FMIN_VL;
5110   }
5111 }
5112 
5113 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
5114                                            SelectionDAG &DAG) const {
5115   SDLoc DL(Op);
5116   SDValue Vec = Op.getOperand(1);
5117   EVT VecEVT = Vec.getValueType();
5118 
5119   // TODO: The type may need to be widened rather than split. Or widened before
5120   // it can be split.
5121   if (!isTypeLegal(VecEVT))
5122     return SDValue();
5123 
5124   MVT VecVT = VecEVT.getSimpleVT();
5125   MVT VecEltVT = VecVT.getVectorElementType();
5126   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5127 
5128   MVT ContainerVT = VecVT;
5129   if (VecVT.isFixedLengthVector()) {
5130     ContainerVT = getContainerForFixedLengthVector(VecVT);
5131     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5132   }
5133 
5134   SDValue VL = Op.getOperand(3);
5135   SDValue Mask = Op.getOperand(2);
5136 
5137   MVT M1VT = getLMUL1VT(ContainerVT);
5138   MVT XLenVT = Subtarget.getXLenVT();
5139   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5140 
5141   SDValue StartSplat =
5142       lowerScalarSplat(Op.getOperand(0), DAG.getConstant(1, DL, XLenVT), M1VT,
5143                        DL, DAG, Subtarget);
5144   SDValue Reduction =
5145       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5146   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5147                              DAG.getConstant(0, DL, XLenVT));
5148   if (!VecVT.isInteger())
5149     return Elt0;
5150   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5151 }
5152 
5153 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5154                                                    SelectionDAG &DAG) const {
5155   SDValue Vec = Op.getOperand(0);
5156   SDValue SubVec = Op.getOperand(1);
5157   MVT VecVT = Vec.getSimpleValueType();
5158   MVT SubVecVT = SubVec.getSimpleValueType();
5159 
5160   SDLoc DL(Op);
5161   MVT XLenVT = Subtarget.getXLenVT();
5162   unsigned OrigIdx = Op.getConstantOperandVal(2);
5163   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5164 
5165   // We don't have the ability to slide mask vectors up indexed by their i1
5166   // elements; the smallest we can do is i8. Often we are able to bitcast to
5167   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5168   // into a scalable one, we might not necessarily have enough scalable
5169   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
5170   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5171       (OrigIdx != 0 || !Vec.isUndef())) {
5172     if (VecVT.getVectorMinNumElements() >= 8 &&
5173         SubVecVT.getVectorMinNumElements() >= 8) {
5174       assert(OrigIdx % 8 == 0 && "Invalid index");
5175       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5176              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5177              "Unexpected mask vector lowering");
5178       OrigIdx /= 8;
5179       SubVecVT =
5180           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5181                            SubVecVT.isScalableVector());
5182       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5183                                VecVT.isScalableVector());
5184       Vec = DAG.getBitcast(VecVT, Vec);
5185       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5186     } else {
5187       // We can't slide this mask vector up indexed by its i1 elements.
5188       // This poses a problem when we wish to insert a scalable vector which
5189       // can't be re-expressed as a larger type. Just choose the slow path and
5190       // extend to a larger type, then truncate back down.
5191       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5192       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5193       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5194       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5195       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5196                         Op.getOperand(2));
5197       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5198       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5199     }
5200   }
5201 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector, as we only know the minimum
  // register size. Therefore we must slide the vector group up the full
  // amount.
5207   if (SubVecVT.isFixedLengthVector()) {
5208     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5209       return Op;
5210     MVT ContainerVT = VecVT;
5211     if (VecVT.isFixedLengthVector()) {
5212       ContainerVT = getContainerForFixedLengthVector(VecVT);
5213       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5214     }
5215     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5216                          DAG.getUNDEF(ContainerVT), SubVec,
5217                          DAG.getConstant(0, DL, XLenVT));
5218     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5219       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5220       return DAG.getBitcast(Op.getValueType(), SubVec);
5221     }
5222     SDValue Mask =
5223         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5224     // Set the vector length to only the number of elements we care about. Note
5225     // that for slideup this includes the offset.
5226     SDValue VL =
5227         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5228     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5229     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5230                                   SubVec, SlideupAmt, Mask, VL);
5231     if (VecVT.isFixedLengthVector())
5232       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5233     return DAG.getBitcast(Op.getValueType(), Slideup);
5234   }
5235 
5236   unsigned SubRegIdx, RemIdx;
5237   std::tie(SubRegIdx, RemIdx) =
5238       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5239           VecVT, SubVecVT, OrigIdx, TRI);
5240 
5241   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5242   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5243                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5244                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5245 
5246   // 1. If the Idx has been completely eliminated and this subvector's size is
5247   // a vector register or a multiple thereof, or the surrounding elements are
5248   // undef, then this is a subvector insert which naturally aligns to a vector
5249   // register. These can easily be handled using subregister manipulation.
5250   // 2. If the subvector is smaller than a vector register, then the insertion
5251   // must preserve the undisturbed elements of the register. We do this by
5252   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5253   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5254   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5255   // LMUL=1 type back into the larger vector (resolving to another subregister
  // operation). See below for how our VSLIDEUP works. We go via an LMUL=1 type
5257   // to avoid allocating a large register group to hold our subvector.
5258   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5259     return Op;
5260 
  // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
  // OFFSET<=i<VL set to the "subvector", and VL<=i<VLMAX set to the tail policy
5263   // (in our case undisturbed). This means we can set up a subvector insertion
5264   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5265   // size of the subvector.
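  // For example (illustrative): inserting a subvector of 2*vscale elements at
  // element index 3*vscale uses OFFSET = 3*vscale and VL = 5*vscale;
  // destination elements below 3*vscale and at or above 5*vscale are left
  // untouched.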
5266   MVT InterSubVT = VecVT;
5267   SDValue AlignedExtract = Vec;
5268   unsigned AlignedIdx = OrigIdx - RemIdx;
5269   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5270     InterSubVT = getLMUL1VT(VecVT);
5271     // Extract a subvector equal to the nearest full vector register type. This
    // should resolve to an EXTRACT_SUBREG instruction.
5273     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5274                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5275   }
5276 
5277   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5278   // For scalable vectors this must be further multiplied by vscale.
5279   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5280 
5281   SDValue Mask, VL;
5282   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5283 
5284   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5285   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5286   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5287   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5288 
5289   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5290                        DAG.getUNDEF(InterSubVT), SubVec,
5291                        DAG.getConstant(0, DL, XLenVT));
5292 
5293   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5294                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5295 
5296   // If required, insert this subvector back into the correct vector register.
5297   // This should resolve to an INSERT_SUBREG instruction.
5298   if (VecVT.bitsGT(InterSubVT))
5299     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5300                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5301 
5302   // We might have bitcast from a mask type: cast back to the original type if
5303   // required.
5304   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5305 }
5306 
5307 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5308                                                     SelectionDAG &DAG) const {
5309   SDValue Vec = Op.getOperand(0);
5310   MVT SubVecVT = Op.getSimpleValueType();
5311   MVT VecVT = Vec.getSimpleValueType();
5312 
5313   SDLoc DL(Op);
5314   MVT XLenVT = Subtarget.getXLenVT();
5315   unsigned OrigIdx = Op.getConstantOperandVal(1);
5316   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5317 
5318   // We don't have the ability to slide mask vectors down indexed by their i1
5319   // elements; the smallest we can do is i8. Often we are able to bitcast to
5320   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5321   // from a scalable one, we might not necessarily have enough scalable
5322   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
5323   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5324     if (VecVT.getVectorMinNumElements() >= 8 &&
5325         SubVecVT.getVectorMinNumElements() >= 8) {
5326       assert(OrigIdx % 8 == 0 && "Invalid index");
5327       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5328              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5329              "Unexpected mask vector lowering");
5330       OrigIdx /= 8;
5331       SubVecVT =
5332           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5333                            SubVecVT.isScalableVector());
5334       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5335                                VecVT.isScalableVector());
5336       Vec = DAG.getBitcast(VecVT, Vec);
5337     } else {
      // We can't slide this mask vector down indexed by its i1 elements.
5339       // This poses a problem when we wish to extract a scalable vector which
5340       // can't be re-expressed as a larger type. Just choose the slow path and
5341       // extend to a larger type, then truncate back down.
      // TODO: We could probably improve this when extracting certain
      // fixed-length vectors from fixed-length vectors, where we can extract
      // as i8 and shift the correct element right to reach the desired
      // subvector.
5345       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5346       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5347       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5348       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5349                         Op.getOperand(1));
5350       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5351       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5352     }
5353   }
5354 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector, as we only know the minimum
  // register size. Therefore we must slide the vector group down the full
  // amount.
5360   if (SubVecVT.isFixedLengthVector()) {
5361     // With an index of 0 this is a cast-like subvector, which can be performed
5362     // with subregister operations.
5363     if (OrigIdx == 0)
5364       return Op;
5365     MVT ContainerVT = VecVT;
5366     if (VecVT.isFixedLengthVector()) {
5367       ContainerVT = getContainerForFixedLengthVector(VecVT);
5368       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5369     }
5370     SDValue Mask =
5371         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5372     // Set the vector length to only the number of elements we care about. This
5373     // avoids sliding down elements we're going to discard straight away.
5374     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5375     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5376     SDValue Slidedown =
5377         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5378                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5379     // Now we can use a cast-like subvector extract to get the result.
5380     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5381                             DAG.getConstant(0, DL, XLenVT));
5382     return DAG.getBitcast(Op.getValueType(), Slidedown);
5383   }
5384 
5385   unsigned SubRegIdx, RemIdx;
5386   std::tie(SubRegIdx, RemIdx) =
5387       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5388           VecVT, SubVecVT, OrigIdx, TRI);
5389 
5390   // If the Idx has been completely eliminated then this is a subvector extract
5391   // which naturally aligns to a vector register. These can easily be handled
5392   // using subregister manipulation.
5393   if (RemIdx == 0)
5394     return Op;
5395 
5396   // Else we must shift our vector register directly to extract the subvector.
5397   // Do this using VSLIDEDOWN.
5398 
5399   // If the vector type is an LMUL-group type, extract a subvector equal to the
  // nearest full vector register type. This should resolve to an EXTRACT_SUBREG
5401   // instruction.
5402   MVT InterSubVT = VecVT;
5403   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5404     InterSubVT = getLMUL1VT(VecVT);
5405     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5406                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5407   }
5408 
5409   // Slide this vector register down by the desired number of elements in order
5410   // to place the desired subvector starting at element 0.
5411   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5412   // For scalable vectors this must be further multiplied by vscale.
5413   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5414 
5415   SDValue Mask, VL;
5416   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5417   SDValue Slidedown =
5418       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5419                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5420 
5421   // Now the vector is in the right position, extract our final subvector. This
5422   // should resolve to a COPY.
5423   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5424                           DAG.getConstant(0, DL, XLenVT));
5425 
5426   // We might have bitcast from a mask type: cast back to the original type if
5427   // required.
5428   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5429 }
5430 
5431 // Lower step_vector to the vid instruction. Any non-identity step value must
// be accounted for by manual expansion.
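// For example (illustrative): step_vector with a step of 4 becomes vid.v
// followed by a shift left by 2 (likely selected as vsll.vi), while a step of
// 3 becomes vid.v followed by a multiply with a splat of 3 (likely vmul.vx).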
5433 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5434                                               SelectionDAG &DAG) const {
5435   SDLoc DL(Op);
5436   MVT VT = Op.getSimpleValueType();
5437   MVT XLenVT = Subtarget.getXLenVT();
5438   SDValue Mask, VL;
5439   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5440   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5441   uint64_t StepValImm = Op.getConstantOperandVal(0);
5442   if (StepValImm != 1) {
5443     if (isPowerOf2_64(StepValImm)) {
5444       SDValue StepVal =
5445           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
5446                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5447       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5448     } else {
5449       SDValue StepVal = lowerScalarSplat(
5450           DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT,
5451           DL, DAG, Subtarget);
5452       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5453     }
5454   }
5455   return StepVec;
5456 }
5457 
5458 // Implement vector_reverse using vrgather.vv with indices determined by
5459 // subtracting the id of each element from (VLMAX-1). This will convert
5460 // the indices like so:
5461 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5462 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5463 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5464                                                  SelectionDAG &DAG) const {
5465   SDLoc DL(Op);
5466   MVT VecVT = Op.getSimpleValueType();
5467   unsigned EltSize = VecVT.getScalarSizeInBits();
5468   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5469 
5470   unsigned MaxVLMAX = 0;
5471   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5472   if (VectorBitsMax != 0)
5473     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
5474 
5475   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5476   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5477 
5478   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5479   // to use vrgatherei16.vv.
5480   // TODO: It's also possible to use vrgatherei16.vv for other types to
5481   // decrease register width for the index calculation.
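  // For example (illustrative): with SEW=8 and LMUL=8 on a VLEN=512
  // implementation, VLMAX is 512, which cannot be represented in an i8 index,
  // so 16-bit indices are required.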
5482   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
    // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
    // Reverse each half, then reassemble them in reverse order.
    // NOTE: It's also possible that after splitting, VLMAX no longer
    // requires vrgatherei16.vv.
5487     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5488       SDValue Lo, Hi;
5489       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5490       EVT LoVT, HiVT;
5491       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5492       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5493       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5494       // Reassemble the low and high pieces reversed.
5495       // FIXME: This is a CONCAT_VECTORS.
5496       SDValue Res =
5497           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5498                       DAG.getIntPtrConstant(0, DL));
5499       return DAG.getNode(
5500           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5501           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5502     }
5503 
5504     // Just promote the int type to i16 which will double the LMUL.
5505     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5506     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5507   }
5508 
5509   MVT XLenVT = Subtarget.getXLenVT();
5510   SDValue Mask, VL;
5511   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5512 
5513   // Calculate VLMAX-1 for the desired SEW.
5514   unsigned MinElts = VecVT.getVectorMinNumElements();
5515   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5516                               DAG.getConstant(MinElts, DL, XLenVT));
5517   SDValue VLMinus1 =
5518       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5519 
5520   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5521   bool IsRV32E64 =
5522       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5523   SDValue SplatVL;
5524   if (!IsRV32E64)
5525     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5526   else
5527     SplatVL =
5528         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, VLMinus1,
5529                     DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
5530 
5531   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5532   SDValue Indices =
5533       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5534 
5535   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5536 }
5537 
5538 SDValue
5539 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5540                                                      SelectionDAG &DAG) const {
5541   SDLoc DL(Op);
5542   auto *Load = cast<LoadSDNode>(Op);
5543 
5544   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5545                                         Load->getMemoryVT(),
5546                                         *Load->getMemOperand()) &&
5547          "Expecting a correctly-aligned load");
5548 
5549   MVT VT = Op.getSimpleValueType();
5550   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5551 
5552   SDValue VL =
5553       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5554 
5555   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5556   SDValue NewLoad = DAG.getMemIntrinsicNode(
5557       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
5558       Load->getMemoryVT(), Load->getMemOperand());
5559 
5560   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5561   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5562 }
5563 
5564 SDValue
5565 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5566                                                       SelectionDAG &DAG) const {
5567   SDLoc DL(Op);
5568   auto *Store = cast<StoreSDNode>(Op);
5569 
5570   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5571                                         Store->getMemoryVT(),
5572                                         *Store->getMemOperand()) &&
5573          "Expecting a correctly-aligned store");
5574 
5575   SDValue StoreVal = Store->getValue();
5576   MVT VT = StoreVal.getSimpleValueType();
5577 
  // If the size is less than a byte, we need to pad with zeros to make a byte.
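  // For example (illustrative): a v4i1 store value is widened to v8i1 by
  // inserting it at element 0 of an all-zero vector, so a whole byte is
  // written with the unused bits cleared.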
5579   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5580     VT = MVT::v8i1;
5581     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5582                            DAG.getConstant(0, DL, VT), StoreVal,
5583                            DAG.getIntPtrConstant(0, DL));
5584   }
5585 
5586   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5587 
5588   SDValue VL =
5589       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5590 
5591   SDValue NewValue =
5592       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5593   return DAG.getMemIntrinsicNode(
5594       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
5595       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
5596       Store->getMemoryVT(), Store->getMemOperand());
5597 }
5598 
5599 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5600                                              SelectionDAG &DAG) const {
5601   SDLoc DL(Op);
5602   MVT VT = Op.getSimpleValueType();
5603 
5604   const auto *MemSD = cast<MemSDNode>(Op);
5605   EVT MemVT = MemSD->getMemoryVT();
5606   MachineMemOperand *MMO = MemSD->getMemOperand();
5607   SDValue Chain = MemSD->getChain();
5608   SDValue BasePtr = MemSD->getBasePtr();
5609 
5610   SDValue Mask, PassThru, VL;
5611   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5612     Mask = VPLoad->getMask();
5613     PassThru = DAG.getUNDEF(VT);
5614     VL = VPLoad->getVectorLength();
5615   } else {
5616     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5617     Mask = MLoad->getMask();
5618     PassThru = MLoad->getPassThru();
5619   }
5620 
5621   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5622 
5623   MVT XLenVT = Subtarget.getXLenVT();
5624 
5625   MVT ContainerVT = VT;
5626   if (VT.isFixedLengthVector()) {
5627     ContainerVT = getContainerForFixedLengthVector(VT);
5628     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5629     if (!IsUnmasked) {
5630       MVT MaskVT =
5631           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5632       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5633     }
5634   }
5635 
5636   if (!VL)
5637     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5638 
5639   unsigned IntID =
5640       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5641   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5642   if (IsUnmasked)
5643     Ops.push_back(DAG.getUNDEF(ContainerVT));
5644   else
5645     Ops.push_back(PassThru);
5646   Ops.push_back(BasePtr);
5647   if (!IsUnmasked)
5648     Ops.push_back(Mask);
5649   Ops.push_back(VL);
5650   if (!IsUnmasked)
5651     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5652 
5653   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5654 
5655   SDValue Result =
5656       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5657   Chain = Result.getValue(1);
5658 
5659   if (VT.isFixedLengthVector())
5660     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5661 
5662   return DAG.getMergeValues({Result, Chain}, DL);
5663 }
5664 
5665 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5666                                               SelectionDAG &DAG) const {
5667   SDLoc DL(Op);
5668 
5669   const auto *MemSD = cast<MemSDNode>(Op);
5670   EVT MemVT = MemSD->getMemoryVT();
5671   MachineMemOperand *MMO = MemSD->getMemOperand();
5672   SDValue Chain = MemSD->getChain();
5673   SDValue BasePtr = MemSD->getBasePtr();
5674   SDValue Val, Mask, VL;
5675 
5676   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5677     Val = VPStore->getValue();
5678     Mask = VPStore->getMask();
5679     VL = VPStore->getVectorLength();
5680   } else {
5681     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5682     Val = MStore->getValue();
5683     Mask = MStore->getMask();
5684   }
5685 
5686   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5687 
5688   MVT VT = Val.getSimpleValueType();
5689   MVT XLenVT = Subtarget.getXLenVT();
5690 
5691   MVT ContainerVT = VT;
5692   if (VT.isFixedLengthVector()) {
5693     ContainerVT = getContainerForFixedLengthVector(VT);
5694 
5695     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5696     if (!IsUnmasked) {
5697       MVT MaskVT =
5698           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5699       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5700     }
5701   }
5702 
5703   if (!VL)
5704     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5705 
5706   unsigned IntID =
5707       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5708   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5709   Ops.push_back(Val);
5710   Ops.push_back(BasePtr);
5711   if (!IsUnmasked)
5712     Ops.push_back(Mask);
5713   Ops.push_back(VL);
5714 
5715   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5716                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5717 }
5718 
5719 SDValue
5720 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5721                                                       SelectionDAG &DAG) const {
5722   MVT InVT = Op.getOperand(0).getSimpleValueType();
5723   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5724 
5725   MVT VT = Op.getSimpleValueType();
5726 
5727   SDValue Op1 =
5728       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5729   SDValue Op2 =
5730       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5731 
5732   SDLoc DL(Op);
5733   SDValue VL =
5734       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5735 
5736   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5737   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5738 
5739   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5740                             Op.getOperand(2), Mask, VL);
5741 
5742   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5743 }
5744 
5745 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5746     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5747   MVT VT = Op.getSimpleValueType();
5748 
5749   if (VT.getVectorElementType() == MVT::i1)
5750     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5751 
5752   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5753 }
5754 
5755 SDValue
5756 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5757                                                       SelectionDAG &DAG) const {
5758   unsigned Opc;
5759   switch (Op.getOpcode()) {
5760   default: llvm_unreachable("Unexpected opcode!");
5761   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5762   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5763   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5764   }
5765 
5766   return lowerToScalableOp(Op, DAG, Opc);
5767 }
5768 
5769 // Lower vector ABS to smax(X, sub(0, X)).
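// For example (illustrative): for X = -7, sub(0, X) = 7 and smax(-7, 7) = 7;
// for X = 7, smax(7, -7) = 7 as well.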
5770 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5771   SDLoc DL(Op);
5772   MVT VT = Op.getSimpleValueType();
5773   SDValue X = Op.getOperand(0);
5774 
5775   assert(VT.isFixedLengthVector() && "Unexpected type");
5776 
5777   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5778   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5779 
5780   SDValue Mask, VL;
5781   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5782 
5783   SDValue SplatZero =
5784       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5785                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5786   SDValue NegX =
5787       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5788   SDValue Max =
5789       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5790 
5791   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5792 }
5793 
5794 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5795     SDValue Op, SelectionDAG &DAG) const {
5796   SDLoc DL(Op);
5797   MVT VT = Op.getSimpleValueType();
5798   SDValue Mag = Op.getOperand(0);
5799   SDValue Sign = Op.getOperand(1);
5800   assert(Mag.getValueType() == Sign.getValueType() &&
5801          "Can only handle COPYSIGN with matching types.");
5802 
5803   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5804   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5805   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5806 
5807   SDValue Mask, VL;
5808   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5809 
5810   SDValue CopySign =
5811       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5812 
5813   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5814 }
5815 
5816 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
5817     SDValue Op, SelectionDAG &DAG) const {
5818   MVT VT = Op.getSimpleValueType();
5819   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5820 
5821   MVT I1ContainerVT =
5822       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5823 
5824   SDValue CC =
5825       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
5826   SDValue Op1 =
5827       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5828   SDValue Op2 =
5829       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
5830 
5831   SDLoc DL(Op);
5832   SDValue Mask, VL;
5833   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5834 
5835   SDValue Select =
5836       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
5837 
5838   return convertFromScalableVector(VT, Select, DAG, Subtarget);
5839 }
5840 
5841 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
5842                                                unsigned NewOpc,
5843                                                bool HasMask) const {
5844   MVT VT = Op.getSimpleValueType();
5845   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5846 
5847   // Create list of operands by converting existing ones to scalable types.
5848   SmallVector<SDValue, 6> Ops;
5849   for (const SDValue &V : Op->op_values()) {
5850     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5851 
5852     // Pass through non-vector operands.
5853     if (!V.getValueType().isVector()) {
5854       Ops.push_back(V);
5855       continue;
5856     }
5857 
5858     // "cast" fixed length vector to a scalable vector.
5859     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
5860            "Only fixed length vectors are supported!");
5861     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5862   }
5863 
5864   SDLoc DL(Op);
5865   SDValue Mask, VL;
5866   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5867   if (HasMask)
5868     Ops.push_back(Mask);
5869   Ops.push_back(VL);
5870 
5871   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
5872   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
5873 }
5874 
5875 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
5876 // * Operands of each node are assumed to be in the same order.
5877 // * The EVL operand is promoted from i32 to i64 on RV64.
5878 // * Fixed-length vectors are converted to their scalable-vector container
5879 //   types.
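// For example (illustrative): a VP_ADD (x, y, mask, evl) on a fixed-length
// type has x and y converted to the scalable container type and is emitted as
// RISCVISD::ADD_VL (x, y, mask, evl) on that container, with the result
// converted back to the fixed-length type.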
5880 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
5881                                        unsigned RISCVISDOpc) const {
5882   SDLoc DL(Op);
5883   MVT VT = Op.getSimpleValueType();
5884   SmallVector<SDValue, 4> Ops;
5885 
5886   for (const auto &OpIdx : enumerate(Op->ops())) {
5887     SDValue V = OpIdx.value();
5888     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5889     // Pass through operands which aren't fixed-length vectors.
5890     if (!V.getValueType().isFixedLengthVector()) {
5891       Ops.push_back(V);
5892       continue;
5893     }
5894     // "cast" fixed length vector to a scalable vector.
5895     MVT OpVT = V.getSimpleValueType();
5896     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
5897     assert(useRVVForFixedLengthVectorVT(OpVT) &&
5898            "Only fixed length vectors are supported!");
5899     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5900   }
5901 
5902   if (!VT.isFixedLengthVector())
5903     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
5904 
5905   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5906 
5907   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
5908 
5909   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
5910 }
5911 
5912 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
5913                                             unsigned MaskOpc,
5914                                             unsigned VecOpc) const {
5915   MVT VT = Op.getSimpleValueType();
5916   if (VT.getVectorElementType() != MVT::i1)
5917     return lowerVPOp(Op, DAG, VecOpc);
5918 
  // It is safe to drop the mask parameter as masked-off elements are undef.
5920   SDValue Op1 = Op->getOperand(0);
5921   SDValue Op2 = Op->getOperand(1);
5922   SDValue VL = Op->getOperand(3);
5923 
5924   MVT ContainerVT = VT;
5925   const bool IsFixed = VT.isFixedLengthVector();
5926   if (IsFixed) {
5927     ContainerVT = getContainerForFixedLengthVector(VT);
5928     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
5929     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
5930   }
5931 
5932   SDLoc DL(Op);
5933   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
5934   if (!IsFixed)
5935     return Val;
5936   return convertFromScalableVector(VT, Val, DAG, Subtarget);
5937 }
5938 
5939 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
// matched to an RVV indexed load. The RVV indexed load instructions only
5941 // support the "unsigned unscaled" addressing mode; indices are implicitly
5942 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
5943 // signed or scaled indexing is extended to the XLEN value type and scaled
5944 // accordingly.
5945 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
5946                                                SelectionDAG &DAG) const {
5947   SDLoc DL(Op);
5948   MVT VT = Op.getSimpleValueType();
5949 
5950   const auto *MemSD = cast<MemSDNode>(Op.getNode());
5951   EVT MemVT = MemSD->getMemoryVT();
5952   MachineMemOperand *MMO = MemSD->getMemOperand();
5953   SDValue Chain = MemSD->getChain();
5954   SDValue BasePtr = MemSD->getBasePtr();
5955 
5956   ISD::LoadExtType LoadExtType;
5957   SDValue Index, Mask, PassThru, VL;
5958 
5959   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
5960     Index = VPGN->getIndex();
5961     Mask = VPGN->getMask();
5962     PassThru = DAG.getUNDEF(VT);
5963     VL = VPGN->getVectorLength();
5964     // VP doesn't support extending loads.
5965     LoadExtType = ISD::NON_EXTLOAD;
5966   } else {
5967     // Else it must be a MGATHER.
5968     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
5969     Index = MGN->getIndex();
5970     Mask = MGN->getMask();
5971     PassThru = MGN->getPassThru();
5972     LoadExtType = MGN->getExtensionType();
5973   }
5974 
5975   MVT IndexVT = Index.getSimpleValueType();
5976   MVT XLenVT = Subtarget.getXLenVT();
5977 
5978   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
5979          "Unexpected VTs!");
5980   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
5981   // Targets have to explicitly opt-in for extending vector loads.
5982   assert(LoadExtType == ISD::NON_EXTLOAD &&
5983          "Unexpected extending MGATHER/VP_GATHER");
5984   (void)LoadExtType;
5985 
5986   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5987   // the selection of the masked intrinsics doesn't do this for us.
5988   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5989 
5990   MVT ContainerVT = VT;
5991   if (VT.isFixedLengthVector()) {
5992     // We need to use the larger of the result and index type to determine the
5993     // scalable type to use so we don't increase LMUL for any operand/result.
5994     if (VT.bitsGE(IndexVT)) {
5995       ContainerVT = getContainerForFixedLengthVector(VT);
5996       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
5997                                  ContainerVT.getVectorElementCount());
5998     } else {
5999       IndexVT = getContainerForFixedLengthVector(IndexVT);
6000       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
6001                                      IndexVT.getVectorElementCount());
6002     }
6003 
6004     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6005 
6006     if (!IsUnmasked) {
6007       MVT MaskVT =
6008           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6009       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6010       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
6011     }
6012   }
6013 
6014   if (!VL)
6015     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6016 
6017   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6018     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6019     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6020                                    VL);
6021     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6022                         TrueMask, VL);
6023   }
6024 
6025   unsigned IntID =
6026       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
6027   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6028   if (IsUnmasked)
6029     Ops.push_back(DAG.getUNDEF(ContainerVT));
6030   else
6031     Ops.push_back(PassThru);
6032   Ops.push_back(BasePtr);
6033   Ops.push_back(Index);
6034   if (!IsUnmasked)
6035     Ops.push_back(Mask);
6036   Ops.push_back(VL);
6037   if (!IsUnmasked)
6038     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
6039 
6040   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
6041   SDValue Result =
6042       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
6043   Chain = Result.getValue(1);
6044 
6045   if (VT.isFixedLengthVector())
6046     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
6047 
6048   return DAG.getMergeValues({Result, Chain}, DL);
6049 }
6050 
6051 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
// matched to an RVV indexed store. The RVV indexed store instructions only
6053 // support the "unsigned unscaled" addressing mode; indices are implicitly
6054 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6055 // signed or scaled indexing is extended to the XLEN value type and scaled
6056 // accordingly.
6057 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
6058                                                 SelectionDAG &DAG) const {
6059   SDLoc DL(Op);
6060   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6061   EVT MemVT = MemSD->getMemoryVT();
6062   MachineMemOperand *MMO = MemSD->getMemOperand();
6063   SDValue Chain = MemSD->getChain();
6064   SDValue BasePtr = MemSD->getBasePtr();
6065 
6066   bool IsTruncatingStore = false;
6067   SDValue Index, Mask, Val, VL;
6068 
6069   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
6070     Index = VPSN->getIndex();
6071     Mask = VPSN->getMask();
6072     Val = VPSN->getValue();
6073     VL = VPSN->getVectorLength();
6074     // VP doesn't support truncating stores.
6075     IsTruncatingStore = false;
6076   } else {
6077     // Else it must be a MSCATTER.
6078     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
6079     Index = MSN->getIndex();
6080     Mask = MSN->getMask();
6081     Val = MSN->getValue();
6082     IsTruncatingStore = MSN->isTruncatingStore();
6083   }
6084 
6085   MVT VT = Val.getSimpleValueType();
6086   MVT IndexVT = Index.getSimpleValueType();
6087   MVT XLenVT = Subtarget.getXLenVT();
6088 
6089   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6090          "Unexpected VTs!");
6091   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6092   // Targets have to explicitly opt-in for extending vector loads and
6093   // truncating vector stores.
6094   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
6095   (void)IsTruncatingStore;
6096 
6097   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6098   // the selection of the masked intrinsics doesn't do this for us.
6099   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6100 
6101   MVT ContainerVT = VT;
6102   if (VT.isFixedLengthVector()) {
6103     // We need to use the larger of the value and index type to determine the
6104     // scalable type to use so we don't increase LMUL for any operand/result.
6105     if (VT.bitsGE(IndexVT)) {
6106       ContainerVT = getContainerForFixedLengthVector(VT);
6107       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6108                                  ContainerVT.getVectorElementCount());
6109     } else {
6110       IndexVT = getContainerForFixedLengthVector(IndexVT);
6111       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
6112                                      IndexVT.getVectorElementCount());
6113     }
6114 
6115     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6116     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
6117 
6118     if (!IsUnmasked) {
6119       MVT MaskVT =
6120           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6121       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6122     }
6123   }
6124 
6125   if (!VL)
6126     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6127 
6128   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6129     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6130     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6131                                    VL);
6132     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6133                         TrueMask, VL);
6134   }
6135 
6136   unsigned IntID =
6137       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
6138   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6139   Ops.push_back(Val);
6140   Ops.push_back(BasePtr);
6141   Ops.push_back(Index);
6142   if (!IsUnmasked)
6143     Ops.push_back(Mask);
6144   Ops.push_back(VL);
6145 
6146   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
6147                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
6148 }
6149 
6150 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
6151                                                SelectionDAG &DAG) const {
6152   const MVT XLenVT = Subtarget.getXLenVT();
6153   SDLoc DL(Op);
6154   SDValue Chain = Op->getOperand(0);
6155   SDValue SysRegNo = DAG.getTargetConstant(
6156       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6157   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
6158   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
6159 
  // The rounding-mode encoding used by RISCV differs from the one used by
  // FLT_ROUNDS. To convert between them, the RISCV rounding mode is used as an
  // index into a table consisting of a sequence of 4-bit fields, each holding
  // the corresponding FLT_ROUNDS mode.
6164   static const int Table =
6165       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
6166       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
6167       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
6168       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
6169       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
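  // For example (illustrative): reading FRM == RISCVFPRndMode::RTZ shifts the
  // table right by 4 * RTZ bits and masks with 7, yielding the 4-bit field
  // that was initialized above to int(RoundingMode::TowardZero).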
6170 
6171   SDValue Shift =
6172       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
6173   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6174                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6175   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6176                                DAG.getConstant(7, DL, XLenVT));
6177 
6178   return DAG.getMergeValues({Masked, Chain}, DL);
6179 }
6180 
6181 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
6182                                                SelectionDAG &DAG) const {
6183   const MVT XLenVT = Subtarget.getXLenVT();
6184   SDLoc DL(Op);
6185   SDValue Chain = Op->getOperand(0);
6186   SDValue RMValue = Op->getOperand(1);
6187   SDValue SysRegNo = DAG.getTargetConstant(
6188       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6189 
  // The rounding-mode encoding used by RISCV differs from the one used by
  // FLT_ROUNDS. To convert between them, the FLT_ROUNDS (C) rounding mode is
  // used as an index into a table consisting of a sequence of 4-bit fields,
  // each holding the corresponding RISCV mode.
6194   static const unsigned Table =
6195       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
6196       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
6197       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
6198       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
6199       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
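  // For example (illustrative): an incoming value of
  // int(RoundingMode::TowardPositive) selects the 4-bit field initialized
  // above to RISCVFPRndMode::RUP, which is then written to the FRM CSR.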
6200 
6201   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
6202                               DAG.getConstant(2, DL, XLenVT));
6203   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6204                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6205   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6206                         DAG.getConstant(0x7, DL, XLenVT));
6207   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
6208                      RMValue);
6209 }
6210 
6211 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
6212   switch (IntNo) {
6213   default:
6214     llvm_unreachable("Unexpected Intrinsic");
6215   case Intrinsic::riscv_grev:
6216     return RISCVISD::GREVW;
6217   case Intrinsic::riscv_gorc:
6218     return RISCVISD::GORCW;
6219   case Intrinsic::riscv_bcompress:
6220     return RISCVISD::BCOMPRESSW;
6221   case Intrinsic::riscv_bdecompress:
6222     return RISCVISD::BDECOMPRESSW;
6223   case Intrinsic::riscv_bfp:
6224     return RISCVISD::BFPW;
6225   case Intrinsic::riscv_fsl:
6226     return RISCVISD::FSLW;
6227   case Intrinsic::riscv_fsr:
6228     return RISCVISD::FSRW;
6229   }
6230 }
6231 
// Converts the given intrinsic to an i64 operation, any-extending its operands.
6233 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
6234                                          unsigned IntNo) {
6235   SDLoc DL(N);
6236   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
6237   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6238   SDValue NewOp2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6239   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp1, NewOp2);
6240   // ReplaceNodeResults requires we maintain the same type for the return value.
6241   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6242 }
6243 
6244 // Returns the opcode of the target-specific SDNode that implements the 32-bit
6245 // form of the given Opcode.
6246 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
6247   switch (Opcode) {
6248   default:
6249     llvm_unreachable("Unexpected opcode");
6250   case ISD::SHL:
6251     return RISCVISD::SLLW;
6252   case ISD::SRA:
6253     return RISCVISD::SRAW;
6254   case ISD::SRL:
6255     return RISCVISD::SRLW;
6256   case ISD::SDIV:
6257     return RISCVISD::DIVW;
6258   case ISD::UDIV:
6259     return RISCVISD::DIVUW;
6260   case ISD::UREM:
6261     return RISCVISD::REMUW;
6262   case ISD::ROTL:
6263     return RISCVISD::ROLW;
6264   case ISD::ROTR:
6265     return RISCVISD::RORW;
6266   case RISCVISD::GREV:
6267     return RISCVISD::GREVW;
6268   case RISCVISD::GORC:
6269     return RISCVISD::GORCW;
6270   }
6271 }
6272 
// Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
// node. Because i8/i16/i32 aren't legal types on RV64, these operations would
// otherwise be promoted to i64, making it difficult to select the
// SLLW/DIVUW/.../*W instructions later on because the fact that the operation
// was originally of type i8/i16/i32 is lost.
6278 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
6279                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
6280   SDLoc DL(N);
6281   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6282   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
6283   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
6284   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6285   // ReplaceNodeResults requires we maintain the same type for the return value.
6286   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6287 }
6288 
// Converts the given 32-bit operation to an i64 operation with sign extension
// semantics in order to reduce the number of sign extension instructions.
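// For example (illustrative): an i32 ADD on RV64 becomes
//   (trunc (sext_inreg (add (anyext x), (anyext y)), i32))
// which the existing isel patterns can select as ADDW.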
6291 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
6292   SDLoc DL(N);
6293   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6294   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6295   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
6296   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6297                                DAG.getValueType(MVT::i32));
6298   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
6299 }
6300 
6301 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
6302                                              SmallVectorImpl<SDValue> &Results,
6303                                              SelectionDAG &DAG) const {
6304   SDLoc DL(N);
6305   switch (N->getOpcode()) {
6306   default:
6307     llvm_unreachable("Don't know how to custom type legalize this operation!");
6308   case ISD::STRICT_FP_TO_SINT:
6309   case ISD::STRICT_FP_TO_UINT:
6310   case ISD::FP_TO_SINT:
6311   case ISD::FP_TO_UINT: {
6312     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6313            "Unexpected custom legalisation");
6314     bool IsStrict = N->isStrictFPOpcode();
6315     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
6316                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
6317     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
6318     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
6319         TargetLowering::TypeSoftenFloat) {
6320       if (!isTypeLegal(Op0.getValueType()))
6321         return;
6322       if (IsStrict) {
6323         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
6324                                 : RISCVISD::STRICT_FCVT_WU_RV64;
6325         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
6326         SDValue Res = DAG.getNode(
6327             Opc, DL, VTs, N->getOperand(0), Op0,
6328             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6329         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6330         Results.push_back(Res.getValue(1));
6331         return;
6332       }
6333       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
6334       SDValue Res =
6335           DAG.getNode(Opc, DL, MVT::i64, Op0,
6336                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6337       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6338       return;
6339     }
6340     // If the FP type needs to be softened, emit a library call using the 'si'
6341     // version. If we left it to default legalization we'd end up with 'di'. If
6342     // the FP type doesn't need to be softened just let generic type
6343     // legalization promote the result type.
6344     RTLIB::Libcall LC;
6345     if (IsSigned)
6346       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
6347     else
6348       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
6349     MakeLibCallOptions CallOptions;
6350     EVT OpVT = Op0.getValueType();
6351     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
6352     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
6353     SDValue Result;
6354     std::tie(Result, Chain) =
6355         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
6356     Results.push_back(Result);
6357     if (IsStrict)
6358       Results.push_back(Chain);
6359     break;
6360   }
6361   case ISD::READCYCLECOUNTER: {
6362     assert(!Subtarget.is64Bit() &&
6363            "READCYCLECOUNTER only has custom type legalization on riscv32");
6364 
6365     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
6366     SDValue RCW =
6367         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
6368 
6369     Results.push_back(
6370         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6371     Results.push_back(RCW.getValue(2));
6372     break;
6373   }
6374   case ISD::MUL: {
6375     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6376     unsigned XLen = Subtarget.getXLen();
    // This multiply needs to be expanded; try to use MULHSU+MUL if possible.
6378     if (Size > XLen) {
6379       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6380       SDValue LHS = N->getOperand(0);
6381       SDValue RHS = N->getOperand(1);
6382       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6383 
6384       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6385       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6386       // We need exactly one side to be unsigned.
6387       if (LHSIsU == RHSIsU)
6388         return;
6389 
6390       auto MakeMULPair = [&](SDValue S, SDValue U) {
6391         MVT XLenVT = Subtarget.getXLenVT();
6392         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6393         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6394         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6395         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6396         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6397       };
6398 
6399       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6400       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6401 
6402       // The other operand should be signed, but still prefer MULH when
6403       // possible.
6404       if (RHSIsU && LHSIsS && !RHSIsS)
6405         Results.push_back(MakeMULPair(LHS, RHS));
6406       else if (LHSIsU && RHSIsS && !LHSIsS)
6407         Results.push_back(MakeMULPair(RHS, LHS));
6408 
6409       return;
6410     }
6411     LLVM_FALLTHROUGH;
6412   }
6413   case ISD::ADD:
6414   case ISD::SUB:
6415     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6416            "Unexpected custom legalisation");
6417     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6418     break;
6419   case ISD::SHL:
6420   case ISD::SRA:
6421   case ISD::SRL:
6422     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6423            "Unexpected custom legalisation");
6424     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6425       Results.push_back(customLegalizeToWOp(N, DAG));
6426       break;
6427     }
6428 
6429     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6430     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6431     // shift amount.
6432     if (N->getOpcode() == ISD::SHL) {
6433       SDLoc DL(N);
6434       SDValue NewOp0 =
6435           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6436       SDValue NewOp1 =
6437           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6438       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6439       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6440                                    DAG.getValueType(MVT::i32));
6441       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6442     }
6443 
6444     break;
6445   case ISD::ROTL:
6446   case ISD::ROTR:
6447     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6448            "Unexpected custom legalisation");
6449     Results.push_back(customLegalizeToWOp(N, DAG));
6450     break;
6451   case ISD::CTTZ:
6452   case ISD::CTTZ_ZERO_UNDEF:
6453   case ISD::CTLZ:
6454   case ISD::CTLZ_ZERO_UNDEF: {
6455     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6456            "Unexpected custom legalisation");
6457 
6458     SDValue NewOp0 =
6459         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6460     bool IsCTZ =
6461         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6462     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6463     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6464     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6465     return;
6466   }
6467   case ISD::SDIV:
6468   case ISD::UDIV:
6469   case ISD::UREM: {
6470     MVT VT = N->getSimpleValueType(0);
6471     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6472            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6473            "Unexpected custom legalisation");
    // Don't promote division/remainder by constant since we should expand
    // those to a multiply by a magic constant.
    // FIXME: What if the expansion is disabled for minsize?
6477     if (N->getOperand(1).getOpcode() == ISD::Constant)
6478       return;
6479 
6480     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6481     // the upper 32 bits. For other types we need to sign or zero extend
6482     // based on the opcode.
6483     unsigned ExtOpc = ISD::ANY_EXTEND;
6484     if (VT != MVT::i32)
6485       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6486                                            : ISD::ZERO_EXTEND;
6487 
6488     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6489     break;
6490   }
6491   case ISD::UADDO:
6492   case ISD::USUBO: {
6493     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6494            "Unexpected custom legalisation");
6495     bool IsAdd = N->getOpcode() == ISD::UADDO;
6496     // Create an ADDW or SUBW.
6497     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6498     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6499     SDValue Res =
6500         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6501     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6502                       DAG.getValueType(MVT::i32));
6503 
6504     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
6505     // Since the inputs are sign extended from i32, this is equivalent to
6506     // comparing the lower 32 bits.
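    // For example (illustrative): for i32 0xFFFFFFFF + 1, Res is sext(0) == 0
    // while the sign-extended LHS is -1; 0 is unsigned-less-than -1, so
    // overflow is reported.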
6507     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6508     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
6509                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
6510 
6511     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6512     Results.push_back(Overflow);
6513     return;
6514   }
6515   case ISD::UADDSAT:
6516   case ISD::USUBSAT: {
6517     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6518            "Unexpected custom legalisation");
6519     if (Subtarget.hasStdExtZbb()) {
6520       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6521       // sign extend allows overflow of the lower 32 bits to be detected on
6522       // the promoted size.
6523       SDValue LHS =
6524           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6525       SDValue RHS =
6526           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6527       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6528       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6529       return;
6530     }
6531 
6532     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6533     // promotion for UADDO/USUBO.
6534     Results.push_back(expandAddSubSat(N, DAG));
6535     return;
6536   }
6537   case ISD::BITCAST: {
6538     EVT VT = N->getValueType(0);
6539     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
6540     SDValue Op0 = N->getOperand(0);
6541     EVT Op0VT = Op0.getValueType();
6542     MVT XLenVT = Subtarget.getXLenVT();
6543     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
6544       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
6545       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
6546     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
6547                Subtarget.hasStdExtF()) {
6548       SDValue FPConv =
6549           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
6550       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
6551     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
6552                isTypeLegal(Op0VT)) {
6553       // Custom-legalize bitcasts from fixed-length vector types to illegal
6554       // scalar types in order to improve codegen. Bitcast the vector to a
6555       // one-element vector type whose element type is the same as the result
6556       // type, and extract the first element.
6557       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
6558       if (isTypeLegal(BVT)) {
6559         SDValue BVec = DAG.getBitcast(BVT, Op0);
6560         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
6561                                       DAG.getConstant(0, DL, XLenVT)));
6562       }
6563     }
6564     break;
6565   }
6566   case RISCVISD::GREV:
6567   case RISCVISD::GORC: {
6568     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6569            "Unexpected custom legalisation");
6570     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
    // This is similar to customLegalizeToWOp, except that the second operand
    // is a constant, which is any-extended along with the first operand.
6574     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6575     SDValue NewOp0 =
6576         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6577     SDValue NewOp1 =
6578         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6579     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6580     // ReplaceNodeResults requires we maintain the same type for the return
6581     // value.
6582     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6583     break;
6584   }
6585   case RISCVISD::SHFL: {
6586     // There is no SHFLIW instruction, but we can just promote the operation.
6587     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6588            "Unexpected custom legalisation");
6589     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6590     SDValue NewOp0 =
6591         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6592     SDValue NewOp1 =
6593         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6594     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
6595     // ReplaceNodeResults requires we maintain the same type for the return
6596     // value.
6597     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6598     break;
6599   }
6600   case ISD::BSWAP:
6601   case ISD::BITREVERSE: {
6602     MVT VT = N->getSimpleValueType(0);
6603     MVT XLenVT = Subtarget.getXLenVT();
6604     assert((VT == MVT::i8 || VT == MVT::i16 ||
6605             (VT == MVT::i32 && Subtarget.is64Bit())) &&
6606            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
6607     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
6608     unsigned Imm = VT.getSizeInBits() - 1;
6609     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
6610     if (N->getOpcode() == ISD::BSWAP)
6611       Imm &= ~0x7U;
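    // For example (illustrative): an i16 BSWAP uses Imm == (15 & ~7) == 8 to
    // swap the two bytes, while an i32 BITREVERSE on RV64 uses Imm == 31 to
    // reverse all 32 bits.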
6612     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
6613     SDValue GREVI =
6614         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
6615     // ReplaceNodeResults requires we maintain the same type for the return
6616     // value.
6617     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
6618     break;
6619   }
6620   case ISD::FSHL:
6621   case ISD::FSHR: {
6622     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6623            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
6624     SDValue NewOp0 =
6625         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6626     SDValue NewOp1 =
6627         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6628     SDValue NewShAmt =
6629         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6630     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
6631     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
6632     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
6633                            DAG.getConstant(0x1f, DL, MVT::i64));
6634     // fshl and fshr concatenate their operands in the same order. fsrw and fslw
    // instructions use different orders. fshl will return its first operand for
6636     // shift of zero, fshr will return its second operand. fsl and fsr both
6637     // return rs1 so the ISD nodes need to have different operand orders.
6638     // Shift amount is in rs2.
6639     unsigned Opc = RISCVISD::FSLW;
6640     if (N->getOpcode() == ISD::FSHR) {
6641       std::swap(NewOp0, NewOp1);
6642       Opc = RISCVISD::FSRW;
6643     }
6644     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
6645     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
6646     break;
6647   }
6648   case ISD::EXTRACT_VECTOR_ELT: {
6649     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
6650     // type is illegal (currently only vXi64 RV32).
6651     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
6652     // transferred to the destination register. We issue two of these from the
    // upper and lower halves of the SEW-bit vector element, slid down to the
6654     // first element.
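    // For example (illustrative): extracting element 2 of a v4i64 vector on
    // RV32 becomes a slidedown by 2, a vmv.x.s for the low 32 bits, a vector
    // shift right by 32 of the first element, and a second vmv.x.s for the
    // high 32 bits, combined with a BUILD_PAIR.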
6655     SDValue Vec = N->getOperand(0);
6656     SDValue Idx = N->getOperand(1);
6657 
6658     // The vector type hasn't been legalized yet so we can't issue target
6659     // specific nodes if it needs legalization.
6660     // FIXME: We would manually legalize if it's important.
6661     if (!isTypeLegal(Vec.getValueType()))
6662       return;
6663 
6664     MVT VecVT = Vec.getSimpleValueType();
6665 
6666     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
6667            VecVT.getVectorElementType() == MVT::i64 &&
6668            "Unexpected EXTRACT_VECTOR_ELT legalization");
6669 
6670     // If this is a fixed vector, we need to convert it to a scalable vector.
6671     MVT ContainerVT = VecVT;
6672     if (VecVT.isFixedLengthVector()) {
6673       ContainerVT = getContainerForFixedLengthVector(VecVT);
6674       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
6675     }
6676 
6677     MVT XLenVT = Subtarget.getXLenVT();
6678 
6679     // Use a VL of 1 to avoid processing more elements than we need.
6680     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6681     SDValue VL = DAG.getConstant(1, DL, XLenVT);
6682     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6683 
6684     // Unless the index is known to be 0, we must slide the vector down to get
6685     // the desired element into index 0.
6686     if (!isNullConstant(Idx)) {
6687       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
6688                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
6689     }
6690 
6691     // Extract the lower XLEN bits of the correct vector element.
6692     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6693 
6694     // To extract the upper XLEN bits of the vector element, shift the first
6695     // element right by 32 bits and re-extract the lower XLEN bits.
6696     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6697                                      DAG.getConstant(32, DL, XLenVT), VL);
6698     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
6699                                  ThirtyTwoV, Mask, VL);
6700 
6701     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6702 
6703     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6704     break;
6705   }
6706   case ISD::INTRINSIC_WO_CHAIN: {
6707     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
6708     switch (IntNo) {
6709     default:
6710       llvm_unreachable(
6711           "Don't know how to custom type legalize this intrinsic!");
6712     case Intrinsic::riscv_grev:
6713     case Intrinsic::riscv_gorc:
6714     case Intrinsic::riscv_bcompress:
6715     case Intrinsic::riscv_bdecompress:
6716     case Intrinsic::riscv_bfp: {
6717       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6718              "Unexpected custom legalisation");
6719       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
6720       break;
6721     }
6722     case Intrinsic::riscv_fsl:
6723     case Intrinsic::riscv_fsr: {
6724       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6725              "Unexpected custom legalisation");
6726       SDValue NewOp1 =
6727           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6728       SDValue NewOp2 =
6729           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6730       SDValue NewOp3 =
6731           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3));
6732       unsigned Opc = getRISCVWOpcodeByIntr(IntNo);
6733       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2, NewOp3);
6734       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6735       break;
6736     }
6737     case Intrinsic::riscv_orc_b: {
6738       // Lower to the GORCI encoding for orc.b with the operand extended.
6739       SDValue NewOp =
6740           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6741       // If Zbp is enabled, use GORCIW which will sign extend the result.
6742       unsigned Opc =
6743           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
6744       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
6745                                 DAG.getConstant(7, DL, MVT::i64));
6746       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6747       return;
6748     }
6749     case Intrinsic::riscv_shfl:
6750     case Intrinsic::riscv_unshfl: {
6751       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6752              "Unexpected custom legalisation");
6753       SDValue NewOp1 =
6754           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6755       SDValue NewOp2 =
6756           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6757       unsigned Opc =
6758           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
6759       // There is no (UN)SHFLIW. If the control word is a constant, we can use
6760       // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
6761       // will be shuffled the same way as the lower 32 bit half, but the two
6762       // halves won't cross.
6763       if (isa<ConstantSDNode>(NewOp2)) {
6764         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
6765                              DAG.getConstant(0xf, DL, MVT::i64));
6766         Opc =
6767             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
6768       }
6769       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6770       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6771       break;
6772     }
6773     case Intrinsic::riscv_vmv_x_s: {
6774       EVT VT = N->getValueType(0);
6775       MVT XLenVT = Subtarget.getXLenVT();
6776       if (VT.bitsLT(XLenVT)) {
6777         // Simple case just extract using vmv.x.s and truncate.
6778         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
6779                                       Subtarget.getXLenVT(), N->getOperand(1));
6780         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
6781         return;
6782       }
6783 
6784       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
6785              "Unexpected custom legalization");
6786 
6787       // We need to do the move in two steps.
6788       SDValue Vec = N->getOperand(1);
6789       MVT VecVT = Vec.getSimpleValueType();
6790 
6791       // First extract the lower XLEN bits of the element.
6792       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6793 
6794       // To extract the upper XLEN bits of the vector element, shift the first
6795       // element right by 32 bits and re-extract the lower XLEN bits.
6796       SDValue VL = DAG.getConstant(1, DL, XLenVT);
6797       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
6798       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6799       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
6800                                        DAG.getConstant(32, DL, XLenVT), VL);
6801       SDValue LShr32 =
6802           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
6803       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6804 
6805       Results.push_back(
6806           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6807       break;
6808     }
6809     }
6810     break;
6811   }
6812   case ISD::VECREDUCE_ADD:
6813   case ISD::VECREDUCE_AND:
6814   case ISD::VECREDUCE_OR:
6815   case ISD::VECREDUCE_XOR:
6816   case ISD::VECREDUCE_SMAX:
6817   case ISD::VECREDUCE_UMAX:
6818   case ISD::VECREDUCE_SMIN:
6819   case ISD::VECREDUCE_UMIN:
6820     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
6821       Results.push_back(V);
6822     break;
6823   case ISD::VP_REDUCE_ADD:
6824   case ISD::VP_REDUCE_AND:
6825   case ISD::VP_REDUCE_OR:
6826   case ISD::VP_REDUCE_XOR:
6827   case ISD::VP_REDUCE_SMAX:
6828   case ISD::VP_REDUCE_UMAX:
6829   case ISD::VP_REDUCE_SMIN:
6830   case ISD::VP_REDUCE_UMIN:
6831     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
6832       Results.push_back(V);
6833     break;
6834   case ISD::FLT_ROUNDS_: {
6835     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
6836     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
6837     Results.push_back(Res.getValue(0));
6838     Results.push_back(Res.getValue(1));
6839     break;
6840   }
6841   }
6842 }
6843 
6844 // A structure to hold one of the bit-manipulation patterns below. Together, a
6845 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
6846 //   (or (and (shl x, 1), 0xAAAAAAAA),
6847 //       (and (srl x, 1), 0x55555555))
6848 struct RISCVBitmanipPat {
6849   SDValue Op;
6850   unsigned ShAmt;
6851   bool IsSHL;
6852 
6853   bool formsPairWith(const RISCVBitmanipPat &Other) const {
6854     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
6855   }
6856 };
6857 
6858 // Matches patterns of the form
6859 //   (and (shl x, C2), (C1 << C2))
6860 //   (and (srl x, C2), C1)
6861 //   (shl (and x, C1), C2)
6862 //   (srl (and x, (C1 << C2)), C2)
6863 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
6864 // The expected masks for each shift amount are specified in BitmanipMasks where
6865 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
// The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
// whether BitmanipMasks contains 6 or 5 entries, assuming that the maximum
// possible XLen is 64.
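// For example (illustrative), with the GREVI masks below and an i32 value x,
//   (and (srl x, 4), 0x0F0F0F0F)
// matches as {Op = x, ShAmt = 4, IsSHL = false}.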
6869 static Optional<RISCVBitmanipPat>
6870 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
6871   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
6872          "Unexpected number of masks");
6873   Optional<uint64_t> Mask;
6874   // Optionally consume a mask around the shift operation.
6875   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
6876     Mask = Op.getConstantOperandVal(1);
6877     Op = Op.getOperand(0);
6878   }
6879   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
6880     return None;
6881   bool IsSHL = Op.getOpcode() == ISD::SHL;
6882 
6883   if (!isa<ConstantSDNode>(Op.getOperand(1)))
6884     return None;
6885   uint64_t ShAmt = Op.getConstantOperandVal(1);
6886 
6887   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6888   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
6889     return None;
6890   // If we don't have enough masks for 64 bit, then we must be trying to
6891   // match SHFL so we're only allowed to shift 1/4 of the width.
6892   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
6893     return None;
6894 
6895   SDValue Src = Op.getOperand(0);
6896 
6897   // The expected mask is shifted left when the AND is found around SHL
6898   // patterns.
6899   //   ((x >> 1) & 0x55555555)
6900   //   ((x << 1) & 0xAAAAAAAA)
6901   bool SHLExpMask = IsSHL;
6902 
6903   if (!Mask) {
6904     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
6905     // the mask is all ones: consume that now.
6906     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
6907       Mask = Src.getConstantOperandVal(1);
6908       Src = Src.getOperand(0);
6909       // The expected mask is now in fact shifted left for SRL, so reverse the
6910       // decision.
6911       //   ((x & 0xAAAAAAAA) >> 1)
6912       //   ((x & 0x55555555) << 1)
6913       SHLExpMask = !SHLExpMask;
6914     } else {
6915       // Use a default shifted mask of all-ones if there's no AND, truncated
6916       // down to the expected width. This simplifies the logic later on.
6917       Mask = maskTrailingOnes<uint64_t>(Width);
6918       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
6919     }
6920   }
6921 
6922   unsigned MaskIdx = Log2_32(ShAmt);
6923   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
6924 
6925   if (SHLExpMask)
6926     ExpMask <<= ShAmt;
6927 
6928   if (Mask != ExpMask)
6929     return None;
6930 
6931   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
6932 }
6933 
6934 // Matches any of the following bit-manipulation patterns:
6935 //   (and (shl x, 1), (0x55555555 << 1))
6936 //   (and (srl x, 1), 0x55555555)
6937 //   (shl (and x, 0x55555555), 1)
6938 //   (srl (and x, (0x55555555 << 1)), 1)
6939 // where the shift amount and mask may vary thus:
6940 //   [1]  = 0x55555555 / 0xAAAAAAAA
6941 //   [2]  = 0x33333333 / 0xCCCCCCCC
6942 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
6943 //   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
6945 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
6946 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
6947   // These are the unshifted masks which we use to match bit-manipulation
6948   // patterns. They may be shifted left in certain circumstances.
6949   static const uint64_t BitmanipMasks[] = {
6950       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
6951       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
6952 
6953   return matchRISCVBitmanipPat(Op, BitmanipMasks);
6954 }
6955 
6956 // Match the following pattern as a GREVI(W) operation
6957 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
6958 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
6959                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
6961   EVT VT = Op.getValueType();
6962 
6963   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6964     auto LHS = matchGREVIPat(Op.getOperand(0));
6965     auto RHS = matchGREVIPat(Op.getOperand(1));
6966     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
6967       SDLoc DL(Op);
6968       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
6969                          DAG.getConstant(LHS->ShAmt, DL, VT));
6970     }
6971   }
6972   return SDValue();
6973 }
6974 
// Matches any of the following patterns as a GORCI(W) operation
6976 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
6977 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
6978 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
6979 // Note that with the variant of 3.,
6980 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
6981 // the inner pattern will first be matched as GREVI and then the outer
6982 // pattern will be matched to GORC via the first rule above.
6983 // 4.  (or (rotl/rotr x, bitwidth/2), x)
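// For example (illustrative), rule 4 turns (or (rotl x, 16), x) on an i32
// value into (GORCI x, 16).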
6984 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
6985                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
6987   EVT VT = Op.getValueType();
6988 
6989   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6990     SDLoc DL(Op);
6991     SDValue Op0 = Op.getOperand(0);
6992     SDValue Op1 = Op.getOperand(1);
6993 
6994     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
6995       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
6996           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
6997           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
6998         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
6999       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
7000       if ((Reverse.getOpcode() == ISD::ROTL ||
7001            Reverse.getOpcode() == ISD::ROTR) &&
7002           Reverse.getOperand(0) == X &&
7003           isa<ConstantSDNode>(Reverse.getOperand(1))) {
7004         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
7005         if (RotAmt == (VT.getSizeInBits() / 2))
7006           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
7007                              DAG.getConstant(RotAmt, DL, VT));
7008       }
7009       return SDValue();
7010     };
7011 
7012     // Check for either commutable permutation of (or (GREVI x, shamt), x)
7013     if (SDValue V = MatchOROfReverse(Op0, Op1))
7014       return V;
7015     if (SDValue V = MatchOROfReverse(Op1, Op0))
7016       return V;
7017 
    // OR is commutable so canonicalize its OR operand to the left.
7019     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
7020       std::swap(Op0, Op1);
7021     if (Op0.getOpcode() != ISD::OR)
7022       return SDValue();
7023     SDValue OrOp0 = Op0.getOperand(0);
7024     SDValue OrOp1 = Op0.getOperand(1);
7025     auto LHS = matchGREVIPat(OrOp0);
7026     // OR is commutable so swap the operands and try again: x might have been
    // on the left.
7028     if (!LHS) {
7029       std::swap(OrOp0, OrOp1);
7030       LHS = matchGREVIPat(OrOp0);
7031     }
7032     auto RHS = matchGREVIPat(Op1);
7033     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
7034       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
7035                          DAG.getConstant(LHS->ShAmt, DL, VT));
7036     }
7037   }
7038   return SDValue();
7039 }
7040 
7041 // Matches any of the following bit-manipulation patterns:
7042 //   (and (shl x, 1), (0x22222222 << 1))
7043 //   (and (srl x, 1), 0x22222222)
7044 //   (shl (and x, 0x22222222), 1)
7045 //   (srl (and x, (0x22222222 << 1)), 1)
7046 // where the shift amount and mask may vary thus:
7047 //   [1]  = 0x22222222 / 0x44444444
7048 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
7049 //   [4]  = 0x00F000F0 / 0x0F000F00
7050 //   [8]  = 0x0000FF00 / 0x00FF0000
7051 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
7052 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
7053   // These are the unshifted masks which we use to match bit-manipulation
7054   // patterns. They may be shifted left in certain circumstances.
7055   static const uint64_t BitmanipMasks[] = {
7056       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
7057       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
7058 
7059   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7060 }
7061 
// Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
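// For example (illustrative), on an i32 value x:
//   (or (or (and (shl x, 8), 0x00FF0000), (and (srl x, 8), 0x0000FF00)),
//       (and x, 0xFF0000FF))
// combines to (SHFL x, 8).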
7063 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
7064                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
7066   EVT VT = Op.getValueType();
7067 
7068   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
7069     return SDValue();
7070 
7071   SDValue Op0 = Op.getOperand(0);
7072   SDValue Op1 = Op.getOperand(1);
7073 
7074   // Or is commutable so canonicalize the second OR to the LHS.
7075   if (Op0.getOpcode() != ISD::OR)
7076     std::swap(Op0, Op1);
7077   if (Op0.getOpcode() != ISD::OR)
7078     return SDValue();
7079 
7080   // We found an inner OR, so our operands are the operands of the inner OR
7081   // and the other operand of the outer OR.
7082   SDValue A = Op0.getOperand(0);
7083   SDValue B = Op0.getOperand(1);
7084   SDValue C = Op1;
7085 
7086   auto Match1 = matchSHFLPat(A);
7087   auto Match2 = matchSHFLPat(B);
7088 
7089   // If neither matched, we failed.
7090   if (!Match1 && !Match2)
7091     return SDValue();
7092 
  // We had at least one match. If one failed, try the remaining C operand.
7094   if (!Match1) {
7095     std::swap(A, C);
7096     Match1 = matchSHFLPat(A);
7097     if (!Match1)
7098       return SDValue();
7099   } else if (!Match2) {
7100     std::swap(B, C);
7101     Match2 = matchSHFLPat(B);
7102     if (!Match2)
7103       return SDValue();
7104   }
7105   assert(Match1 && Match2);
7106 
7107   // Make sure our matches pair up.
7108   if (!Match1->formsPairWith(*Match2))
7109     return SDValue();
7110 
  // All that remains is to make sure C is an AND with the same input that
  // masks out the bits that are being shuffled.
7113   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
7114       C.getOperand(0) != Match1->Op)
7115     return SDValue();
7116 
7117   uint64_t Mask = C.getConstantOperandVal(1);
7118 
7119   static const uint64_t BitmanipMasks[] = {
7120       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
7121       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
7122   };
7123 
7124   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7125   unsigned MaskIdx = Log2_32(Match1->ShAmt);
7126   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7127 
7128   if (Mask != ExpMask)
7129     return SDValue();
7130 
7131   SDLoc DL(Op);
7132   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7133                      DAG.getConstant(Match1->ShAmt, DL, VT));
7134 }
7135 
// Optimize (add (shl x, c0), (shl y, c1)) ->
//          (SLLI (SH*ADD x, y), c0), if c1-c0 equals 1, 2, or 3.
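// For example (illustrative): (add (shl x, 5), (shl y, 8)) becomes
// (shl (add (shl y, 3), x), 5), which selects to SLLI (SH3ADD y, x), 5.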
7138 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7139                                   const RISCVSubtarget &Subtarget) {
7140   // Perform this optimization only in the zba extension.
7141   if (!Subtarget.hasStdExtZba())
7142     return SDValue();
7143 
7144   // Skip for vector types and larger types.
7145   EVT VT = N->getValueType(0);
7146   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7147     return SDValue();
7148 
7149   // The two operand nodes must be SHL and have no other use.
7150   SDValue N0 = N->getOperand(0);
7151   SDValue N1 = N->getOperand(1);
7152   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7153       !N0->hasOneUse() || !N1->hasOneUse())
7154     return SDValue();
7155 
7156   // Check c0 and c1.
7157   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7158   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7159   if (!N0C || !N1C)
7160     return SDValue();
7161   int64_t C0 = N0C->getSExtValue();
7162   int64_t C1 = N1C->getSExtValue();
7163   if (C0 <= 0 || C1 <= 0)
7164     return SDValue();
7165 
7166   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7167   int64_t Bits = std::min(C0, C1);
7168   int64_t Diff = std::abs(C0 - C1);
7169   if (Diff != 1 && Diff != 2 && Diff != 3)
7170     return SDValue();
7171 
7172   // Build nodes.
7173   SDLoc DL(N);
7174   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7175   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7176   SDValue NA0 =
7177       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7178   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7179   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7180 }
7181 
// Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
// non-zero, and to x when it is zero, since any repeated GREVI stage undoes
// itself. Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2); a repeated
// GORCI stage does not undo itself, but it is redundant.
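// For example (illustrative): (GREVI (GREVI x, 1), 2) -> (GREVI x, 3),
// (GREVI (GREVI x, 3), 3) -> x, and (GORCI (GORCI x, 1), 3) -> (GORCI x, 3).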
7186 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7187   SDValue Src = N->getOperand(0);
7188 
7189   if (Src.getOpcode() != N->getOpcode())
7190     return SDValue();
7191 
7192   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7193       !isa<ConstantSDNode>(Src.getOperand(1)))
7194     return SDValue();
7195 
7196   unsigned ShAmt1 = N->getConstantOperandVal(1);
7197   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7198   Src = Src.getOperand(0);
7199 
7200   unsigned CombinedShAmt;
7201   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
7202     CombinedShAmt = ShAmt1 | ShAmt2;
7203   else
7204     CombinedShAmt = ShAmt1 ^ ShAmt2;
7205 
7206   if (CombinedShAmt == 0)
7207     return Src;
7208 
7209   SDLoc DL(N);
7210   return DAG.getNode(
7211       N->getOpcode(), DL, N->getValueType(0), Src,
7212       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7213 }
7214 
7215 // Combine a constant select operand into its use:
7216 //
7217 // (and (select cond, -1, c), x)
7218 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7219 // (or  (select cond, 0, c), x)
7220 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7221 // (xor (select cond, 0, c), x)
7222 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7223 // (add (select cond, 0, c), x)
7224 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7225 // (sub x, (select cond, 0, c))
7226 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7227 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7228                                    SelectionDAG &DAG, bool AllOnes) {
7229   EVT VT = N->getValueType(0);
7230 
7231   // Skip vectors.
7232   if (VT.isVector())
7233     return SDValue();
7234 
7235   if ((Slct.getOpcode() != ISD::SELECT &&
7236        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7237       !Slct.hasOneUse())
7238     return SDValue();
7239 
7240   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7241     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7242   };
7243 
7244   bool SwapSelectOps;
7245   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7246   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7247   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7248   SDValue NonConstantVal;
7249   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7250     SwapSelectOps = false;
7251     NonConstantVal = FalseVal;
7252   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7253     SwapSelectOps = true;
7254     NonConstantVal = TrueVal;
7255   } else
7256     return SDValue();
7257 
  // Slct is now known to be the desired identity constant when CC is true.
7259   TrueVal = OtherOp;
7260   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7261   // Unless SwapSelectOps says the condition should be false.
7262   if (SwapSelectOps)
7263     std::swap(TrueVal, FalseVal);
7264 
7265   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7266     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7267                        {Slct.getOperand(0), Slct.getOperand(1),
7268                         Slct.getOperand(2), TrueVal, FalseVal});
7269 
7270   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7271                      {Slct.getOperand(0), TrueVal, FalseVal});
7272 }
7273 
7274 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7275 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7276                                               bool AllOnes) {
7277   SDValue N0 = N->getOperand(0);
7278   SDValue N1 = N->getOperand(1);
7279   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7280     return Result;
7281   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7282     return Result;
7283   return SDValue();
7284 }
7285 
7286 // Transform (add (mul x, c0), c1) ->
7287 //           (add (mul (add x, c1/c0), c0), c1%c0).
7288 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7289 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7290 // to an infinite loop in DAGCombine if transformed.
7291 // Or transform (add (mul x, c0), c1) ->
7292 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7293 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7294 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7295 // lead to an infinite loop in DAGCombine if transformed.
7296 // Or transform (add (mul x, c0), c1) ->
7297 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7298 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7299 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7300 // lead to an infinite loop in DAGCombine if transformed.
7301 // Or transform (add (mul x, c0), c1) ->
7302 //              (mul (add x, c1/c0), c0).
7303 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
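// For example (illustrative): (add (mul x, 100), 4099) becomes
// (add (mul (add x, 40), 100), 99), since 100 * 40 + 99 == 4099 and both 40
// and 99 are simm12 while 4099 is not.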
7304 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7305                                      const RISCVSubtarget &Subtarget) {
7306   // Skip for vector types and larger types.
7307   EVT VT = N->getValueType(0);
7308   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7309     return SDValue();
  // The first operand node must be a MUL and have no other uses.
7311   SDValue N0 = N->getOperand(0);
7312   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7313     return SDValue();
  // Check if c0 and c1 match the above conditions.
7315   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7316   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7317   if (!N0C || !N1C)
7318     return SDValue();
7319   int64_t C0 = N0C->getSExtValue();
7320   int64_t C1 = N1C->getSExtValue();
7321   int64_t CA, CB;
7322   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
7323     return SDValue();
7324   // Search for proper CA (non-zero) and CB that both are simm12.
7325   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
7326       !isInt<12>(C0 * (C1 / C0))) {
7327     CA = C1 / C0;
7328     CB = C1 % C0;
7329   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
7330              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
7331     CA = C1 / C0 + 1;
7332     CB = C1 % C0 - C0;
7333   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
7334              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
7335     CA = C1 / C0 - 1;
7336     CB = C1 % C0 + C0;
7337   } else
7338     return SDValue();
7339   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
7340   SDLoc DL(N);
7341   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
7342                              DAG.getConstant(CA, DL, VT));
7343   SDValue New1 =
7344       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
7345   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
7346 }
7347 
7348 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
7349                                  const RISCVSubtarget &Subtarget) {
7350   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
7351     return V;
7352   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
7353     return V;
7354   // fold (add (select lhs, rhs, cc, 0, y), x) ->
7355   //      (select lhs, rhs, cc, x, (add x, y))
7356   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7357 }
7358 
7359 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
7360   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
7361   //      (select lhs, rhs, cc, x, (sub x, y))
7362   SDValue N0 = N->getOperand(0);
7363   SDValue N1 = N->getOperand(1);
7364   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
7365 }
7366 
7367 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
7368   // fold (and (select lhs, rhs, cc, -1, y), x) ->
7369   //      (select lhs, rhs, cc, x, (and x, y))
7370   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
7371 }
7372 
7373 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
7374                                 const RISCVSubtarget &Subtarget) {
7375   if (Subtarget.hasStdExtZbp()) {
7376     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
7377       return GREV;
7378     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
7379       return GORC;
7380     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
7381       return SHFL;
7382   }
7383 
7384   // fold (or (select cond, 0, y), x) ->
7385   //      (select cond, x, (or x, y))
7386   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7387 }
7388 
7389 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
7390   // fold (xor (select cond, 0, y), x) ->
7391   //      (select cond, x, (xor x, y))
7392   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7393 }
7394 
7395 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
7396 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
7397 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
7398 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
7399 // ADDW/SUBW/MULW.
7400 static SDValue performANY_EXTENDCombine(SDNode *N,
7401                                         TargetLowering::DAGCombinerInfo &DCI,
7402                                         const RISCVSubtarget &Subtarget) {
7403   if (!Subtarget.is64Bit())
7404     return SDValue();
7405 
7406   SelectionDAG &DAG = DCI.DAG;
7407 
7408   SDValue Src = N->getOperand(0);
7409   EVT VT = N->getValueType(0);
7410   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
7411     return SDValue();
7412 
7413   // The opcode must be one that can implicitly sign_extend.
7414   // FIXME: Additional opcodes.
7415   switch (Src.getOpcode()) {
7416   default:
7417     return SDValue();
7418   case ISD::MUL:
7419     if (!Subtarget.hasStdExtM())
7420       return SDValue();
7421     LLVM_FALLTHROUGH;
7422   case ISD::ADD:
7423   case ISD::SUB:
7424     break;
7425   }
7426 
7427   // Only handle cases where the result is used by a CopyToReg. That likely
7428   // means the value is a liveout of the basic block. This helps prevent
7429   // infinite combine loops like PR51206.
7430   if (none_of(N->uses(),
7431               [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
7432     return SDValue();
7433 
7434   SmallVector<SDNode *, 4> SetCCs;
7435   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
7436                             UE = Src.getNode()->use_end();
7437        UI != UE; ++UI) {
7438     SDNode *User = *UI;
7439     if (User == N)
7440       continue;
7441     if (UI.getUse().getResNo() != Src.getResNo())
7442       continue;
7443     // All i32 setccs are legalized by sign extending operands.
7444     if (User->getOpcode() == ISD::SETCC) {
7445       SetCCs.push_back(User);
7446       continue;
7447     }
7448     // We don't know if we can extend this user.
7449     break;
7450   }
7451 
7452   // If we don't have any SetCCs, this isn't worthwhile.
7453   if (SetCCs.empty())
7454     return SDValue();
7455 
7456   SDLoc DL(N);
7457   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
7458   DCI.CombineTo(N, SExt);
7459 
7460   // Promote all the setccs.
7461   for (SDNode *SetCC : SetCCs) {
7462     SmallVector<SDValue, 4> Ops;
7463 
7464     for (unsigned j = 0; j != 2; ++j) {
7465       SDValue SOp = SetCC->getOperand(j);
7466       if (SOp == Src)
7467         Ops.push_back(SExt);
7468       else
7469         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
7470     }
7471 
7472     Ops.push_back(SetCC->getOperand(2));
7473     DCI.CombineTo(SetCC,
7474                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
7475   }
7476   return SDValue(N, 0);
7477 }
7478 
7479 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to
7480 // vwadd(u).vv/vx or vwsub(u).vv/vx.
7481 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG,
7482                                              bool Commute = false) {
7483   assert((N->getOpcode() == RISCVISD::ADD_VL ||
7484           N->getOpcode() == RISCVISD::SUB_VL) &&
7485          "Unexpected opcode");
7486   bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL;
7487   SDValue Op0 = N->getOperand(0);
7488   SDValue Op1 = N->getOperand(1);
7489   if (Commute)
7490     std::swap(Op0, Op1);
7491 
7492   MVT VT = N->getSimpleValueType(0);
7493 
7494   // Determine the narrow size for a widening add/sub.
7495   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7496   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7497                                   VT.getVectorElementCount());
7498 
7499   SDValue Mask = N->getOperand(2);
7500   SDValue VL = N->getOperand(3);
7501 
7502   SDLoc DL(N);
7503 
7504   // If the RHS is a sext or zext, we can form a widening op.
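  // For example (illustrative): with VT = nxv2i32 and a sign-extended
  // Op1 = (vsext_vl Y:nxv2i16), this becomes (vwadd_w_vl Op0, Y), which can
  // later be selected as vwadd.wv.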
7505   if ((Op1.getOpcode() == RISCVISD::VZEXT_VL ||
7506        Op1.getOpcode() == RISCVISD::VSEXT_VL) &&
7507       Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) {
7508     unsigned ExtOpc = Op1.getOpcode();
7509     Op1 = Op1.getOperand(0);
7510     // Re-introduce narrower extends if needed.
7511     if (Op1.getValueType() != NarrowVT)
7512       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7513 
7514     unsigned WOpc;
7515     if (ExtOpc == RISCVISD::VSEXT_VL)
7516       WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL;
7517     else
7518       WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL;
7519 
7520     return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL);
7521   }
7522 
7523   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
7524   // sext/zext?
7525 
7526   return SDValue();
7527 }
7528 
7529 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or
7530 // vwsub(u).vv/vx.
7531 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) {
7532   SDValue Op0 = N->getOperand(0);
7533   SDValue Op1 = N->getOperand(1);
7534   SDValue Mask = N->getOperand(2);
7535   SDValue VL = N->getOperand(3);
7536 
7537   MVT VT = N->getSimpleValueType(0);
7538   MVT NarrowVT = Op1.getSimpleValueType();
7539   unsigned NarrowSize = NarrowVT.getScalarSizeInBits();
7540 
7541   unsigned VOpc;
7542   switch (N->getOpcode()) {
7543   default: llvm_unreachable("Unexpected opcode");
7544   case RISCVISD::VWADD_W_VL:  VOpc = RISCVISD::VWADD_VL;  break;
7545   case RISCVISD::VWSUB_W_VL:  VOpc = RISCVISD::VWSUB_VL;  break;
7546   case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break;
7547   case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break;
7548   }
7549 
7550   bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL ||
7551                   N->getOpcode() == RISCVISD::VWSUB_W_VL;
7552 
7553   SDLoc DL(N);
7554 
7555   // If the LHS is a sext or zext, we can narrow this op to the same size as
7556   // the RHS.
7557   if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) ||
7558        (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) &&
7559       Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) {
7560     unsigned ExtOpc = Op0.getOpcode();
7561     Op0 = Op0.getOperand(0);
7562     // Re-introduce narrower extends if needed.
7563     if (Op0.getValueType() != NarrowVT)
7564       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7565     return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL);
7566   }
7567 
7568   bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL ||
7569                N->getOpcode() == RISCVISD::VWADDU_W_VL;
7570 
7571   // Look for splats on the left hand side of a vwadd(u).wv. We might be able
7572   // to commute and use a vwadd(u).vx instead.
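  // For example (illustrative): (vwaddu_w_vl (vmv_v_x_vl X), Y) can become
  // (vwaddu_vl Y, (vmv_v_x_vl X)) with X splatted at the narrow element type,
  // which can later be selected as vwaddu.vx.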
7573   if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL &&
7574       Op0.getOperand(1) == VL) {
7575     Op0 = Op0.getOperand(0);
7576 
7577     // See if we have enough sign bits or zero bits in the scalar to use a
7578     // widening add/sub by splatting to a smaller element size.
7579     unsigned EltBits = VT.getScalarSizeInBits();
7580     unsigned ScalarBits = Op0.getValueSizeInBits();
7581     // Make sure we're getting all element bits from the scalar register.
7582     // FIXME: Support implicit sign extension of vmv.v.x?
7583     if (ScalarBits < EltBits)
7584       return SDValue();
7585 
7586     if (IsSigned) {
7587       if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize))
7588         return SDValue();
7589     } else {
7590       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7591       if (!DAG.MaskedValueIsZero(Op0, Mask))
7592         return SDValue();
7593     }
7594 
7595     Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, Op0, VL);
7596     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
7597   }
7598 
7599   return SDValue();
7600 }
7601 
7602 // Try to form VWMUL, VWMULU or VWMULSU.
7603 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
7604 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
7605                                        bool Commute) {
7606   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
7607   SDValue Op0 = N->getOperand(0);
7608   SDValue Op1 = N->getOperand(1);
7609   if (Commute)
7610     std::swap(Op0, Op1);
7611 
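  // For vwmulsu the first operand must be sign extended and the second zero
  // extended; plain vwmul/vwmulu require both operands to use the same
  // extension.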
7612   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
7613   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
7614   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
7615   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
7616     return SDValue();
7617 
7618   SDValue Mask = N->getOperand(2);
7619   SDValue VL = N->getOperand(3);
7620 
7621   // Make sure the mask and VL match.
7622   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
7623     return SDValue();
7624 
7625   MVT VT = N->getSimpleValueType(0);
7626 
7627   // Determine the narrow size for a widening multiply.
7628   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7629   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7630                                   VT.getVectorElementCount());
7631 
7632   SDLoc DL(N);
7633 
7634   // See if the other operand is the same opcode.
7635   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
7636     if (!Op1.hasOneUse())
7637       return SDValue();
7638 
7639     // Make sure the mask and VL match.
7640     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
7641       return SDValue();
7642 
7643     Op1 = Op1.getOperand(0);
7644   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
7645     // The operand is a splat of a scalar.
7646 
7647     // The VL must be the same.
7648     if (Op1.getOperand(1) != VL)
7649       return SDValue();
7650 
7651     // Get the scalar value.
7652     Op1 = Op1.getOperand(0);
7653 
7654     // See if we have enough sign bits or zero bits in the scalar to use a
7655     // widening multiply by splatting to a smaller element size.
7656     unsigned EltBits = VT.getScalarSizeInBits();
7657     unsigned ScalarBits = Op1.getValueSizeInBits();
7658     // Make sure we're getting all element bits from the scalar register.
7659     // FIXME: Support implicit sign extension of vmv.v.x?
7660     if (ScalarBits < EltBits)
7661       return SDValue();
7662 
7663     if (IsSignExt) {
7664       if (DAG.ComputeNumSignBits(Op1) <= (ScalarBits - NarrowSize))
7665         return SDValue();
7666     } else {
7667       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7668       if (!DAG.MaskedValueIsZero(Op1, Mask))
7669         return SDValue();
7670     }
7671 
7672     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, Op1, VL);
7673   } else
7674     return SDValue();
7675 
7676   Op0 = Op0.getOperand(0);
7677 
7678   // Re-introduce narrower extends if needed.
7679   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
7680   if (Op0.getValueType() != NarrowVT)
7681     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7682   if (Op1.getValueType() != NarrowVT)
7683     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7684 
7685   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
7686   if (!IsVWMULSU)
7687     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
7688   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
7689 }
7690 
7691 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
7692   switch (Op.getOpcode()) {
7693   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
7694   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
7695   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
7696   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
7697   case ISD::FROUND:     return RISCVFPRndMode::RMM;
7698   }
7699 
7700   return RISCVFPRndMode::Invalid;
7701 }
7702 
7703 // Fold
7704 //   (fp_to_int (froundeven X)) -> fcvt X, rne
7705 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
7706 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
7707 //   (fp_to_int (fceil X))      -> fcvt X, rup
7708 //   (fp_to_int (fround X))     -> fcvt X, rmm
7709 static SDValue performFP_TO_INTCombine(SDNode *N,
7710                                        TargetLowering::DAGCombinerInfo &DCI,
7711                                        const RISCVSubtarget &Subtarget) {
7712   SelectionDAG &DAG = DCI.DAG;
7713   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7714   MVT XLenVT = Subtarget.getXLenVT();
7715 
7716   // Only handle XLen or i32 types. Other types narrower than XLen will
7717   // eventually be legalized to XLenVT.
7718   EVT VT = N->getValueType(0);
7719   if (VT != MVT::i32 && VT != XLenVT)
7720     return SDValue();
7721 
7722   SDValue Src = N->getOperand(0);
7723 
7724   // Ensure the FP type is also legal.
7725   if (!TLI.isTypeLegal(Src.getValueType()))
7726     return SDValue();
7727 
7728   // Don't do this for f16 with Zfhmin and not Zfh.
7729   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
7730     return SDValue();
7731 
7732   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
7733   if (FRM == RISCVFPRndMode::Invalid)
7734     return SDValue();
7735 
7736   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
7737 
7738   unsigned Opc;
7739   if (VT == XLenVT)
7740     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
7741   else
7742     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
7743 
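  // Emit a single fcvt using the matched static rounding mode. The truncate
  // below is a no-op when VT is already XLenVT.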
7744   SDLoc DL(N);
7745   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
7746                                 DAG.getTargetConstant(FRM, DL, XLenVT));
7747   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
7748 }
7749 
7750 // Fold
7751 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
7752 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
7753 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
7754 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
7755 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
7756 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
7757                                        TargetLowering::DAGCombinerInfo &DCI,
7758                                        const RISCVSubtarget &Subtarget) {
7759   SelectionDAG &DAG = DCI.DAG;
7760   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7761   MVT XLenVT = Subtarget.getXLenVT();
7762 
7763   // Only handle XLen types. Other types narrower than XLen will eventually be
7764   // legalized to XLenVT.
7765   EVT DstVT = N->getValueType(0);
7766   if (DstVT != XLenVT)
7767     return SDValue();
7768 
7769   SDValue Src = N->getOperand(0);
7770 
7771   // Ensure the FP type is also legal.
7772   if (!TLI.isTypeLegal(Src.getValueType()))
7773     return SDValue();
7774 
7775   // Don't do this for f16 with Zfhmin and not Zfh.
7776   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
7777     return SDValue();
7778 
7779   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
7780 
7781   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
7782   if (FRM == RISCVFPRndMode::Invalid)
7783     return SDValue();
7784 
7785   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
7786 
7787   unsigned Opc;
7788   if (SatVT == DstVT)
7789     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
7790   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
7791     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
7792   else
7793     return SDValue();
7794   // FIXME: Support other SatVTs by clamping before or after the conversion.
7795 
7796   Src = Src.getOperand(0);
7797 
7798   SDLoc DL(N);
7799   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
7800                                 DAG.getTargetConstant(FRM, DL, XLenVT));
7801 
7802   // RISCV FP-to-int conversions saturate to the destination register size, but
7803   // don't produce 0 for NaN.
7804   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
7805   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
7806 }
7807 
7808 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
7809                                                DAGCombinerInfo &DCI) const {
7810   SelectionDAG &DAG = DCI.DAG;
7811 
7812   // Helper to call SimplifyDemandedBits on an operand of N where only some low
7813   // bits are demanded. N will be added to the Worklist if it was not deleted.
7814   // Caller should return SDValue(N, 0) if this returns true.
7815   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
7816     SDValue Op = N->getOperand(OpNo);
7817     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
7818     if (!SimplifyDemandedBits(Op, Mask, DCI))
7819       return false;
7820 
7821     if (N->getOpcode() != ISD::DELETED_NODE)
7822       DCI.AddToWorklist(N);
7823     return true;
7824   };
7825 
7826   switch (N->getOpcode()) {
7827   default:
7828     break;
7829   case RISCVISD::SplitF64: {
7830     SDValue Op0 = N->getOperand(0);
7831     // If the input to SplitF64 is just BuildPairF64 then the operation is
7832     // redundant. Instead, use BuildPairF64's operands directly.
7833     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
7834       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
7835 
7836     if (Op0->isUndef()) {
7837       SDValue Lo = DAG.getUNDEF(MVT::i32);
7838       SDValue Hi = DAG.getUNDEF(MVT::i32);
7839       return DCI.CombineTo(N, Lo, Hi);
7840     }
7841 
7842     SDLoc DL(N);
7843 
7844     // It's cheaper to materialise two 32-bit integers than to load a double
7845     // from the constant pool and transfer it to integer registers through the
7846     // stack.
7847     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
7848       APInt V = C->getValueAPF().bitcastToAPInt();
7849       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
7850       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
7851       return DCI.CombineTo(N, Lo, Hi);
7852     }
7853 
7854     // This is a target-specific version of a DAGCombine performed in
7855     // DAGCombiner::visitBITCAST. It performs the equivalent of:
7856     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
7857     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
7858     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
7859         !Op0.getNode()->hasOneUse())
7860       break;
7861     SDValue NewSplitF64 =
7862         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
7863                     Op0.getOperand(0));
7864     SDValue Lo = NewSplitF64.getValue(0);
7865     SDValue Hi = NewSplitF64.getValue(1);
7866     APInt SignBit = APInt::getSignMask(32);
7867     if (Op0.getOpcode() == ISD::FNEG) {
7868       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
7869                                   DAG.getConstant(SignBit, DL, MVT::i32));
7870       return DCI.CombineTo(N, Lo, NewHi);
7871     }
7872     assert(Op0.getOpcode() == ISD::FABS);
7873     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
7874                                 DAG.getConstant(~SignBit, DL, MVT::i32));
7875     return DCI.CombineTo(N, Lo, NewHi);
7876   }
7877   case RISCVISD::SLLW:
7878   case RISCVISD::SRAW:
7879   case RISCVISD::SRLW:
7880   case RISCVISD::ROLW:
7881   case RISCVISD::RORW: {
7882     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
7883     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7884         SimplifyDemandedLowBitsHelper(1, 5))
7885       return SDValue(N, 0);
7886     break;
7887   }
7888   case RISCVISD::CLZW:
7889   case RISCVISD::CTZW: {
7890     // Only the lower 32 bits of the first operand are read.
7891     if (SimplifyDemandedLowBitsHelper(0, 32))
7892       return SDValue(N, 0);
7893     break;
7894   }
7895   case RISCVISD::GREV:
7896   case RISCVISD::GORC: {
7897     // Only the lower log2(BitWidth) bits of the shift amount are read.
7898     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
7899     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
7900     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
7901       return SDValue(N, 0);
7902 
7903     return combineGREVI_GORCI(N, DAG);
7904   }
7905   case RISCVISD::GREVW:
7906   case RISCVISD::GORCW: {
7907     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
7908     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7909         SimplifyDemandedLowBitsHelper(1, 5))
7910       return SDValue(N, 0);
7911 
7912     return combineGREVI_GORCI(N, DAG);
7913   }
7914   case RISCVISD::SHFL:
7915   case RISCVISD::UNSHFL: {
7916     // Only the lower log2(BitWidth)-1 bits of the shift amount are read.
7917     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
7918     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
7919     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
7920       return SDValue(N, 0);
7921 
7922     break;
7923   }
7924   case RISCVISD::SHFLW:
7925   case RISCVISD::UNSHFLW: {
7926     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
7927     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7928         SimplifyDemandedLowBitsHelper(1, 4))
7929       return SDValue(N, 0);
7930 
7931     break;
7932   }
7933   case RISCVISD::BCOMPRESSW:
7934   case RISCVISD::BDECOMPRESSW: {
7935     // Only the lower 32 bits of LHS and RHS are read.
7936     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7937         SimplifyDemandedLowBitsHelper(1, 32))
7938       return SDValue(N, 0);
7939 
7940     break;
7941   }
7942   case RISCVISD::FMV_X_ANYEXTH:
7943   case RISCVISD::FMV_X_ANYEXTW_RV64: {
7944     SDLoc DL(N);
7945     SDValue Op0 = N->getOperand(0);
7946     MVT VT = N->getSimpleValueType(0);
7947     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
7948     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
7949     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
7950     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
7951          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
7952         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
7953          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
7954       assert(Op0.getOperand(0).getValueType() == VT &&
7955              "Unexpected value type!");
7956       return Op0.getOperand(0);
7957     }
7958 
7959     // This is a target-specific version of a DAGCombine performed in
7960     // DAGCombiner::visitBITCAST. It performs the equivalent of:
7961     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
7962     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
7963     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
7964         !Op0.getNode()->hasOneUse())
7965       break;
7966     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
7967     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
7968     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
7969     if (Op0.getOpcode() == ISD::FNEG)
7970       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
7971                          DAG.getConstant(SignBit, DL, VT));
7972 
7973     assert(Op0.getOpcode() == ISD::FABS);
7974     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
7975                        DAG.getConstant(~SignBit, DL, VT));
7976   }
7977   case ISD::ADD:
7978     return performADDCombine(N, DAG, Subtarget);
7979   case ISD::SUB:
7980     return performSUBCombine(N, DAG);
7981   case ISD::AND:
7982     return performANDCombine(N, DAG);
7983   case ISD::OR:
7984     return performORCombine(N, DAG, Subtarget);
7985   case ISD::XOR:
7986     return performXORCombine(N, DAG);
7987   case ISD::ANY_EXTEND:
7988     return performANY_EXTENDCombine(N, DCI, Subtarget);
7989   case ISD::ZERO_EXTEND:
7990     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
7991     // type legalization. This is safe because fp_to_uint produces poison if
7992     // it overflows.
7993     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
7994       SDValue Src = N->getOperand(0);
7995       if (Src.getOpcode() == ISD::FP_TO_UINT &&
7996           isTypeLegal(Src.getOperand(0).getValueType()))
7997         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
7998                            Src.getOperand(0));
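      // The strict variant also carries a chain; replace both the value and
      // the chain results and delete the old node so no dangling chain use
      // remains.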
7999       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
8000           isTypeLegal(Src.getOperand(1).getValueType())) {
8001         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
8002         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
8003                                   Src.getOperand(0), Src.getOperand(1));
8004         DCI.CombineTo(N, Res);
8005         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
8006         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
8007         return SDValue(N, 0); // Return N so it doesn't get rechecked.
8008       }
8009     }
8010     return SDValue();
8011   case RISCVISD::SELECT_CC: {
8012     // Try to fold or simplify this SELECT_CC node.
8013     SDValue LHS = N->getOperand(0);
8014     SDValue RHS = N->getOperand(1);
8015     SDValue TrueV = N->getOperand(3);
8016     SDValue FalseV = N->getOperand(4);
8017 
8018     // If the True and False values are the same, we don't need a select_cc.
8019     if (TrueV == FalseV)
8020       return TrueV;
8021 
8022     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
8023     if (!ISD::isIntEqualitySetCC(CCVal))
8024       break;
8025 
8026     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
8027     //      (select_cc X, Y, lt, trueV, falseV)
8028     // Sometimes the setcc is introduced after select_cc has been formed.
8029     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8030         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8031       // If we're looking for eq 0 instead of ne 0, we need to invert the
8032       // condition.
8033       bool Invert = CCVal == ISD::SETEQ;
8034       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8035       if (Invert)
8036         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8037 
8038       SDLoc DL(N);
8039       RHS = LHS.getOperand(1);
8040       LHS = LHS.getOperand(0);
8041       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8042 
8043       SDValue TargetCC = DAG.getCondCode(CCVal);
8044       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8045                          {LHS, RHS, TargetCC, TrueV, FalseV});
8046     }
8047 
8048     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
8049     //      (select_cc X, Y, eq/ne, trueV, falseV)
8050     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8051       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
8052                          {LHS.getOperand(0), LHS.getOperand(1),
8053                           N->getOperand(2), TrueV, FalseV});
8054     // (select_cc X, 1, setne, trueV, falseV) ->
8055     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
8056     // This can occur when legalizing some floating point comparisons.
8057     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8058     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8059       SDLoc DL(N);
8060       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8061       SDValue TargetCC = DAG.getCondCode(CCVal);
8062       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8063       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8064                          {LHS, RHS, TargetCC, TrueV, FalseV});
8065     }
8066 
8067     break;
8068   }
8069   case RISCVISD::BR_CC: {
8070     SDValue LHS = N->getOperand(1);
8071     SDValue RHS = N->getOperand(2);
8072     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
8073     if (!ISD::isIntEqualitySetCC(CCVal))
8074       break;
8075 
8076     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
8077     //      (br_cc X, Y, lt, dest)
8078     // Sometimes the setcc is introduced after br_cc has been formed.
8079     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8080         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8081       // If we're looking for eq 0 instead of ne 0, we need to invert the
8082       // condition.
8083       bool Invert = CCVal == ISD::SETEQ;
8084       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8085       if (Invert)
8086         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8087 
8088       SDLoc DL(N);
8089       RHS = LHS.getOperand(1);
8090       LHS = LHS.getOperand(0);
8091       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8092 
8093       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8094                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
8095                          N->getOperand(4));
8096     }
8097 
8098     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
8099     //      (br_cc X, Y, eq/ne, dest)
8100     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8101       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
8102                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
8103                          N->getOperand(3), N->getOperand(4));
8104 
8105     // (br_cc X, 1, setne, dest) ->
8106     // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
8107     // This can occur when legalizing some floating point comparisons.
8108     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8109     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8110       SDLoc DL(N);
8111       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8112       SDValue TargetCC = DAG.getCondCode(CCVal);
8113       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8114       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8115                          N->getOperand(0), LHS, RHS, TargetCC,
8116                          N->getOperand(4));
8117     }
8118     break;
8119   }
8120   case ISD::FP_TO_SINT:
8121   case ISD::FP_TO_UINT:
8122     return performFP_TO_INTCombine(N, DCI, Subtarget);
8123   case ISD::FP_TO_SINT_SAT:
8124   case ISD::FP_TO_UINT_SAT:
8125     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
8126   case ISD::FCOPYSIGN: {
8127     EVT VT = N->getValueType(0);
8128     if (!VT.isVector())
8129       break;
8130     // There is a form of VFSGNJ which injects the negated sign of its second
8131     // operand. Try and bubble any FNEG up after the extend/round to produce
8132     // this optimized pattern. Avoid modifying cases where the FP_ROUND has
8133     // TRUNC=1.
8134     SDValue In2 = N->getOperand(1);
8135     // Avoid cases where the extend/round has multiple uses, as duplicating
8136     // those is typically more expensive than removing a fneg.
8137     if (!In2.hasOneUse())
8138       break;
8139     if (In2.getOpcode() != ISD::FP_EXTEND &&
8140         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
8141       break;
8142     In2 = In2.getOperand(0);
8143     if (In2.getOpcode() != ISD::FNEG)
8144       break;
8145     SDLoc DL(N);
8146     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
8147     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
8148                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
8149   }
8150   case ISD::MGATHER:
8151   case ISD::MSCATTER:
8152   case ISD::VP_GATHER:
8153   case ISD::VP_SCATTER: {
8154     if (!DCI.isBeforeLegalize())
8155       break;
8156     SDValue Index, ScaleOp;
8157     bool IsIndexScaled = false;
8158     bool IsIndexSigned = false;
8159     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
8160       Index = VPGSN->getIndex();
8161       ScaleOp = VPGSN->getScale();
8162       IsIndexScaled = VPGSN->isIndexScaled();
8163       IsIndexSigned = VPGSN->isIndexSigned();
8164     } else {
8165       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
8166       Index = MGSN->getIndex();
8167       ScaleOp = MGSN->getScale();
8168       IsIndexScaled = MGSN->isIndexScaled();
8169       IsIndexSigned = MGSN->isIndexSigned();
8170     }
8171     EVT IndexVT = Index.getValueType();
8172     MVT XLenVT = Subtarget.getXLenVT();
8173     // RISCV indexed loads only support the "unsigned unscaled" addressing
8174     // mode, so anything else must be manually legalized.
8175     bool NeedsIdxLegalization =
8176         IsIndexScaled ||
8177         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
8178     if (!NeedsIdxLegalization)
8179       break;
8180 
8181     SDLoc DL(N);
8182 
8183     // Any index legalization should first promote to XLenVT, so we don't lose
8184     // bits when scaling. This may create an illegal index type so we let
8185     // LLVM's legalization take care of the splitting.
8186     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
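    // For example (illustrative): a signed i16 index with scale 4 is first
    // sign extended to XLenVT and then shifted left by 2, after which the
    // operation can use unsigned unscaled addressing.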
8187     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
8188       IndexVT = IndexVT.changeVectorElementType(XLenVT);
8189       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
8190                           DL, IndexVT, Index);
8191     }
8192 
8193     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
8194     if (IsIndexScaled && Scale != 1) {
8195       // Manually scale the indices by the element size.
8196       // TODO: Sanitize the scale operand here?
8197       // TODO: For VP nodes, should we use VP_SHL here?
8198       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
8199       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
8200       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
8201     }
8202 
8203     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
8204     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
8205       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
8206                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
8207                               VPGN->getScale(), VPGN->getMask(),
8208                               VPGN->getVectorLength()},
8209                              VPGN->getMemOperand(), NewIndexTy);
8210     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
8211       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
8212                               {VPSN->getChain(), VPSN->getValue(),
8213                                VPSN->getBasePtr(), Index, VPSN->getScale(),
8214                                VPSN->getMask(), VPSN->getVectorLength()},
8215                               VPSN->getMemOperand(), NewIndexTy);
8216     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
8217       return DAG.getMaskedGather(
8218           N->getVTList(), MGN->getMemoryVT(), DL,
8219           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
8220            MGN->getBasePtr(), Index, MGN->getScale()},
8221           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
8222     const auto *MSN = cast<MaskedScatterSDNode>(N);
8223     return DAG.getMaskedScatter(
8224         N->getVTList(), MSN->getMemoryVT(), DL,
8225         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
8226          Index, MSN->getScale()},
8227         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
8228   }
8229   case RISCVISD::SRA_VL:
8230   case RISCVISD::SRL_VL:
8231   case RISCVISD::SHL_VL: {
8232     SDValue ShAmt = N->getOperand(1);
8233     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8234       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8235       SDLoc DL(N);
8236       SDValue VL = N->getOperand(3);
8237       EVT VT = N->getValueType(0);
8238       ShAmt =
8239           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
8240       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
8241                          N->getOperand(2), N->getOperand(3));
8242     }
8243     break;
8244   }
8245   case ISD::SRA:
8246   case ISD::SRL:
8247   case ISD::SHL: {
8248     SDValue ShAmt = N->getOperand(1);
8249     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8250       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8251       SDLoc DL(N);
8252       EVT VT = N->getValueType(0);
8253       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0),
8254                           DAG.getTargetConstant(RISCV::VLMaxSentinel, DL,
8255                                                 Subtarget.getXLenVT()));
8256       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
8257     }
8258     break;
8259   }
8260   case RISCVISD::ADD_VL:
8261     if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false))
8262       return V;
8263     return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true);
8264   case RISCVISD::SUB_VL:
8265     return combineADDSUB_VLToVWADDSUB_VL(N, DAG);
8266   case RISCVISD::VWADD_W_VL:
8267   case RISCVISD::VWADDU_W_VL:
8268   case RISCVISD::VWSUB_W_VL:
8269   case RISCVISD::VWSUBU_W_VL:
8270     return combineVWADD_W_VL_VWSUB_W_VL(N, DAG);
8271   case RISCVISD::MUL_VL:
8272     if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
8273       return V;
8274     // Mul is commutative.
8275     return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
8276   case ISD::STORE: {
8277     auto *Store = cast<StoreSDNode>(N);
8278     SDValue Val = Store->getValue();
8279     // Combine store of vmv.x.s to vse with VL of 1.
8280     // FIXME: Support FP.
8281     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
8282       SDValue Src = Val.getOperand(0);
8283       EVT VecVT = Src.getValueType();
8284       EVT MemVT = Store->getMemoryVT();
8285       // The memory VT and the element type must match.
8286       if (VecVT.getVectorElementType() == MemVT) {
8287         SDLoc DL(N);
8288         MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
8289         return DAG.getStoreVP(
8290             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
8291             DAG.getConstant(1, DL, MaskVT),
8292             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
8293             Store->getMemOperand(), Store->getAddressingMode(),
8294             Store->isTruncatingStore(), /*IsCompress*/ false);
8295       }
8296     }
8297 
8298     break;
8299   }
8300   case ISD::SPLAT_VECTOR: {
8301     EVT VT = N->getValueType(0);
8302     // Only perform this combine on legal MVT types.
8303     if (!isTypeLegal(VT))
8304       break;
8305     if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
8306                                          DAG, Subtarget))
8307       return Gather;
8308     break;
8309   }
8310   }
8311 
8312   return SDValue();
8313 }
8314 
8315 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
8316     const SDNode *N, CombineLevel Level) const {
8317   // The following folds are only desirable if `(OP _, c1 << c2)` can be
8318   // materialised in fewer instructions than `(OP _, c1)`:
8319   //
8320   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
8321   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
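  // For example (illustrative): with c1 = 2047 and c2 = 1, c1 fits an ADDI
  // immediate but c1 << c2 = 4094 does not, so the combine is rejected below.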
8322   SDValue N0 = N->getOperand(0);
8323   EVT Ty = N0.getValueType();
8324   if (Ty.isScalarInteger() &&
8325       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
8326     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
8327     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
8328     if (C1 && C2) {
8329       const APInt &C1Int = C1->getAPIntValue();
8330       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
8331 
8332       // We can materialise `c1 << c2` into an add immediate, so it's "free",
8333       // and the combine should happen, to potentially allow further combines
8334       // later.
8335       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
8336           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
8337         return true;
8338 
8339       // We can materialise `c1` in an add immediate, so it's "free", and the
8340       // combine should be prevented.
8341       if (C1Int.getMinSignedBits() <= 64 &&
8342           isLegalAddImmediate(C1Int.getSExtValue()))
8343         return false;
8344 
8345       // Neither constant will fit into an immediate, so find materialisation
8346       // costs.
8347       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
8348                                               Subtarget.getFeatureBits(),
8349                                               /*CompressionCost*/true);
8350       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
8351           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
8352           /*CompressionCost*/true);
8353 
8354       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
8355       // combine should be prevented.
8356       if (C1Cost < ShiftedC1Cost)
8357         return false;
8358     }
8359   }
8360   return true;
8361 }
8362 
8363 bool RISCVTargetLowering::targetShrinkDemandedConstant(
8364     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
8365     TargetLoweringOpt &TLO) const {
8366   // Delay this optimization as late as possible.
8367   if (!TLO.LegalOps)
8368     return false;
8369 
8370   EVT VT = Op.getValueType();
8371   if (VT.isVector())
8372     return false;
8373 
8374   // Only handle AND for now.
8375   if (Op.getOpcode() != ISD::AND)
8376     return false;
8377 
8378   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
8379   if (!C)
8380     return false;
8381 
8382   const APInt &Mask = C->getAPIntValue();
8383 
8384   // Clear all non-demanded bits initially.
8385   APInt ShrunkMask = Mask & DemandedBits;
8386 
8387   // Try to make a smaller immediate by setting undemanded bits.
8388 
8389   APInt ExpandedMask = Mask | ~DemandedBits;
8390 
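  // Any mask that contains ShrunkMask and is contained in ExpandedMask acts
  // identically on the demanded bits, so we are free to pick whichever such
  // mask is cheapest to materialize.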
8391   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
8392     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
8393   };
8394   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
8395     if (NewMask == Mask)
8396       return true;
8397     SDLoc DL(Op);
8398     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
8399     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
8400     return TLO.CombineTo(Op, NewOp);
8401   };
8402 
8403   // If the shrunk mask fits in sign extended 12 bits, let the target
8404   // independent code apply it.
8405   if (ShrunkMask.isSignedIntN(12))
8406     return false;
8407 
8408   // Preserve (and X, 0xffff) when zext.h is supported.
8409   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
8410     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
8411     if (IsLegalMask(NewMask))
8412       return UseMask(NewMask);
8413   }
8414 
8415   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
8416   if (VT == MVT::i64) {
8417     APInt NewMask = APInt(64, 0xffffffff);
8418     if (IsLegalMask(NewMask))
8419       return UseMask(NewMask);
8420   }
8421 
8422   // For the remaining optimizations, we need to be able to make a negative
8423   // number through a combination of mask and undemanded bits.
8424   if (!ExpandedMask.isNegative())
8425     return false;
8426 
8427   // Compute the fewest number of bits needed to represent the negative number.
8428   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
8429 
8430   // Try to make a 12 bit negative immediate. If that fails try to make a 32
8431   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
8432   APInt NewMask = ShrunkMask;
8433   if (MinSignedBits <= 12)
8434     NewMask.setBitsFrom(11);
8435   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
8436     NewMask.setBitsFrom(31);
8437   else
8438     return false;
8439 
8440   // Check that our new mask is a subset of the demanded mask.
8441   assert(IsLegalMask(NewMask));
8442   return UseMask(NewMask);
8443 }
8444 
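// Apply the generalized bit-reverse (GREV) permutation with shift amount ShAmt
// to the constant Src. Each set bit k of ShAmt swaps adjacent blocks of 2^k
// bits, mirroring the behaviour of the grevi instruction.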
8445 static void computeGREV(APInt &Src, unsigned ShAmt) {
8446   ShAmt &= Src.getBitWidth() - 1;
8447   uint64_t x = Src.getZExtValue();
8448   if (ShAmt & 1)
8449     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
8450   if (ShAmt & 2)
8451     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
8452   if (ShAmt & 4)
8453     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
8454   if (ShAmt & 8)
8455     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
8456   if (ShAmt & 16)
8457     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
8458   if (ShAmt & 32)
8459     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
8460   Src = x;
8461 }
8462 
8463 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
8464                                                         KnownBits &Known,
8465                                                         const APInt &DemandedElts,
8466                                                         const SelectionDAG &DAG,
8467                                                         unsigned Depth) const {
8468   unsigned BitWidth = Known.getBitWidth();
8469   unsigned Opc = Op.getOpcode();
8470   assert((Opc >= ISD::BUILTIN_OP_END ||
8471           Opc == ISD::INTRINSIC_WO_CHAIN ||
8472           Opc == ISD::INTRINSIC_W_CHAIN ||
8473           Opc == ISD::INTRINSIC_VOID) &&
8474          "Should use MaskedValueIsZero if you don't know whether Op"
8475          " is a target node!");
8476 
8477   Known.resetAll();
8478   switch (Opc) {
8479   default: break;
8480   case RISCVISD::SELECT_CC: {
8481     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
8482     // If we don't know any bits, early out.
8483     if (Known.isUnknown())
8484       break;
8485     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
8486 
8487     // Only known if known in both the LHS and RHS.
8488     Known = KnownBits::commonBits(Known, Known2);
8489     break;
8490   }
8491   case RISCVISD::REMUW: {
8492     KnownBits Known2;
8493     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8494     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8495     // We only care about the lower 32 bits.
8496     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
8497     // Restore the original width by sign extending.
8498     Known = Known.sext(BitWidth);
8499     break;
8500   }
8501   case RISCVISD::DIVUW: {
8502     KnownBits Known2;
8503     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8504     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8505     // We only care about the lower 32 bits.
8506     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
8507     // Restore the original width by sign extending.
8508     Known = Known.sext(BitWidth);
8509     break;
8510   }
8511   case RISCVISD::CTZW: {
8512     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8513     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
8514     unsigned LowBits = Log2_32(PossibleTZ) + 1;
8515     Known.Zero.setBitsFrom(LowBits);
8516     break;
8517   }
8518   case RISCVISD::CLZW: {
8519     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8520     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
8521     unsigned LowBits = Log2_32(PossibleLZ) + 1;
8522     Known.Zero.setBitsFrom(LowBits);
8523     break;
8524   }
8525   case RISCVISD::GREV:
8526   case RISCVISD::GREVW: {
8527     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
8528       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8529       if (Opc == RISCVISD::GREVW)
8530         Known = Known.trunc(32);
8531       unsigned ShAmt = C->getZExtValue();
8532       computeGREV(Known.Zero, ShAmt);
8533       computeGREV(Known.One, ShAmt);
8534       if (Opc == RISCVISD::GREVW)
8535         Known = Known.sext(BitWidth);
8536     }
8537     break;
8538   }
8539   case RISCVISD::READ_VLENB: {
8540     // If we know the minimum VLen from Zvl extensions, we can use that to
8541     // determine the trailing zeros of VLENB.
8542     // FIXME: Limit to 128 bit vectors until we have more testing.
8543     unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8;
8544     if (MinVLenB > 0)
8545       Known.Zero.setLowBits(Log2_32(MinVLenB));
8546     // We assume VLENB is no more than 65536 / 8 bytes.
8547     Known.Zero.setBitsFrom(14);
8548     break;
8549   }
8550   case ISD::INTRINSIC_W_CHAIN:
8551   case ISD::INTRINSIC_WO_CHAIN: {
8552     unsigned IntNo =
8553         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
8554     switch (IntNo) {
8555     default:
8556       // We can't do anything for most intrinsics.
8557       break;
8558     case Intrinsic::riscv_vsetvli:
8559     case Intrinsic::riscv_vsetvlimax:
8560     case Intrinsic::riscv_vsetvli_opt:
8561     case Intrinsic::riscv_vsetvlimax_opt:
8562       // Assume that VL output is positive and would fit in an int32_t.
8563       // TODO: VLEN might be capped at 16 bits in a future V spec update.
8564       if (BitWidth >= 32)
8565         Known.Zero.setBitsFrom(31);
8566       break;
8567     }
8568     break;
8569   }
8570   }
8571 }
8572 
8573 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
8574     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
8575     unsigned Depth) const {
8576   switch (Op.getOpcode()) {
8577   default:
8578     break;
8579   case RISCVISD::SELECT_CC: {
8580     unsigned Tmp =
8581         DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
8582     if (Tmp == 1) return 1;  // Early out.
8583     unsigned Tmp2 =
8584         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
8585     return std::min(Tmp, Tmp2);
8586   }
8587   case RISCVISD::SLLW:
8588   case RISCVISD::SRAW:
8589   case RISCVISD::SRLW:
8590   case RISCVISD::DIVW:
8591   case RISCVISD::DIVUW:
8592   case RISCVISD::REMUW:
8593   case RISCVISD::ROLW:
8594   case RISCVISD::RORW:
8595   case RISCVISD::GREVW:
8596   case RISCVISD::GORCW:
8597   case RISCVISD::FSLW:
8598   case RISCVISD::FSRW:
8599   case RISCVISD::SHFLW:
8600   case RISCVISD::UNSHFLW:
8601   case RISCVISD::BCOMPRESSW:
8602   case RISCVISD::BDECOMPRESSW:
8603   case RISCVISD::BFPW:
8604   case RISCVISD::FCVT_W_RV64:
8605   case RISCVISD::FCVT_WU_RV64:
8606   case RISCVISD::STRICT_FCVT_W_RV64:
8607   case RISCVISD::STRICT_FCVT_WU_RV64:
8608     // TODO: As the result is sign-extended, this is conservatively correct. A
8609     // more precise answer could be calculated for SRAW depending on known
8610     // bits in the shift amount.
8611     return 33;
8612   case RISCVISD::SHFL:
8613   case RISCVISD::UNSHFL: {
8614     // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word
8615     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
8616     // will stay within the upper 32 bits. If there were more than 32 sign bits
8617     // before there will be at least 33 sign bits after.
8618     if (Op.getValueType() == MVT::i64 &&
8619         isa<ConstantSDNode>(Op.getOperand(1)) &&
8620         (Op.getConstantOperandVal(1) & 0x10) == 0) {
8621       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
8622       if (Tmp > 32)
8623         return 33;
8624     }
8625     break;
8626   }
8627   case RISCVISD::VMV_X_S: {
8628     // The number of sign bits of the scalar result is computed by obtaining the
8629     // element type of the input vector operand, subtracting its width from the
8630     // XLEN, and then adding one (sign bit within the element type). If the
8631     // element type is wider than XLen, the least-significant XLEN bits are
8632     // taken.
8633     unsigned XLen = Subtarget.getXLen();
8634     unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
8635     if (EltBits <= XLen)
8636       return XLen - EltBits + 1;
8637     break;
8638   }
8639   }
8640 
8641   return 1;
8642 }
8643 
8644 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
8645                                                   MachineBasicBlock *BB) {
8646   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
8647 
8648   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
8649   // Should the count have wrapped while it was being read, we need to try
8650   // again.
8651   // ...
8652   // read:
8653   // rdcycleh x3 # load high word of cycle
8654   // rdcycle  x2 # load low word of cycle
8655   // rdcycleh x4 # load high word of cycle
8656   // bne x3, x4, read # check if high word reads match, otherwise try again
8657   // ...
8658 
8659   MachineFunction &MF = *BB->getParent();
8660   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8661   MachineFunction::iterator It = ++BB->getIterator();
8662 
8663   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
8664   MF.insert(It, LoopMBB);
8665 
8666   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
8667   MF.insert(It, DoneMBB);
8668 
8669   // Transfer the remainder of BB and its successor edges to DoneMBB.
8670   DoneMBB->splice(DoneMBB->begin(), BB,
8671                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
8672   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
8673 
8674   BB->addSuccessor(LoopMBB);
8675 
8676   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8677   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8678   Register LoReg = MI.getOperand(0).getReg();
8679   Register HiReg = MI.getOperand(1).getReg();
8680   DebugLoc DL = MI.getDebugLoc();
8681 
8682   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
8683   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
8684       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
8685       .addReg(RISCV::X0);
8686   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
8687       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
8688       .addReg(RISCV::X0);
8689   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
8690       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
8691       .addReg(RISCV::X0);
8692 
8693   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
8694       .addReg(HiReg)
8695       .addReg(ReadAgainReg)
8696       .addMBB(LoopMBB);
8697 
8698   LoopMBB->addSuccessor(LoopMBB);
8699   LoopMBB->addSuccessor(DoneMBB);
8700 
8701   MI.eraseFromParent();
8702 
8703   return DoneMBB;
8704 }
8705 
8706 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
8707                                              MachineBasicBlock *BB) {
8708   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
8709 
8710   MachineFunction &MF = *BB->getParent();
8711   DebugLoc DL = MI.getDebugLoc();
8712   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
8713   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
8714   Register LoReg = MI.getOperand(0).getReg();
8715   Register HiReg = MI.getOperand(1).getReg();
8716   Register SrcReg = MI.getOperand(2).getReg();
8717   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
8718   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
8719 
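  // Split the f64 by spilling the FPR64 source to a stack slot and then
  // reloading the low and high 32-bit halves into the two GPR results.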
8720   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
8721                           RI);
8722   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
8723   MachineMemOperand *MMOLo =
8724       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
8725   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
8726       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
8727   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
8728       .addFrameIndex(FI)
8729       .addImm(0)
8730       .addMemOperand(MMOLo);
8731   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
8732       .addFrameIndex(FI)
8733       .addImm(4)
8734       .addMemOperand(MMOHi);
8735   MI.eraseFromParent(); // The pseudo instruction is gone now.
8736   return BB;
8737 }
8738 
8739 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
8740                                                  MachineBasicBlock *BB) {
8741   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
8742          "Unexpected instruction");
8743 
8744   MachineFunction &MF = *BB->getParent();
8745   DebugLoc DL = MI.getDebugLoc();
8746   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
8747   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
8748   Register DstReg = MI.getOperand(0).getReg();
8749   Register LoReg = MI.getOperand(1).getReg();
8750   Register HiReg = MI.getOperand(2).getReg();
8751   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
8752   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
8753 
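  // Build the f64 by storing the two 32-bit GPR halves to a stack slot and
  // then reloading them as a single FPR64 value.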
8754   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
8755   MachineMemOperand *MMOLo =
8756       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
8757   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
8758       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
8759   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
8760       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
8761       .addFrameIndex(FI)
8762       .addImm(0)
8763       .addMemOperand(MMOLo);
8764   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
8765       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
8766       .addFrameIndex(FI)
8767       .addImm(4)
8768       .addMemOperand(MMOHi);
8769   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
8770   MI.eraseFromParent(); // The pseudo instruction is gone now.
8771   return BB;
8772 }
8773 
8774 static bool isSelectPseudo(MachineInstr &MI) {
8775   switch (MI.getOpcode()) {
8776   default:
8777     return false;
8778   case RISCV::Select_GPR_Using_CC_GPR:
8779   case RISCV::Select_FPR16_Using_CC_GPR:
8780   case RISCV::Select_FPR32_Using_CC_GPR:
8781   case RISCV::Select_FPR64_Using_CC_GPR:
8782     return true;
8783   }
8784 }
8785 
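// Expand a quiet floating-point compare pseudo. The relational compare
// (RelOpcode, e.g. FLT/FLE) signals on any NaN input, so FFLAGS is saved and
// restored around it; a dummy FEQ (EqOpcode), which only signals on signaling
// NaNs, is then issued so the invalid flag is raised exactly when a quiet
// compare requires it.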
8786 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
8787                                         unsigned RelOpcode, unsigned EqOpcode,
8788                                         const RISCVSubtarget &Subtarget) {
8789   DebugLoc DL = MI.getDebugLoc();
8790   Register DstReg = MI.getOperand(0).getReg();
8791   Register Src1Reg = MI.getOperand(1).getReg();
8792   Register Src2Reg = MI.getOperand(2).getReg();
8793   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
8794   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
8795   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
8796 
8797   // Save the current FFLAGS.
8798   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
8799 
8800   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
8801                  .addReg(Src1Reg)
8802                  .addReg(Src2Reg);
8803   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
8804     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
8805 
8806   // Restore the FFLAGS.
8807   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
8808       .addReg(SavedFFlags, RegState::Kill);
8809 
  // Issue a dummy FEQ opcode to raise an exception for signaling NaNs.
8811   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
8812                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
8813                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
8814   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
8815     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
8816 
8817   // Erase the pseudoinstruction.
8818   MI.eraseFromParent();
8819   return BB;
8820 }
8821 
8822 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
8823                                            MachineBasicBlock *BB,
8824                                            const RISCVSubtarget &Subtarget) {
8825   // To "insert" Select_* instructions, we actually have to insert the triangle
8826   // control-flow pattern.  The incoming instructions know the destination vreg
8827   // to set, the condition code register to branch on, the true/false values to
8828   // select between, and the condcode to use to select the appropriate branch.
8829   //
8830   // We produce the following control flow:
8831   //     HeadMBB
8832   //     |  \
8833   //     |  IfFalseMBB
8834   //     | /
8835   //    TailMBB
8836   //
8837   // When we find a sequence of selects we attempt to optimize their emission
8838   // by sharing the control flow. Currently we only handle cases where we have
8839   // multiple selects with the exact same condition (same LHS, RHS and CC).
8840   // The selects may be interleaved with other instructions if the other
8841   // instructions meet some requirements we deem safe:
8842   // - They are debug instructions. Otherwise,
8843   // - They do not have side-effects, do not access memory and their inputs do
8844   //   not depend on the results of the select pseudo-instructions.
8845   // The TrueV/FalseV operands of the selects cannot depend on the result of
8846   // previous selects in the sequence.
8847   // These conditions could be further relaxed. See the X86 target for a
8848   // related approach and more information.
8849   Register LHS = MI.getOperand(1).getReg();
8850   Register RHS = MI.getOperand(2).getReg();
8851   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
8852 
8853   SmallVector<MachineInstr *, 4> SelectDebugValues;
8854   SmallSet<Register, 4> SelectDests;
8855   SelectDests.insert(MI.getOperand(0).getReg());
8856 
8857   MachineInstr *LastSelectPseudo = &MI;
8858 
8859   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
8860        SequenceMBBI != E; ++SequenceMBBI) {
8861     if (SequenceMBBI->isDebugInstr())
8862       continue;
8863     else if (isSelectPseudo(*SequenceMBBI)) {
8864       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
8865           SequenceMBBI->getOperand(2).getReg() != RHS ||
8866           SequenceMBBI->getOperand(3).getImm() != CC ||
8867           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
8868           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
8869         break;
8870       LastSelectPseudo = &*SequenceMBBI;
8871       SequenceMBBI->collectDebugValues(SelectDebugValues);
8872       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
8873     } else {
8874       if (SequenceMBBI->hasUnmodeledSideEffects() ||
8875           SequenceMBBI->mayLoadOrStore())
8876         break;
8877       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
8878             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
8879           }))
8880         break;
8881     }
8882   }
8883 
8884   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
8885   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8886   DebugLoc DL = MI.getDebugLoc();
8887   MachineFunction::iterator I = ++BB->getIterator();
8888 
8889   MachineBasicBlock *HeadMBB = BB;
8890   MachineFunction *F = BB->getParent();
8891   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
8892   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
8893 
8894   F->insert(I, IfFalseMBB);
8895   F->insert(I, TailMBB);
8896 
8897   // Transfer debug instructions associated with the selects to TailMBB.
8898   for (MachineInstr *DebugInstr : SelectDebugValues) {
8899     TailMBB->push_back(DebugInstr->removeFromParent());
8900   }
8901 
8902   // Move all instructions after the sequence to TailMBB.
8903   TailMBB->splice(TailMBB->end(), HeadMBB,
8904                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
8905   // Update machine-CFG edges by transferring all successors of the current
8906   // block to the new block which will contain the Phi nodes for the selects.
8907   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
8908   // Set the successors for HeadMBB.
8909   HeadMBB->addSuccessor(IfFalseMBB);
8910   HeadMBB->addSuccessor(TailMBB);
8911 
8912   // Insert appropriate branch.
8913   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
8914     .addReg(LHS)
8915     .addReg(RHS)
8916     .addMBB(TailMBB);
8917 
8918   // IfFalseMBB just falls through to TailMBB.
8919   IfFalseMBB->addSuccessor(TailMBB);
8920 
8921   // Create PHIs for all of the select pseudo-instructions.
8922   auto SelectMBBI = MI.getIterator();
8923   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
8924   auto InsertionPoint = TailMBB->begin();
8925   while (SelectMBBI != SelectEnd) {
8926     auto Next = std::next(SelectMBBI);
8927     if (isSelectPseudo(*SelectMBBI)) {
8928       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
8929       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
8930               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
8931           .addReg(SelectMBBI->getOperand(4).getReg())
8932           .addMBB(HeadMBB)
8933           .addReg(SelectMBBI->getOperand(5).getReg())
8934           .addMBB(IfFalseMBB);
8935       SelectMBBI->eraseFromParent();
8936     }
8937     SelectMBBI = Next;
8938   }
8939 
8940   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
8941   return TailMBB;
8942 }
8943 
8944 MachineBasicBlock *
8945 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
8946                                                  MachineBasicBlock *BB) const {
8947   switch (MI.getOpcode()) {
8948   default:
8949     llvm_unreachable("Unexpected instr type to insert");
8950   case RISCV::ReadCycleWide:
8951     assert(!Subtarget.is64Bit() &&
8952            "ReadCycleWrite is only to be used on riscv32");
8953     return emitReadCycleWidePseudo(MI, BB);
8954   case RISCV::Select_GPR_Using_CC_GPR:
8955   case RISCV::Select_FPR16_Using_CC_GPR:
8956   case RISCV::Select_FPR32_Using_CC_GPR:
8957   case RISCV::Select_FPR64_Using_CC_GPR:
8958     return emitSelectPseudo(MI, BB, Subtarget);
8959   case RISCV::BuildPairF64Pseudo:
8960     return emitBuildPairF64Pseudo(MI, BB);
8961   case RISCV::SplitF64Pseudo:
8962     return emitSplitF64Pseudo(MI, BB);
8963   case RISCV::PseudoQuietFLE_H:
8964     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
8965   case RISCV::PseudoQuietFLT_H:
8966     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
8967   case RISCV::PseudoQuietFLE_S:
8968     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
8969   case RISCV::PseudoQuietFLT_S:
8970     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
8971   case RISCV::PseudoQuietFLE_D:
8972     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
8973   case RISCV::PseudoQuietFLT_D:
8974     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
8975   }
8976 }
8977 
8978 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
8979                                                         SDNode *Node) const {
8980   // Add FRM dependency to any instructions with dynamic rounding mode.
8981   unsigned Opc = MI.getOpcode();
8982   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
8983   if (Idx < 0)
8984     return;
8985   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
8986     return;
8987   // If the instruction already reads FRM, don't add another read.
8988   if (MI.readsRegister(RISCV::FRM))
8989     return;
8990   MI.addOperand(
8991       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
8992 }
8993 
8994 // Calling Convention Implementation.
8995 // The expectations for frontend ABI lowering vary from target to target.
8996 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
8997 // details, but this is a longer term goal. For now, we simply try to keep the
8998 // role of the frontend as simple and well-defined as possible. The rules can
8999 // be summarised as:
9000 // * Never split up large scalar arguments. We handle them here.
9001 // * If a hardfloat calling convention is being used, and the struct may be
9002 // passed in a pair of registers (fp+fp, int+fp), and both registers are
9003 // available, then pass as two separate arguments. If either the GPRs or FPRs
9004 // are exhausted, then pass according to the rule below.
9005 // * If a struct could never be passed in registers or directly in a stack
9006 // slot (as it is larger than 2*XLEN and the floating point rules don't
9007 // apply), then pass it using a pointer with the byval attribute.
9008 // * If a struct is less than 2*XLEN, then coerce to either a two-element
9009 // word-sized array or a 2*XLEN scalar (depending on alignment).
9010 // * The frontend can determine whether a struct is returned by reference or
9011 // not based on its size and fields. If it will be returned by reference, the
9012 // frontend must modify the prototype so a pointer with the sret annotation is
9013 // passed as the first argument. This is not necessary for large scalar
9014 // returns.
9015 // * Struct return values and varargs should be coerced to structs containing
9016 // register-size fields in the same situations they would be for fixed
9017 // arguments.
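//
// As an illustration (not exhaustive): under the hard-float 'd' ABI, a struct
// such as { double; int } is expected to arrive from the frontend as two
// separate arguments (an f64 and an XLEN integer) while FPRs and GPRs remain
// available, whereas a scalar wider than 2*XLEN (e.g. i128 on RV32) is left
// whole by the frontend and is passed indirectly by the code below.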
9018 
9019 static const MCPhysReg ArgGPRs[] = {
9020   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
9021   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
9022 };
9023 static const MCPhysReg ArgFPR16s[] = {
9024   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
9025   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
9026 };
9027 static const MCPhysReg ArgFPR32s[] = {
9028   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
9029   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
9030 };
9031 static const MCPhysReg ArgFPR64s[] = {
9032   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
9033   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
9034 };
9035 // This is an interim calling convention and it may be changed in the future.
9036 static const MCPhysReg ArgVRs[] = {
9037     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
9038     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
9039     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
9040 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
9041                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
9042                                      RISCV::V20M2, RISCV::V22M2};
9043 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
9044                                      RISCV::V20M4};
9045 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
9046 
9047 // Pass a 2*XLEN argument that has been split into two XLEN values through
9048 // registers or the stack as necessary.
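// For example, on RV32 a fixed i64 argument split into two i32 halves may
// land in two argument GPRs, or have its low half in the last free GPR and
// its high half on the stack, or be placed entirely on the stack, depending
// on how many argument registers remain.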
9049 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
9050                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
9051                                 MVT ValVT2, MVT LocVT2,
9052                                 ISD::ArgFlagsTy ArgFlags2) {
9053   unsigned XLenInBytes = XLen / 8;
9054   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9055     // At least one half can be passed via register.
9056     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
9057                                      VA1.getLocVT(), CCValAssign::Full));
9058   } else {
9059     // Both halves must be passed on the stack, with proper alignment.
9060     Align StackAlign =
9061         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
9062     State.addLoc(
9063         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
9064                             State.AllocateStack(XLenInBytes, StackAlign),
9065                             VA1.getLocVT(), CCValAssign::Full));
9066     State.addLoc(CCValAssign::getMem(
9067         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9068         LocVT2, CCValAssign::Full));
9069     return false;
9070   }
9071 
9072   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9073     // The second half can also be passed via register.
9074     State.addLoc(
9075         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
9076   } else {
9077     // The second half is passed via the stack, without additional alignment.
9078     State.addLoc(CCValAssign::getMem(
9079         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9080         LocVT2, CCValAssign::Full));
9081   }
9082 
9083   return false;
9084 }
9085 
9086 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
9087                                Optional<unsigned> FirstMaskArgument,
9088                                CCState &State, const RISCVTargetLowering &TLI) {
9089   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
9090   if (RC == &RISCV::VRRegClass) {
9091     // Assign the first mask argument to V0.
9092     // This is an interim calling convention and it may be changed in the
9093     // future.
9094     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
9095       return State.AllocateReg(RISCV::V0);
9096     return State.AllocateReg(ArgVRs);
9097   }
9098   if (RC == &RISCV::VRM2RegClass)
9099     return State.AllocateReg(ArgVRM2s);
9100   if (RC == &RISCV::VRM4RegClass)
9101     return State.AllocateReg(ArgVRM4s);
9102   if (RC == &RISCV::VRM8RegClass)
9103     return State.AllocateReg(ArgVRM8s);
9104   llvm_unreachable("Unhandled register class for ValueType");
9105 }
9106 
9107 // Implements the RISC-V calling convention. Returns true upon failure.
9108 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
9109                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
9110                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
9111                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
9112                      Optional<unsigned> FirstMaskArgument) {
9113   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
9114   assert(XLen == 32 || XLen == 64);
9115   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
9116 
  // Any return value split into more than two values can't be returned
9118   // directly. Vectors are returned via the available vector registers.
9119   if (!LocVT.isVector() && IsRet && ValNo > 1)
9120     return true;
9121 
  // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
  // passing a variadic argument, or if no F16/F32 argument registers are
  // available.
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 is true if targeting soft-float ABIs or an FLEN=32 ABI, if
  // passing a variadic argument, or if no F64 argument registers are
  // available.
  bool UseGPRForF64 = true;
9128 
9129   switch (ABI) {
9130   default:
9131     llvm_unreachable("Unexpected ABI");
9132   case RISCVABI::ABI_ILP32:
9133   case RISCVABI::ABI_LP64:
9134     break;
9135   case RISCVABI::ABI_ILP32F:
9136   case RISCVABI::ABI_LP64F:
9137     UseGPRForF16_F32 = !IsFixed;
9138     break;
9139   case RISCVABI::ABI_ILP32D:
9140   case RISCVABI::ABI_LP64D:
9141     UseGPRForF16_F32 = !IsFixed;
9142     UseGPRForF64 = !IsFixed;
9143     break;
9144   }
9145 
  // FPR16, FPR32, and FPR64 alias each other, so once the FPR32 argument
  // registers are exhausted the f16 and f64 argument registers are too.
9147   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
9148     UseGPRForF16_F32 = true;
9149     UseGPRForF64 = true;
9150   }
9151 
9152   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
9153   // similar local variables rather than directly checking against the target
9154   // ABI.
9155 
9156   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
9157     LocVT = XLenVT;
9158     LocInfo = CCValAssign::BCvt;
9159   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
9160     LocVT = MVT::i64;
9161     LocInfo = CCValAssign::BCvt;
9162   }
9163 
9164   // If this is a variadic argument, the RISC-V calling convention requires
9165   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
9166   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
9167   // be used regardless of whether the original argument was split during
9168   // legalisation or not. The argument will not be passed by registers if the
9169   // original type is larger than 2*XLEN, so the register alignment rule does
9170   // not apply.
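  // For example, on RV32 a variadic double (8 bytes with 8-byte alignment)
  // passed in GPRs must start in an even-numbered register, so an
  // odd-numbered register is skipped below if necessary.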
9171   unsigned TwoXLenInBytes = (2 * XLen) / 8;
9172   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
9173       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
9174     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
9175     // Skip 'odd' register if necessary.
9176     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
9177       State.AllocateReg(ArgGPRs);
9178   }
9179 
9180   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
9181   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
9182       State.getPendingArgFlags();
9183 
9184   assert(PendingLocs.size() == PendingArgFlags.size() &&
9185          "PendingLocs and PendingArgFlags out of sync");
9186 
9187   // Handle passing f64 on RV32D with a soft float ABI or when floating point
9188   // registers are exhausted.
9189   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
9190     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
9191            "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
9193     // GPRs, split between a GPR and the stack, or passed completely on the
9194     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
9195     // cases.
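    // For example: with two free GPRs the halves go in a register pair; with
    // only a7 free, the low half goes in a7 and the high half in a 4-byte
    // stack slot; with no GPRs free, the whole f64 takes an 8-byte,
    // 8-byte-aligned stack slot.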
9196     Register Reg = State.AllocateReg(ArgGPRs);
9197     LocVT = MVT::i32;
9198     if (!Reg) {
9199       unsigned StackOffset = State.AllocateStack(8, Align(8));
9200       State.addLoc(
9201           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9202       return false;
9203     }
9204     if (!State.AllocateReg(ArgGPRs))
9205       State.AllocateStack(4, Align(4));
9206     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9207     return false;
9208   }
9209 
9210   // Fixed-length vectors are located in the corresponding scalable-vector
9211   // container types.
9212   if (ValVT.isFixedLengthVector())
9213     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9214 
9215   // Split arguments might be passed indirectly, so keep track of the pending
9216   // values. Split vectors are passed via a mix of registers and indirectly, so
9217   // treat them as we would any other argument.
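  // For example, an i128 on RV32 is split into four XLEN parts, so it is
  // passed indirectly: the caller spills the parts to memory and passes a
  // single pointer in a GPR or on the stack.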
9218   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
9219     LocVT = XLenVT;
9220     LocInfo = CCValAssign::Indirect;
9221     PendingLocs.push_back(
9222         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
9223     PendingArgFlags.push_back(ArgFlags);
9224     if (!ArgFlags.isSplitEnd()) {
9225       return false;
9226     }
9227   }
9228 
9229   // If the split argument only had two elements, it should be passed directly
9230   // in registers or on the stack.
9231   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
9232       PendingLocs.size() <= 2) {
9233     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
9234     // Apply the normal calling convention rules to the first half of the
9235     // split argument.
9236     CCValAssign VA = PendingLocs[0];
9237     ISD::ArgFlagsTy AF = PendingArgFlags[0];
9238     PendingLocs.clear();
9239     PendingArgFlags.clear();
9240     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
9241                                ArgFlags);
9242   }
9243 
9244   // Allocate to a register if possible, or else a stack slot.
9245   Register Reg;
9246   unsigned StoreSizeBytes = XLen / 8;
9247   Align StackAlign = Align(XLen / 8);
9248 
9249   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
9250     Reg = State.AllocateReg(ArgFPR16s);
9251   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
9252     Reg = State.AllocateReg(ArgFPR32s);
9253   else if (ValVT == MVT::f64 && !UseGPRForF64)
9254     Reg = State.AllocateReg(ArgFPR64s);
9255   else if (ValVT.isVector()) {
9256     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
9257     if (!Reg) {
9258       // For return values, the vector must be passed fully via registers or
9259       // via the stack.
9260       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
9261       // but we're using all of them.
9262       if (IsRet)
9263         return true;
9264       // Try using a GPR to pass the address
9265       if ((Reg = State.AllocateReg(ArgGPRs))) {
9266         LocVT = XLenVT;
9267         LocInfo = CCValAssign::Indirect;
9268       } else if (ValVT.isScalableVector()) {
9269         LocVT = XLenVT;
9270         LocInfo = CCValAssign::Indirect;
9271       } else {
9272         // Pass fixed-length vectors on the stack.
9273         LocVT = ValVT;
9274         StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element size, taking care that vXi1 vectors
        // (whose element size rounds down to zero bytes) still get an
        // alignment of at least one byte.
9277         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9278       }
9279     }
9280   } else {
9281     Reg = State.AllocateReg(ArgGPRs);
9282   }
9283 
9284   unsigned StackOffset =
9285       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
9286 
9287   // If we reach this point and PendingLocs is non-empty, we must be at the
9288   // end of a split argument that must be passed indirectly.
9289   if (!PendingLocs.empty()) {
9290     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
9291     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
9292 
9293     for (auto &It : PendingLocs) {
9294       if (Reg)
9295         It.convertToReg(Reg);
9296       else
9297         It.convertToMem(StackOffset);
9298       State.addLoc(It);
9299     }
9300     PendingLocs.clear();
9301     PendingArgFlags.clear();
9302     return false;
9303   }
9304 
9305   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
9306           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
9307          "Expected an XLenVT or vector types at this stage");
9308 
9309   if (Reg) {
9310     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9311     return false;
9312   }
9313 
9314   // When a floating-point value is passed on the stack, no bit-conversion is
9315   // needed.
9316   if (ValVT.isFloatingPoint()) {
9317     LocVT = ValVT;
9318     LocInfo = CCValAssign::Full;
9319   }
9320   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9321   return false;
9322 }
9323 
9324 template <typename ArgTy>
9325 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
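  // Find the index of the first vector-of-i1 argument, if any; allocateRVVReg
  // pre-assigns that argument to V0 so it can serve as the mask register
  // under the interim vector calling convention.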
9326   for (const auto &ArgIdx : enumerate(Args)) {
9327     MVT ArgVT = ArgIdx.value().VT;
9328     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
9329       return ArgIdx.index();
9330   }
9331   return None;
9332 }
9333 
9334 void RISCVTargetLowering::analyzeInputArgs(
9335     MachineFunction &MF, CCState &CCInfo,
9336     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
9337     RISCVCCAssignFn Fn) const {
9338   unsigned NumArgs = Ins.size();
9339   FunctionType *FType = MF.getFunction().getFunctionType();
9340 
9341   Optional<unsigned> FirstMaskArgument;
9342   if (Subtarget.hasVInstructions())
9343     FirstMaskArgument = preAssignMask(Ins);
9344 
9345   for (unsigned i = 0; i != NumArgs; ++i) {
9346     MVT ArgVT = Ins[i].VT;
9347     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
9348 
9349     Type *ArgTy = nullptr;
9350     if (IsRet)
9351       ArgTy = FType->getReturnType();
9352     else if (Ins[i].isOrigArg())
9353       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
9354 
9355     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9356     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9357            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
9358            FirstMaskArgument)) {
9359       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
9360                         << EVT(ArgVT).getEVTString() << '\n');
9361       llvm_unreachable(nullptr);
9362     }
9363   }
9364 }
9365 
9366 void RISCVTargetLowering::analyzeOutputArgs(
9367     MachineFunction &MF, CCState &CCInfo,
9368     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
9369     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
9370   unsigned NumArgs = Outs.size();
9371 
9372   Optional<unsigned> FirstMaskArgument;
9373   if (Subtarget.hasVInstructions())
9374     FirstMaskArgument = preAssignMask(Outs);
9375 
9376   for (unsigned i = 0; i != NumArgs; i++) {
9377     MVT ArgVT = Outs[i].VT;
9378     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
9379     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
9380 
9381     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9382     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9383            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
9384            FirstMaskArgument)) {
9385       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
9386                         << EVT(ArgVT).getEVTString() << "\n");
9387       llvm_unreachable(nullptr);
9388     }
9389   }
9390 }
9391 
9392 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
9393 // values.
9394 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
9395                                    const CCValAssign &VA, const SDLoc &DL,
9396                                    const RISCVSubtarget &Subtarget) {
9397   switch (VA.getLocInfo()) {
9398   default:
9399     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9400   case CCValAssign::Full:
9401     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
9402       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
9403     break;
9404   case CCValAssign::BCvt:
9405     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9406       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
9407     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9408       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
9409     else
9410       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
9411     break;
9412   }
9413   return Val;
9414 }
9415 
9416 // The caller is responsible for loading the full value if the argument is
9417 // passed with CCValAssign::Indirect.
9418 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
9419                                 const CCValAssign &VA, const SDLoc &DL,
9420                                 const RISCVTargetLowering &TLI) {
9421   MachineFunction &MF = DAG.getMachineFunction();
9422   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9423   EVT LocVT = VA.getLocVT();
9424   SDValue Val;
9425   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
9426   Register VReg = RegInfo.createVirtualRegister(RC);
9427   RegInfo.addLiveIn(VA.getLocReg(), VReg);
9428   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
9429 
9430   if (VA.getLocInfo() == CCValAssign::Indirect)
9431     return Val;
9432 
9433   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
9434 }
9435 
9436 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
9437                                    const CCValAssign &VA, const SDLoc &DL,
9438                                    const RISCVSubtarget &Subtarget) {
9439   EVT LocVT = VA.getLocVT();
9440 
9441   switch (VA.getLocInfo()) {
9442   default:
9443     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9444   case CCValAssign::Full:
9445     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
9446       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
9447     break;
9448   case CCValAssign::BCvt:
9449     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9450       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
9451     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9452       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
9453     else
9454       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
9455     break;
9456   }
9457   return Val;
9458 }
9459 
9460 // The caller is responsible for loading the full value if the argument is
9461 // passed with CCValAssign::Indirect.
9462 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
9463                                 const CCValAssign &VA, const SDLoc &DL) {
9464   MachineFunction &MF = DAG.getMachineFunction();
9465   MachineFrameInfo &MFI = MF.getFrameInfo();
9466   EVT LocVT = VA.getLocVT();
9467   EVT ValVT = VA.getValVT();
9468   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
  if (ValVT.isScalableVector()) {
    // A scalable vector reaching this point is passed indirectly: the stack
    // slot holds a pointer to the vector value rather than the vector itself,
    // so load a value of pointer-sized type (LocVT) instead of the scalable
    // vector type.
    ValVT = LocVT;
  }
9475   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
9476                                  /*IsImmutable=*/true);
9477   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
9478   SDValue Val;
9479 
9480   ISD::LoadExtType ExtType;
9481   switch (VA.getLocInfo()) {
9482   default:
9483     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9484   case CCValAssign::Full:
9485   case CCValAssign::Indirect:
9486   case CCValAssign::BCvt:
9487     ExtType = ISD::NON_EXTLOAD;
9488     break;
9489   }
9490   Val = DAG.getExtLoad(
9491       ExtType, DL, LocVT, Chain, FIN,
9492       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
9493   return Val;
9494 }
9495 
9496 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
9497                                        const CCValAssign &VA, const SDLoc &DL) {
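  // Rebuild an f64 argument that has been passed in GPRs and/or on the stack
  // (RV32 with a soft-float ABI, or with the FPR argument registers
  // exhausted): it may arrive entirely on the stack, in a pair of GPRs, or
  // with its low half in a7 (X17) and its high half on the stack. The halves
  // are recombined with RISCVISD::BuildPairF64.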
9498   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
9499          "Unexpected VA");
9500   MachineFunction &MF = DAG.getMachineFunction();
9501   MachineFrameInfo &MFI = MF.getFrameInfo();
9502   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9503 
9504   if (VA.isMemLoc()) {
9505     // f64 is passed on the stack.
9506     int FI =
9507         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
9508     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9509     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
9510                        MachinePointerInfo::getFixedStack(MF, FI));
9511   }
9512 
9513   assert(VA.isRegLoc() && "Expected register VA assignment");
9514 
9515   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9516   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
9517   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
9518   SDValue Hi;
9519   if (VA.getLocReg() == RISCV::X17) {
9520     // Second half of f64 is passed on the stack.
9521     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
9522     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9523     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
9524                      MachinePointerInfo::getFixedStack(MF, FI));
9525   } else {
9526     // Second half of f64 is passed in another GPR.
9527     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9528     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
9529     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
9530   }
9531   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
9532 }
9533 
// FastCC shows less than a 1% performance improvement on some particular
// benchmarks, but it may theoretically benefit other cases.
9536 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
9537                             unsigned ValNo, MVT ValVT, MVT LocVT,
9538                             CCValAssign::LocInfo LocInfo,
9539                             ISD::ArgFlagsTy ArgFlags, CCState &State,
9540                             bool IsFixed, bool IsRet, Type *OrigTy,
9541                             const RISCVTargetLowering &TLI,
9542                             Optional<unsigned> FirstMaskArgument) {
9543 
  // X5 and X6 might be used for the save-restore libcalls.
9545   static const MCPhysReg GPRList[] = {
9546       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
9547       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
9548       RISCV::X29, RISCV::X30, RISCV::X31};
9549 
9550   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9551     if (unsigned Reg = State.AllocateReg(GPRList)) {
9552       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9553       return false;
9554     }
9555   }
9556 
9557   if (LocVT == MVT::f16) {
9558     static const MCPhysReg FPR16List[] = {
9559         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
9560         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
9561         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
9562         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
9563     if (unsigned Reg = State.AllocateReg(FPR16List)) {
9564       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9565       return false;
9566     }
9567   }
9568 
9569   if (LocVT == MVT::f32) {
9570     static const MCPhysReg FPR32List[] = {
9571         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
9572         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
9573         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
9574         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
9575     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9576       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9577       return false;
9578     }
9579   }
9580 
9581   if (LocVT == MVT::f64) {
9582     static const MCPhysReg FPR64List[] = {
9583         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
9584         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
9585         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
9586         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
9587     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9588       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9589       return false;
9590     }
9591   }
9592 
9593   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
9594     unsigned Offset4 = State.AllocateStack(4, Align(4));
9595     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
9596     return false;
9597   }
9598 
9599   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
9600     unsigned Offset5 = State.AllocateStack(8, Align(8));
9601     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
9602     return false;
9603   }
9604 
9605   if (LocVT.isVector()) {
9606     if (unsigned Reg =
9607             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
9608       // Fixed-length vectors are located in the corresponding scalable-vector
9609       // container types.
9610       if (ValVT.isFixedLengthVector())
9611         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9612       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9613     } else {
      // Try to pass the address via a "fast" GPR.
9615       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
9616         LocInfo = CCValAssign::Indirect;
9617         LocVT = TLI.getSubtarget().getXLenVT();
9618         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
9619       } else if (ValVT.isFixedLengthVector()) {
9620         auto StackAlign =
9621             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9622         unsigned StackOffset =
9623             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
9624         State.addLoc(
9625             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9626       } else {
9627         // Can't pass scalable vectors on the stack.
9628         return true;
9629       }
9630     }
9631 
9632     return false;
9633   }
9634 
9635   return true; // CC didn't match.
9636 }
9637 
9638 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
9639                          CCValAssign::LocInfo LocInfo,
9640                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
9641 
9642   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9643     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
9644     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
9645     static const MCPhysReg GPRList[] = {
9646         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
9647         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
9648     if (unsigned Reg = State.AllocateReg(GPRList)) {
9649       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9650       return false;
9651     }
9652   }
9653 
9654   if (LocVT == MVT::f32) {
9655     // Pass in STG registers: F1, ..., F6
9656     //                        fs0 ... fs5
9657     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
9658                                           RISCV::F18_F, RISCV::F19_F,
9659                                           RISCV::F20_F, RISCV::F21_F};
9660     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9661       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9662       return false;
9663     }
9664   }
9665 
9666   if (LocVT == MVT::f64) {
9667     // Pass in STG registers: D1, ..., D6
9668     //                        fs6 ... fs11
9669     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
9670                                           RISCV::F24_D, RISCV::F25_D,
9671                                           RISCV::F26_D, RISCV::F27_D};
9672     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9673       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9674       return false;
9675     }
9676   }
9677 
9678   report_fatal_error("No registers left in GHC calling convention");
9679   return true;
9680 }
9681 
9682 // Transform physical registers into virtual registers.
9683 SDValue RISCVTargetLowering::LowerFormalArguments(
9684     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
9685     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
9686     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
9687 
9688   MachineFunction &MF = DAG.getMachineFunction();
9689 
9690   switch (CallConv) {
9691   default:
9692     report_fatal_error("Unsupported calling convention");
9693   case CallingConv::C:
9694   case CallingConv::Fast:
9695     break;
9696   case CallingConv::GHC:
9697     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
9698         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
9699       report_fatal_error(
9700         "GHC calling convention requires the F and D instruction set extensions");
9701   }
9702 
9703   const Function &Func = MF.getFunction();
9704   if (Func.hasFnAttribute("interrupt")) {
9705     if (!Func.arg_empty())
9706       report_fatal_error(
9707         "Functions with the interrupt attribute cannot have arguments!");
9708 
9709     StringRef Kind =
9710       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
9711 
9712     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
9713       report_fatal_error(
9714         "Function interrupt attribute argument not supported!");
9715   }
9716 
9717   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9718   MVT XLenVT = Subtarget.getXLenVT();
9719   unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
9721   std::vector<SDValue> OutChains;
9722 
9723   // Assign locations to all of the incoming arguments.
9724   SmallVector<CCValAssign, 16> ArgLocs;
9725   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
9726 
9727   if (CallConv == CallingConv::GHC)
9728     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
9729   else
9730     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
9731                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
9732                                                    : CC_RISCV);
9733 
9734   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
9735     CCValAssign &VA = ArgLocs[i];
9736     SDValue ArgValue;
9737     // Passing f64 on RV32D with a soft float ABI must be handled as a special
9738     // case.
9739     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
9740       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
9741     else if (VA.isRegLoc())
9742       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
9743     else
9744       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
9745 
9746     if (VA.getLocInfo() == CCValAssign::Indirect) {
9747       // If the original argument was split and passed by reference (e.g. i128
9748       // on RV32), we need to load all parts of it here (using the same
9749       // address). Vectors may be partly split to registers and partly to the
9750       // stack, in which case the base address is partly offset and subsequent
9751       // stores are relative to that.
9752       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
9753                                    MachinePointerInfo()));
9754       unsigned ArgIndex = Ins[i].OrigArgIndex;
9755       unsigned ArgPartOffset = Ins[i].PartOffset;
9756       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
9757       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
9758         CCValAssign &PartVA = ArgLocs[i + 1];
9759         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
9760         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
9761         if (PartVA.getValVT().isScalableVector())
9762           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
9763         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
9764         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
9765                                      MachinePointerInfo()));
9766         ++i;
9767       }
9768       continue;
9769     }
9770     InVals.push_back(ArgValue);
9771   }
9772 
9773   if (IsVarArg) {
9774     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
9775     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
9776     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
9777     MachineFrameInfo &MFI = MF.getFrameInfo();
9778     MachineRegisterInfo &RegInfo = MF.getRegInfo();
9779     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
9780 
9781     // Offset of the first variable argument from stack pointer, and size of
9782     // the vararg save area. For now, the varargs save area is either zero or
9783     // large enough to hold a0-a7.
9784     int VaArgOffset, VarArgsSaveSize;
9785 
9786     // If all registers are allocated, then all varargs must be passed on the
9787     // stack and we don't need to save any argregs.
9788     if (ArgRegs.size() == Idx) {
9789       VaArgOffset = CCInfo.getNextStackOffset();
9790       VarArgsSaveSize = 0;
9791     } else {
9792       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
9793       VaArgOffset = -VarArgsSaveSize;
9794     }
9795 
9796     // Record the frame index of the first variable argument
9797     // which is a value necessary to VASTART.
9798     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
9799     RVFI->setVarArgsFrameIndex(FI);
9800 
9801     // If saving an odd number of registers then create an extra stack slot to
9802     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
9804     if (Idx % 2) {
9805       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
9806       VarArgsSaveSize += XLenInBytes;
9807     }
9808 
9809     // Copy the integer registers that may have been used for passing varargs
9810     // to the vararg save area.
9811     for (unsigned I = Idx; I < ArgRegs.size();
9812          ++I, VaArgOffset += XLenInBytes) {
9813       const Register Reg = RegInfo.createVirtualRegister(RC);
9814       RegInfo.addLiveIn(ArgRegs[I], Reg);
9815       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
9816       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
9817       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
9818       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
9819                                    MachinePointerInfo::getFixedStack(MF, FI));
9820       cast<StoreSDNode>(Store.getNode())
9821           ->getMemOperand()
9822           ->setValue((Value *)nullptr);
9823       OutChains.push_back(Store);
9824     }
9825     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
9826   }
9827 
9828   // All stores are grouped in one node to allow the matching between
9829   // the size of Ins and InVals. This only happens for vararg functions.
9830   if (!OutChains.empty()) {
9831     OutChains.push_back(Chain);
9832     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
9833   }
9834 
9835   return Chain;
9836 }
9837 
9838 /// isEligibleForTailCallOptimization - Check whether the call is eligible
9839 /// for tail call optimization.
9840 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
9841 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
9842     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
9843     const SmallVector<CCValAssign, 16> &ArgLocs) const {
9844 
9845   auto &Callee = CLI.Callee;
9846   auto CalleeCC = CLI.CallConv;
9847   auto &Outs = CLI.Outs;
9848   auto &Caller = MF.getFunction();
9849   auto CallerCC = Caller.getCallingConv();
9850 
9851   // Exception-handling functions need a special set of instructions to
9852   // indicate a return to the hardware. Tail-calling another function would
9853   // probably break this.
9854   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
9855   // should be expanded as new function attributes are introduced.
9856   if (Caller.hasFnAttribute("interrupt"))
9857     return false;
9858 
9859   // Do not tail call opt if the stack is used to pass parameters.
9860   if (CCInfo.getNextStackOffset() != 0)
9861     return false;
9862 
  // Do not tail call opt if any parameters need to be passed indirectly.
  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
  // passed indirectly: the address of the value is passed in a register, or,
  // if no register is available, on the stack. Passing indirectly usually
  // also requires allocating stack space to store the value, so the
  // CCInfo.getNextStackOffset() != 0 check above is not sufficient; we also
  // need to check whether any of the CCValAssigns in ArgLocs are
  // CCValAssign::Indirect.
9871   for (auto &VA : ArgLocs)
9872     if (VA.getLocInfo() == CCValAssign::Indirect)
9873       return false;
9874 
9875   // Do not tail call opt if either caller or callee uses struct return
9876   // semantics.
9877   auto IsCallerStructRet = Caller.hasStructRetAttr();
9878   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
9879   if (IsCallerStructRet || IsCalleeStructRet)
9880     return false;
9881 
9882   // Externally-defined functions with weak linkage should not be
9883   // tail-called. The behaviour of branch instructions in this situation (as
9884   // used for tail calls) is implementation-defined, so we cannot rely on the
9885   // linker replacing the tail call with a return.
9886   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
9887     const GlobalValue *GV = G->getGlobal();
9888     if (GV->hasExternalWeakLinkage())
9889       return false;
9890   }
9891 
9892   // The callee has to preserve all registers the caller needs to preserve.
9893   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
9894   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
9895   if (CalleeCC != CallerCC) {
9896     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
9897     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
9898       return false;
9899   }
9900 
9901   // Byval parameters hand the function a pointer directly into the stack area
9902   // we want to reuse during a tail call. Working around this *is* possible
9903   // but less efficient and uglier in LowerCall.
9904   for (auto &Arg : Outs)
9905     if (Arg.Flags.isByVal())
9906       return false;
9907 
9908   return true;
9909 }
9910 
9911 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
9912   return DAG.getDataLayout().getPrefTypeAlign(
9913       VT.getTypeForEVT(*DAG.getContext()));
9914 }
9915 
9916 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
9917 // and output parameter nodes.
9918 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
9919                                        SmallVectorImpl<SDValue> &InVals) const {
9920   SelectionDAG &DAG = CLI.DAG;
9921   SDLoc &DL = CLI.DL;
9922   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
9923   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
9924   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
9925   SDValue Chain = CLI.Chain;
9926   SDValue Callee = CLI.Callee;
9927   bool &IsTailCall = CLI.IsTailCall;
9928   CallingConv::ID CallConv = CLI.CallConv;
9929   bool IsVarArg = CLI.IsVarArg;
9930   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9931   MVT XLenVT = Subtarget.getXLenVT();
9932 
9933   MachineFunction &MF = DAG.getMachineFunction();
9934 
9935   // Analyze the operands of the call, assigning locations to each operand.
9936   SmallVector<CCValAssign, 16> ArgLocs;
9937   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
9938 
9939   if (CallConv == CallingConv::GHC)
9940     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
9941   else
9942     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
9943                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
9944                                                     : CC_RISCV);
9945 
9946   // Check if it's really possible to do a tail call.
9947   if (IsTailCall)
9948     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
9949 
9950   if (IsTailCall)
9951     ++NumTailCalls;
9952   else if (CLI.CB && CLI.CB->isMustTailCall())
9953     report_fatal_error("failed to perform tail call elimination on a call "
9954                        "site marked musttail");
9955 
9956   // Get a count of how many bytes are to be pushed on the stack.
9957   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
9958 
9959   // Create local copies for byval args
9960   SmallVector<SDValue, 8> ByValArgs;
9961   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
9962     ISD::ArgFlagsTy Flags = Outs[i].Flags;
9963     if (!Flags.isByVal())
9964       continue;
9965 
9966     SDValue Arg = OutVals[i];
9967     unsigned Size = Flags.getByValSize();
9968     Align Alignment = Flags.getNonZeroByValAlign();
9969 
9970     int FI =
9971         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
9972     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
9973     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
9974 
9975     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
9976                           /*IsVolatile=*/false,
9977                           /*AlwaysInline=*/false, IsTailCall,
9978                           MachinePointerInfo(), MachinePointerInfo());
9979     ByValArgs.push_back(FIPtr);
9980   }
9981 
9982   if (!IsTailCall)
9983     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
9984 
9985   // Copy argument values to their designated locations.
9986   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
9987   SmallVector<SDValue, 8> MemOpChains;
9988   SDValue StackPtr;
9989   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
9990     CCValAssign &VA = ArgLocs[i];
9991     SDValue ArgValue = OutVals[i];
9992     ISD::ArgFlagsTy Flags = Outs[i].Flags;
9993 
9994     // Handle passing f64 on RV32D with a soft float ABI as a special case.
9995     bool IsF64OnRV32DSoftABI =
9996         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
9997     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
9998       SDValue SplitF64 = DAG.getNode(
9999           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
10000       SDValue Lo = SplitF64.getValue(0);
10001       SDValue Hi = SplitF64.getValue(1);
10002 
10003       Register RegLo = VA.getLocReg();
10004       RegsToPass.push_back(std::make_pair(RegLo, Lo));
10005 
10006       if (RegLo == RISCV::X17) {
10007         // Second half of f64 is passed on the stack.
10008         // Work out the address of the stack slot.
10009         if (!StackPtr.getNode())
10010           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10011         // Emit the store.
10012         MemOpChains.push_back(
10013             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
10014       } else {
10015         // Second half of f64 is passed in another GPR.
10016         assert(RegLo < RISCV::X31 && "Invalid register pair");
10017         Register RegHigh = RegLo + 1;
10018         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
10019       }
10020       continue;
10021     }
10022 
10023     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
10024     // as any other MemLoc.
10025 
10026     // Promote the value if needed.
10027     // For now, only handle fully promoted and indirect arguments.
10028     if (VA.getLocInfo() == CCValAssign::Indirect) {
10029       // Store the argument in a stack slot and pass its address.
10030       Align StackAlign =
10031           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
10032                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
10033       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
10034       // If the original argument was split (e.g. i128), we need
10035       // to store the required parts of it here (and pass just one address).
10036       // Vectors may be partly split to registers and partly to the stack, in
10037       // which case the base address is partly offset and subsequent stores are
10038       // relative to that.
10039       unsigned ArgIndex = Outs[i].OrigArgIndex;
10040       unsigned ArgPartOffset = Outs[i].PartOffset;
10041       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10042       // Calculate the total size to store. We don't know exactly what we are
10043       // storing until we walk the remaining parts in the loop below and
10044       // collect that information.
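      // For example (illustrative): an i128 argument on RV32 is legalized into
      // four i32 parts that share one OrigArgIndex; all of them are stored
      // into a single stack temporary below and only its address is passed.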
10045       SmallVector<std::pair<SDValue, SDValue>> Parts;
10046       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
10047         SDValue PartValue = OutVals[i + 1];
10048         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
10049         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10050         EVT PartVT = PartValue.getValueType();
10051         if (PartVT.isScalableVector())
10052           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10053         StoredSize += PartVT.getStoreSize();
10054         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
10055         Parts.push_back(std::make_pair(PartValue, Offset));
10056         ++i;
10057       }
10058       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
10059       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
10060       MemOpChains.push_back(
10061           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
10062                        MachinePointerInfo::getFixedStack(MF, FI)));
10063       for (const auto &Part : Parts) {
10064         SDValue PartValue = Part.first;
10065         SDValue PartOffset = Part.second;
10066         SDValue Address =
10067             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
10068         MemOpChains.push_back(
10069             DAG.getStore(Chain, DL, PartValue, Address,
10070                          MachinePointerInfo::getFixedStack(MF, FI)));
10071       }
10072       ArgValue = SpillSlot;
10073     } else {
10074       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
10075     }
10076 
10077     // Use local copy if it is a byval arg.
10078     if (Flags.isByVal())
10079       ArgValue = ByValArgs[j++];
10080 
10081     if (VA.isRegLoc()) {
10082       // Queue up the argument copies and emit them at the end.
10083       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
10084     } else {
10085       assert(VA.isMemLoc() && "Argument not register or memory");
10086       assert(!IsTailCall && "Tail call not allowed if stack is used "
10087                             "for passing parameters");
10088 
10089       // Work out the address of the stack slot.
10090       if (!StackPtr.getNode())
10091         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10092       SDValue Address =
10093           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
10094                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
10095 
10096       // Emit the store.
10097       MemOpChains.push_back(
10098           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
10099     }
10100   }
10101 
10102   // Join the stores, which are independent of one another.
10103   if (!MemOpChains.empty())
10104     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
10105 
10106   SDValue Glue;
10107 
10108   // Build a sequence of copy-to-reg nodes, chained and glued together.
10109   for (auto &Reg : RegsToPass) {
10110     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
10111     Glue = Chain.getValue(1);
10112   }
10113 
10114   // Validate that none of the argument registers have been marked as
10115   // reserved; if any have, report an error. Do the same for the return address
10116   // register if this is not a tail call.
10117   validateCCReservedRegs(RegsToPass, MF);
10118   if (!IsTailCall &&
10119       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
10120     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10121         MF.getFunction(),
10122         "Return address register required, but has been reserved."});
10123 
10124   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
10125   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
10126   // split it and then direct call can be matched by PseudoCALL.
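  // For example (illustrative): a call to a dso_local function is lowered to
  // PseudoCALL with a plain call symbol (MO_CALL), while a preemptible symbol
  // gets the @plt form (MO_PLT).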
10127   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
10128     const GlobalValue *GV = S->getGlobal();
10129 
10130     unsigned OpFlags = RISCVII::MO_CALL;
10131     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
10132       OpFlags = RISCVII::MO_PLT;
10133 
10134     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
10135   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
10136     unsigned OpFlags = RISCVII::MO_CALL;
10137 
10138     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
10139                                                  nullptr))
10140       OpFlags = RISCVII::MO_PLT;
10141 
10142     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
10143   }
10144 
10145   // The first call operand is the chain and the second is the target address.
10146   SmallVector<SDValue, 8> Ops;
10147   Ops.push_back(Chain);
10148   Ops.push_back(Callee);
10149 
10150   // Add argument registers to the end of the list so that they are
10151   // known live into the call.
10152   for (auto &Reg : RegsToPass)
10153     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
10154 
10155   if (!IsTailCall) {
10156     // Add a register mask operand representing the call-preserved registers.
10157     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
10158     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
10159     assert(Mask && "Missing call preserved mask for calling convention");
10160     Ops.push_back(DAG.getRegisterMask(Mask));
10161   }
10162 
10163   // Glue the call to the argument copies, if any.
10164   if (Glue.getNode())
10165     Ops.push_back(Glue);
10166 
10167   // Emit the call.
10168   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10169 
10170   if (IsTailCall) {
10171     MF.getFrameInfo().setHasTailCall();
10172     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
10173   }
10174 
10175   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
10176   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
10177   Glue = Chain.getValue(1);
10178 
10179   // Mark the end of the call, which is glued to the call itself.
10180   Chain = DAG.getCALLSEQ_END(Chain,
10181                              DAG.getConstant(NumBytes, DL, PtrVT, true),
10182                              DAG.getConstant(0, DL, PtrVT, true),
10183                              Glue, DL);
10184   Glue = Chain.getValue(1);
10185 
10186   // Assign locations to each value returned by this call.
10187   SmallVector<CCValAssign, 16> RVLocs;
10188   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
10189   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
10190 
10191   // Copy all of the result registers out of their specified physreg.
10192   for (auto &VA : RVLocs) {
10193     // Copy the value out
10194     SDValue RetValue =
10195         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
10196     // Glue the RetValue to the end of the call sequence
10197     Chain = RetValue.getValue(1);
10198     Glue = RetValue.getValue(2);
10199 
10200     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10201       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
10202       SDValue RetValue2 =
10203           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
10204       Chain = RetValue2.getValue(1);
10205       Glue = RetValue2.getValue(2);
10206       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
10207                              RetValue2);
10208     }
10209 
10210     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
10211 
10212     InVals.push_back(RetValue);
10213   }
10214 
10215   return Chain;
10216 }
10217 
10218 bool RISCVTargetLowering::CanLowerReturn(
10219     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
10220     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
10221   SmallVector<CCValAssign, 16> RVLocs;
10222   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
10223 
10224   Optional<unsigned> FirstMaskArgument;
10225   if (Subtarget.hasVInstructions())
10226     FirstMaskArgument = preAssignMask(Outs);
10227 
10228   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10229     MVT VT = Outs[i].VT;
10230     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10231     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10232     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
10233                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
10234                  *this, FirstMaskArgument))
10235       return false;
10236   }
10237   return true;
10238 }
10239 
10240 SDValue
10241 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
10242                                  bool IsVarArg,
10243                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
10244                                  const SmallVectorImpl<SDValue> &OutVals,
10245                                  const SDLoc &DL, SelectionDAG &DAG) const {
10246   const MachineFunction &MF = DAG.getMachineFunction();
10247   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10248 
10249   // Stores the assignment of the return value to a location.
10250   SmallVector<CCValAssign, 16> RVLocs;
10251 
10252   // Info about the registers and stack slot.
10253   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
10254                  *DAG.getContext());
10255 
10256   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
10257                     nullptr, CC_RISCV);
10258 
10259   if (CallConv == CallingConv::GHC && !RVLocs.empty())
10260     report_fatal_error("GHC functions return void only");
10261 
10262   SDValue Glue;
10263   SmallVector<SDValue, 4> RetOps(1, Chain);
10264 
10265   // Copy the result values into the output registers.
10266   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
10267     SDValue Val = OutVals[i];
10268     CCValAssign &VA = RVLocs[i];
10269     assert(VA.isRegLoc() && "Can only return in registers!");
10270 
10271     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10272       // Handle returning f64 on RV32D with a soft float ABI.
10273       assert(VA.isRegLoc() && "Expected return via registers");
10274       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
10275                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
10276       SDValue Lo = SplitF64.getValue(0);
10277       SDValue Hi = SplitF64.getValue(1);
10278       Register RegLo = VA.getLocReg();
10279       assert(RegLo < RISCV::X31 && "Invalid register pair");
10280       Register RegHi = RegLo + 1;
10281 
10282       if (STI.isRegisterReservedByUser(RegLo) ||
10283           STI.isRegisterReservedByUser(RegHi))
10284         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10285             MF.getFunction(),
10286             "Return value register required, but has been reserved."});
10287 
10288       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
10289       Glue = Chain.getValue(1);
10290       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
10291       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
10292       Glue = Chain.getValue(1);
10293       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
10294     } else {
10295       // Handle a 'normal' return.
10296       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
10297       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
10298 
10299       if (STI.isRegisterReservedByUser(VA.getLocReg()))
10300         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10301             MF.getFunction(),
10302             "Return value register required, but has been reserved."});
10303 
10304       // Guarantee that all emitted copies are stuck together.
10305       Glue = Chain.getValue(1);
10306       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
10307     }
10308   }
10309 
10310   RetOps[0] = Chain; // Update chain.
10311 
10312   // Add the glue node if we have it.
10313   if (Glue.getNode()) {
10314     RetOps.push_back(Glue);
10315   }
10316 
10317   unsigned RetOpc = RISCVISD::RET_FLAG;
10318   // Interrupt service routines use different return instructions.
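  // For example (illustrative): a function declared in C with
  //   __attribute__((interrupt("machine")))
  // returns with MRET rather than RET.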
10319   const Function &Func = DAG.getMachineFunction().getFunction();
10320   if (Func.hasFnAttribute("interrupt")) {
10321     if (!Func.getReturnType()->isVoidTy())
10322       report_fatal_error(
10323           "Functions with the interrupt attribute must have void return type!");
10324 
10325     MachineFunction &MF = DAG.getMachineFunction();
10326     StringRef Kind =
10327       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10328 
10329     if (Kind == "user")
10330       RetOpc = RISCVISD::URET_FLAG;
10331     else if (Kind == "supervisor")
10332       RetOpc = RISCVISD::SRET_FLAG;
10333     else
10334       RetOpc = RISCVISD::MRET_FLAG;
10335   }
10336 
10337   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
10338 }
10339 
10340 void RISCVTargetLowering::validateCCReservedRegs(
10341     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
10342     MachineFunction &MF) const {
10343   const Function &F = MF.getFunction();
10344   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10345 
10346   if (llvm::any_of(Regs, [&STI](auto Reg) {
10347         return STI.isRegisterReservedByUser(Reg.first);
10348       }))
10349     F.getContext().diagnose(DiagnosticInfoUnsupported{
10350         F, "Argument register required, but has been reserved."});
10351 }
10352 
10353 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
10354   return CI->isTailCall();
10355 }
10356 
10357 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
10358 #define NODE_NAME_CASE(NODE)                                                   \
10359   case RISCVISD::NODE:                                                         \
10360     return "RISCVISD::" #NODE;
10361   // clang-format off
10362   switch ((RISCVISD::NodeType)Opcode) {
10363   case RISCVISD::FIRST_NUMBER:
10364     break;
10365   NODE_NAME_CASE(RET_FLAG)
10366   NODE_NAME_CASE(URET_FLAG)
10367   NODE_NAME_CASE(SRET_FLAG)
10368   NODE_NAME_CASE(MRET_FLAG)
10369   NODE_NAME_CASE(CALL)
10370   NODE_NAME_CASE(SELECT_CC)
10371   NODE_NAME_CASE(BR_CC)
10372   NODE_NAME_CASE(BuildPairF64)
10373   NODE_NAME_CASE(SplitF64)
10374   NODE_NAME_CASE(TAIL)
10375   NODE_NAME_CASE(MULHSU)
10376   NODE_NAME_CASE(SLLW)
10377   NODE_NAME_CASE(SRAW)
10378   NODE_NAME_CASE(SRLW)
10379   NODE_NAME_CASE(DIVW)
10380   NODE_NAME_CASE(DIVUW)
10381   NODE_NAME_CASE(REMUW)
10382   NODE_NAME_CASE(ROLW)
10383   NODE_NAME_CASE(RORW)
10384   NODE_NAME_CASE(CLZW)
10385   NODE_NAME_CASE(CTZW)
10386   NODE_NAME_CASE(FSLW)
10387   NODE_NAME_CASE(FSRW)
10388   NODE_NAME_CASE(FSL)
10389   NODE_NAME_CASE(FSR)
10390   NODE_NAME_CASE(FMV_H_X)
10391   NODE_NAME_CASE(FMV_X_ANYEXTH)
10392   NODE_NAME_CASE(FMV_W_X_RV64)
10393   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
10394   NODE_NAME_CASE(FCVT_X)
10395   NODE_NAME_CASE(FCVT_XU)
10396   NODE_NAME_CASE(FCVT_W_RV64)
10397   NODE_NAME_CASE(FCVT_WU_RV64)
10398   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
10399   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
10400   NODE_NAME_CASE(READ_CYCLE_WIDE)
10401   NODE_NAME_CASE(GREV)
10402   NODE_NAME_CASE(GREVW)
10403   NODE_NAME_CASE(GORC)
10404   NODE_NAME_CASE(GORCW)
10405   NODE_NAME_CASE(SHFL)
10406   NODE_NAME_CASE(SHFLW)
10407   NODE_NAME_CASE(UNSHFL)
10408   NODE_NAME_CASE(UNSHFLW)
10409   NODE_NAME_CASE(BFP)
10410   NODE_NAME_CASE(BFPW)
10411   NODE_NAME_CASE(BCOMPRESS)
10412   NODE_NAME_CASE(BCOMPRESSW)
10413   NODE_NAME_CASE(BDECOMPRESS)
10414   NODE_NAME_CASE(BDECOMPRESSW)
10415   NODE_NAME_CASE(VMV_V_X_VL)
10416   NODE_NAME_CASE(VFMV_V_F_VL)
10417   NODE_NAME_CASE(VMV_X_S)
10418   NODE_NAME_CASE(VMV_S_X_VL)
10419   NODE_NAME_CASE(VFMV_S_F_VL)
10420   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
10421   NODE_NAME_CASE(READ_VLENB)
10422   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
10423   NODE_NAME_CASE(VSLIDEUP_VL)
10424   NODE_NAME_CASE(VSLIDE1UP_VL)
10425   NODE_NAME_CASE(VSLIDEDOWN_VL)
10426   NODE_NAME_CASE(VSLIDE1DOWN_VL)
10427   NODE_NAME_CASE(VID_VL)
10428   NODE_NAME_CASE(VFNCVT_ROD_VL)
10429   NODE_NAME_CASE(VECREDUCE_ADD_VL)
10430   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
10431   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
10432   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
10433   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
10434   NODE_NAME_CASE(VECREDUCE_AND_VL)
10435   NODE_NAME_CASE(VECREDUCE_OR_VL)
10436   NODE_NAME_CASE(VECREDUCE_XOR_VL)
10437   NODE_NAME_CASE(VECREDUCE_FADD_VL)
10438   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
10439   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
10440   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
10441   NODE_NAME_CASE(ADD_VL)
10442   NODE_NAME_CASE(AND_VL)
10443   NODE_NAME_CASE(MUL_VL)
10444   NODE_NAME_CASE(OR_VL)
10445   NODE_NAME_CASE(SDIV_VL)
10446   NODE_NAME_CASE(SHL_VL)
10447   NODE_NAME_CASE(SREM_VL)
10448   NODE_NAME_CASE(SRA_VL)
10449   NODE_NAME_CASE(SRL_VL)
10450   NODE_NAME_CASE(SUB_VL)
10451   NODE_NAME_CASE(UDIV_VL)
10452   NODE_NAME_CASE(UREM_VL)
10453   NODE_NAME_CASE(XOR_VL)
10454   NODE_NAME_CASE(SADDSAT_VL)
10455   NODE_NAME_CASE(UADDSAT_VL)
10456   NODE_NAME_CASE(SSUBSAT_VL)
10457   NODE_NAME_CASE(USUBSAT_VL)
10458   NODE_NAME_CASE(FADD_VL)
10459   NODE_NAME_CASE(FSUB_VL)
10460   NODE_NAME_CASE(FMUL_VL)
10461   NODE_NAME_CASE(FDIV_VL)
10462   NODE_NAME_CASE(FNEG_VL)
10463   NODE_NAME_CASE(FABS_VL)
10464   NODE_NAME_CASE(FSQRT_VL)
10465   NODE_NAME_CASE(FMA_VL)
10466   NODE_NAME_CASE(FCOPYSIGN_VL)
10467   NODE_NAME_CASE(SMIN_VL)
10468   NODE_NAME_CASE(SMAX_VL)
10469   NODE_NAME_CASE(UMIN_VL)
10470   NODE_NAME_CASE(UMAX_VL)
10471   NODE_NAME_CASE(FMINNUM_VL)
10472   NODE_NAME_CASE(FMAXNUM_VL)
10473   NODE_NAME_CASE(MULHS_VL)
10474   NODE_NAME_CASE(MULHU_VL)
10475   NODE_NAME_CASE(FP_TO_SINT_VL)
10476   NODE_NAME_CASE(FP_TO_UINT_VL)
10477   NODE_NAME_CASE(SINT_TO_FP_VL)
10478   NODE_NAME_CASE(UINT_TO_FP_VL)
10479   NODE_NAME_CASE(FP_EXTEND_VL)
10480   NODE_NAME_CASE(FP_ROUND_VL)
10481   NODE_NAME_CASE(VWMUL_VL)
10482   NODE_NAME_CASE(VWMULU_VL)
10483   NODE_NAME_CASE(VWMULSU_VL)
10484   NODE_NAME_CASE(VWADD_VL)
10485   NODE_NAME_CASE(VWADDU_VL)
10486   NODE_NAME_CASE(VWSUB_VL)
10487   NODE_NAME_CASE(VWSUBU_VL)
10488   NODE_NAME_CASE(VWADD_W_VL)
10489   NODE_NAME_CASE(VWADDU_W_VL)
10490   NODE_NAME_CASE(VWSUB_W_VL)
10491   NODE_NAME_CASE(VWSUBU_W_VL)
10492   NODE_NAME_CASE(SETCC_VL)
10493   NODE_NAME_CASE(VSELECT_VL)
10494   NODE_NAME_CASE(VP_MERGE_VL)
10495   NODE_NAME_CASE(VMAND_VL)
10496   NODE_NAME_CASE(VMOR_VL)
10497   NODE_NAME_CASE(VMXOR_VL)
10498   NODE_NAME_CASE(VMCLR_VL)
10499   NODE_NAME_CASE(VMSET_VL)
10500   NODE_NAME_CASE(VRGATHER_VX_VL)
10501   NODE_NAME_CASE(VRGATHER_VV_VL)
10502   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
10503   NODE_NAME_CASE(VSEXT_VL)
10504   NODE_NAME_CASE(VZEXT_VL)
10505   NODE_NAME_CASE(VCPOP_VL)
10506   NODE_NAME_CASE(VLE_VL)
10507   NODE_NAME_CASE(VSE_VL)
10508   NODE_NAME_CASE(READ_CSR)
10509   NODE_NAME_CASE(WRITE_CSR)
10510   NODE_NAME_CASE(SWAP_CSR)
10511   }
10512   // clang-format on
10513   return nullptr;
10514 #undef NODE_NAME_CASE
10515 }
10516 
10517 /// getConstraintType - Given a constraint letter, return the type of
10518 /// constraint it is for this target.
10519 RISCVTargetLowering::ConstraintType
10520 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
10521   if (Constraint.size() == 1) {
10522     switch (Constraint[0]) {
10523     default:
10524       break;
10525     case 'f':
10526       return C_RegisterClass;
10527     case 'I':
10528     case 'J':
10529     case 'K':
10530       return C_Immediate;
10531     case 'A':
10532       return C_Memory;
10533     case 'S': // A symbolic address
10534       return C_Other;
10535     }
10536   } else {
10537     if (Constraint == "vr" || Constraint == "vm")
10538       return C_RegisterClass;
10539   }
10540   return TargetLowering::getConstraintType(Constraint);
10541 }
10542 
10543 std::pair<unsigned, const TargetRegisterClass *>
10544 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
10545                                                   StringRef Constraint,
10546                                                   MVT VT) const {
10547   // First, see if this is a constraint that directly corresponds to a
10548   // RISCV register class.
10549   if (Constraint.size() == 1) {
10550     switch (Constraint[0]) {
10551     case 'r':
10552       // TODO: Support fixed vectors up to XLen for P extension?
10553       if (VT.isVector())
10554         break;
10555       return std::make_pair(0U, &RISCV::GPRRegClass);
10556     case 'f':
10557       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
10558         return std::make_pair(0U, &RISCV::FPR16RegClass);
10559       if (Subtarget.hasStdExtF() && VT == MVT::f32)
10560         return std::make_pair(0U, &RISCV::FPR32RegClass);
10561       if (Subtarget.hasStdExtD() && VT == MVT::f64)
10562         return std::make_pair(0U, &RISCV::FPR64RegClass);
10563       break;
10564     default:
10565       break;
10566     }
10567   } else if (Constraint == "vr") {
10568     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
10569                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10570       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
10571         return std::make_pair(0U, RC);
10572     }
10573   } else if (Constraint == "vm") {
10574     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
10575       return std::make_pair(0U, &RISCV::VMV0RegClass);
10576   }
10577 
10578   // Clang will correctly decode the usage of register name aliases into their
10579   // official names. However, other frontends like `rustc` do not. This allows
10580   // users of these frontends to use the ABI names for registers in LLVM-style
10581   // register constraints.
10582   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
10583                                .Case("{zero}", RISCV::X0)
10584                                .Case("{ra}", RISCV::X1)
10585                                .Case("{sp}", RISCV::X2)
10586                                .Case("{gp}", RISCV::X3)
10587                                .Case("{tp}", RISCV::X4)
10588                                .Case("{t0}", RISCV::X5)
10589                                .Case("{t1}", RISCV::X6)
10590                                .Case("{t2}", RISCV::X7)
10591                                .Cases("{s0}", "{fp}", RISCV::X8)
10592                                .Case("{s1}", RISCV::X9)
10593                                .Case("{a0}", RISCV::X10)
10594                                .Case("{a1}", RISCV::X11)
10595                                .Case("{a2}", RISCV::X12)
10596                                .Case("{a3}", RISCV::X13)
10597                                .Case("{a4}", RISCV::X14)
10598                                .Case("{a5}", RISCV::X15)
10599                                .Case("{a6}", RISCV::X16)
10600                                .Case("{a7}", RISCV::X17)
10601                                .Case("{s2}", RISCV::X18)
10602                                .Case("{s3}", RISCV::X19)
10603                                .Case("{s4}", RISCV::X20)
10604                                .Case("{s5}", RISCV::X21)
10605                                .Case("{s6}", RISCV::X22)
10606                                .Case("{s7}", RISCV::X23)
10607                                .Case("{s8}", RISCV::X24)
10608                                .Case("{s9}", RISCV::X25)
10609                                .Case("{s10}", RISCV::X26)
10610                                .Case("{s11}", RISCV::X27)
10611                                .Case("{t3}", RISCV::X28)
10612                                .Case("{t4}", RISCV::X29)
10613                                .Case("{t5}", RISCV::X30)
10614                                .Case("{t6}", RISCV::X31)
10615                                .Default(RISCV::NoRegister);
10616   if (XRegFromAlias != RISCV::NoRegister)
10617     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
10618 
10619   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
10620   // TableGen record rather than the AsmName to choose registers for InlineAsm
10621   // constraints, and since we want to match those names to the widest floating
10622   // point register type available, manually select floating point registers here.
10623   //
10624   // The second case is the ABI name of the register, so that frontends can also
10625   // use the ABI names in register constraint lists.
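  // For example (illustrative): the constraint "{fs0}" (or "{f8}") selects
  // F8_D when the operand is f64 and the D extension is available, F8_F for
  // f32, and F8_H for f16 with Zfh.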
10626   if (Subtarget.hasStdExtF()) {
10627     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
10628                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
10629                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
10630                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
10631                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
10632                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
10633                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
10634                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
10635                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
10636                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
10637                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
10638                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
10639                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
10640                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
10641                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
10642                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
10643                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
10644                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
10645                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
10646                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
10647                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
10648                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
10649                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
10650                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
10651                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
10652                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
10653                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
10654                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
10655                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
10656                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
10657                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
10658                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
10659                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
10660                         .Default(RISCV::NoRegister);
10661     if (FReg != RISCV::NoRegister) {
10662       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
10663       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
10664         unsigned RegNo = FReg - RISCV::F0_F;
10665         unsigned DReg = RISCV::F0_D + RegNo;
10666         return std::make_pair(DReg, &RISCV::FPR64RegClass);
10667       }
10668       if (VT == MVT::f32 || VT == MVT::Other)
10669         return std::make_pair(FReg, &RISCV::FPR32RegClass);
10670       if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
10671         unsigned RegNo = FReg - RISCV::F0_F;
10672         unsigned HReg = RISCV::F0_H + RegNo;
10673         return std::make_pair(HReg, &RISCV::FPR16RegClass);
10674       }
10675     }
10676   }
10677 
10678   if (Subtarget.hasVInstructions()) {
10679     Register VReg = StringSwitch<Register>(Constraint.lower())
10680                         .Case("{v0}", RISCV::V0)
10681                         .Case("{v1}", RISCV::V1)
10682                         .Case("{v2}", RISCV::V2)
10683                         .Case("{v3}", RISCV::V3)
10684                         .Case("{v4}", RISCV::V4)
10685                         .Case("{v5}", RISCV::V5)
10686                         .Case("{v6}", RISCV::V6)
10687                         .Case("{v7}", RISCV::V7)
10688                         .Case("{v8}", RISCV::V8)
10689                         .Case("{v9}", RISCV::V9)
10690                         .Case("{v10}", RISCV::V10)
10691                         .Case("{v11}", RISCV::V11)
10692                         .Case("{v12}", RISCV::V12)
10693                         .Case("{v13}", RISCV::V13)
10694                         .Case("{v14}", RISCV::V14)
10695                         .Case("{v15}", RISCV::V15)
10696                         .Case("{v16}", RISCV::V16)
10697                         .Case("{v17}", RISCV::V17)
10698                         .Case("{v18}", RISCV::V18)
10699                         .Case("{v19}", RISCV::V19)
10700                         .Case("{v20}", RISCV::V20)
10701                         .Case("{v21}", RISCV::V21)
10702                         .Case("{v22}", RISCV::V22)
10703                         .Case("{v23}", RISCV::V23)
10704                         .Case("{v24}", RISCV::V24)
10705                         .Case("{v25}", RISCV::V25)
10706                         .Case("{v26}", RISCV::V26)
10707                         .Case("{v27}", RISCV::V27)
10708                         .Case("{v28}", RISCV::V28)
10709                         .Case("{v29}", RISCV::V29)
10710                         .Case("{v30}", RISCV::V30)
10711                         .Case("{v31}", RISCV::V31)
10712                         .Default(RISCV::NoRegister);
10713     if (VReg != RISCV::NoRegister) {
10714       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
10715         return std::make_pair(VReg, &RISCV::VMRegClass);
10716       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
10717         return std::make_pair(VReg, &RISCV::VRRegClass);
10718       for (const auto *RC :
10719            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10720         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
10721           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
10722           return std::make_pair(VReg, RC);
10723         }
10724       }
10725     }
10726   }
10727 
10728   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10729 }
10730 
10731 unsigned
10732 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
10733   // Currently only support length 1 constraints.
10734   if (ConstraintCode.size() == 1) {
10735     switch (ConstraintCode[0]) {
10736     case 'A':
10737       return InlineAsm::Constraint_A;
10738     default:
10739       break;
10740     }
10741   }
10742 
10743   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
10744 }
10745 
10746 void RISCVTargetLowering::LowerAsmOperandForConstraint(
10747     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
10748     SelectionDAG &DAG) const {
10749   // Currently only support length 1 constraints.
10750   if (Constraint.length() == 1) {
10751     switch (Constraint[0]) {
10752     case 'I':
10753       // Validate & create a 12-bit signed immediate operand.
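      // For example (illustrative), in GCC-style inline assembly:
      //   asm volatile("addi %0, %1, %2" : "=r"(y) : "r"(x), "I"(42));
      // the constant 42 is accepted because it fits in a signed 12-bit
      // immediate.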
10754       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
10755         uint64_t CVal = C->getSExtValue();
10756         if (isInt<12>(CVal))
10757           Ops.push_back(
10758               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
10759       }
10760       return;
10761     case 'J':
10762       // Validate & create an integer zero operand.
10763       if (auto *C = dyn_cast<ConstantSDNode>(Op))
10764         if (C->getZExtValue() == 0)
10765           Ops.push_back(
10766               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
10767       return;
10768     case 'K':
10769       // Validate & create a 5-bit unsigned immediate operand.
10770       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
10771         uint64_t CVal = C->getZExtValue();
10772         if (isUInt<5>(CVal))
10773           Ops.push_back(
10774               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
10775       }
10776       return;
10777     case 'S':
10778       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
10779         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
10780                                                  GA->getValueType(0)));
10781       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
10782         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
10783                                                 BA->getValueType(0)));
10784       }
10785       return;
10786     default:
10787       break;
10788     }
10789   }
10790   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
10791 }
10792 
10793 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
10794                                                    Instruction *Inst,
10795                                                    AtomicOrdering Ord) const {
10796   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
10797     return Builder.CreateFence(Ord);
10798   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
10799     return Builder.CreateFence(AtomicOrdering::Release);
10800   return nullptr;
10801 }
10802 
10803 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
10804                                                     Instruction *Inst,
10805                                                     AtomicOrdering Ord) const {
10806   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
10807     return Builder.CreateFence(AtomicOrdering::Acquire);
10808   return nullptr;
10809 }
10810 
10811 TargetLowering::AtomicExpansionKind
10812 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
10813   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
10814   // point operations can't be used in an lr/sc sequence without breaking the
10815   // forward-progress guarantee.
10816   if (AI->isFloatingPointOperation())
10817     return AtomicExpansionKind::CmpXChg;
10818 
10819   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
10820   if (Size == 8 || Size == 16)
10821     return AtomicExpansionKind::MaskedIntrinsic;
10822   return AtomicExpansionKind::None;
10823 }
10824 
10825 static Intrinsic::ID
10826 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
10827   if (XLen == 32) {
10828     switch (BinOp) {
10829     default:
10830       llvm_unreachable("Unexpected AtomicRMW BinOp");
10831     case AtomicRMWInst::Xchg:
10832       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
10833     case AtomicRMWInst::Add:
10834       return Intrinsic::riscv_masked_atomicrmw_add_i32;
10835     case AtomicRMWInst::Sub:
10836       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
10837     case AtomicRMWInst::Nand:
10838       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
10839     case AtomicRMWInst::Max:
10840       return Intrinsic::riscv_masked_atomicrmw_max_i32;
10841     case AtomicRMWInst::Min:
10842       return Intrinsic::riscv_masked_atomicrmw_min_i32;
10843     case AtomicRMWInst::UMax:
10844       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
10845     case AtomicRMWInst::UMin:
10846       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
10847     }
10848   }
10849 
10850   if (XLen == 64) {
10851     switch (BinOp) {
10852     default:
10853       llvm_unreachable("Unexpected AtomicRMW BinOp");
10854     case AtomicRMWInst::Xchg:
10855       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
10856     case AtomicRMWInst::Add:
10857       return Intrinsic::riscv_masked_atomicrmw_add_i64;
10858     case AtomicRMWInst::Sub:
10859       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
10860     case AtomicRMWInst::Nand:
10861       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
10862     case AtomicRMWInst::Max:
10863       return Intrinsic::riscv_masked_atomicrmw_max_i64;
10864     case AtomicRMWInst::Min:
10865       return Intrinsic::riscv_masked_atomicrmw_min_i64;
10866     case AtomicRMWInst::UMax:
10867       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
10868     case AtomicRMWInst::UMin:
10869       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
10870     }
10871   }
10872 
10873   llvm_unreachable("Unexpected XLen");
10874 }
10875 
10876 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
10877     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
10878     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
10879   unsigned XLen = Subtarget.getXLen();
10880   Value *Ordering =
10881       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
10882   Type *Tys[] = {AlignedAddr->getType()};
10883   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
10884       AI->getModule(),
10885       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
10886 
10887   if (XLen == 64) {
10888     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
10889     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
10890     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
10891   }
10892 
10893   Value *Result;
10894 
10895   // Must pass the shift amount needed to sign extend the loaded value prior
10896   // to performing a signed comparison for min/max. ShiftAmt is the number of
10897   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
10898   // is the number of bits to left+right shift the value in order to
10899   // sign-extend.
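  // For example (illustrative): for an 8-bit atomic min on RV32 whose byte
  // sits at bit offset 8 of the aligned word, ShiftAmt is 8 and ValWidth is 8,
  // so SextShamt = (32 - 8) - 8 = 16; shifting left and then arithmetic-right
  // by 16 sign-extends the loaded byte in place before the signed comparison.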
10900   if (AI->getOperation() == AtomicRMWInst::Min ||
10901       AI->getOperation() == AtomicRMWInst::Max) {
10902     const DataLayout &DL = AI->getModule()->getDataLayout();
10903     unsigned ValWidth =
10904         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
10905     Value *SextShamt =
10906         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
10907     Result = Builder.CreateCall(LrwOpScwLoop,
10908                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
10909   } else {
10910     Result =
10911         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
10912   }
10913 
10914   if (XLen == 64)
10915     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
10916   return Result;
10917 }
10918 
10919 TargetLowering::AtomicExpansionKind
10920 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
10921     AtomicCmpXchgInst *CI) const {
10922   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
10923   if (Size == 8 || Size == 16)
10924     return AtomicExpansionKind::MaskedIntrinsic;
10925   return AtomicExpansionKind::None;
10926 }
10927 
10928 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
10929     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
10930     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
10931   unsigned XLen = Subtarget.getXLen();
10932   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
10933   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
10934   if (XLen == 64) {
10935     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
10936     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
10937     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
10938     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
10939   }
10940   Type *Tys[] = {AlignedAddr->getType()};
10941   Function *MaskedCmpXchg =
10942       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
10943   Value *Result = Builder.CreateCall(
10944       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
10945   if (XLen == 64)
10946     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
10947   return Result;
10948 }
10949 
10950 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
10951   return false;
10952 }
10953 
10954 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
10955                                                EVT VT) const {
10956   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
10957     return false;
10958 
10959   switch (FPVT.getSimpleVT().SimpleTy) {
10960   case MVT::f16:
10961     return Subtarget.hasStdExtZfh();
10962   case MVT::f32:
10963     return Subtarget.hasStdExtF();
10964   case MVT::f64:
10965     return Subtarget.hasStdExtD();
10966   default:
10967     return false;
10968   }
10969 }
10970 
10971 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
10972   // If we are using the small code model, we can reduce the size of each jump
10973   // table entry to 4 bytes.
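  // EK_Custom32 emits each entry as a plain 32-bit symbol reference (see
  // LowerCustomJumpTableEntry below); under the small code model, code
  // addresses are assumed to fit in a sign-extended 32-bit value.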
10974   if (Subtarget.is64Bit() && !isPositionIndependent() &&
10975       getTargetMachine().getCodeModel() == CodeModel::Small) {
10976     return MachineJumpTableInfo::EK_Custom32;
10977   }
10978   return TargetLowering::getJumpTableEncoding();
10979 }
10980 
10981 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
10982     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
10983     unsigned uid, MCContext &Ctx) const {
10984   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
10985          getTargetMachine().getCodeModel() == CodeModel::Small);
10986   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
10987 }
10988 
10989 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
10990                                                      EVT VT) const {
10991   VT = VT.getScalarType();
10992 
10993   if (!VT.isSimple())
10994     return false;
10995 
10996   switch (VT.getSimpleVT().SimpleTy) {
10997   case MVT::f16:
10998     return Subtarget.hasStdExtZfh();
10999   case MVT::f32:
11000     return Subtarget.hasStdExtF();
11001   case MVT::f64:
11002     return Subtarget.hasStdExtD();
11003   default:
11004     break;
11005   }
11006 
11007   return false;
11008 }
11009 
11010 Register RISCVTargetLowering::getExceptionPointerRegister(
11011     const Constant *PersonalityFn) const {
11012   return RISCV::X10;
11013 }
11014 
11015 Register RISCVTargetLowering::getExceptionSelectorRegister(
11016     const Constant *PersonalityFn) const {
11017   return RISCV::X11;
11018 }
11019 
11020 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
11021   // Return false to suppress the unnecessary extension when a libcall
11022   // argument or return value has f32 type under the LP64 ABI.
11023   RISCVABI::ABI ABI = Subtarget.getTargetABI();
11024   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
11025     return false;
11026 
11027   return true;
11028 }
11029 
11030 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
11031   if (Subtarget.is64Bit() && Type == MVT::i32)
11032     return true;
11033 
11034   return IsSigned;
11035 }
11036 
11037 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
11038                                                  SDValue C) const {
11039   // Check integral scalar types.
11040   if (VT.isScalarInteger()) {
11041     // Omit the optimization if the subtarget has the M extension and the data
11042     // size exceeds XLen.
11043     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
11044       return false;
11045     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
11046       // Break the MUL into a SLLI and an ADD/SUB.
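      // For example (illustrative): x * 9 becomes (x << 3) + x and x * 7
      // becomes (x << 3) - x.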
11047       const APInt &Imm = ConstNode->getAPIntValue();
11048       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
11049           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
11050         return true;
11051       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
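      // For example (illustrative): x * 2052 (= 4 + 2^11, not simm12) can be
      // lowered as SH2ADD(x, SLLI(x, 11)).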
11052       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
11053           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
11054            (Imm - 8).isPowerOf2()))
11055         return true;
11056       // Omit the following optimization if the subtarget has the M extension
11057       // and the data size is at least XLen.
11058       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
11059         return false;
11060       // Break the MUL into two SLLI instructions and an ADD/SUB if materializing
11061       // Imm would otherwise require a LUI/ADDI pair.
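      // For example (illustrative): x * 6144 (= 3 << 11, not simm12) can be
      // lowered as ((x << 1) + x) << 11, i.e. two SLLIs and an ADD.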
11062       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
11063         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
11064         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
11065             (1 - ImmS).isPowerOf2())
11066           return true;
11067       }
11068     }
11069   }
11070 
11071   return false;
11072 }
11073 
11074 bool RISCVTargetLowering::isMulAddWithConstProfitable(
11075     const SDValue &AddNode, const SDValue &ConstNode) const {
11076   // Let the DAGCombiner decide for vectors.
11077   EVT VT = AddNode.getValueType();
11078   if (VT.isVector())
11079     return true;
11080 
11081   // Let the DAGCombiner decide for larger types.
11082   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
11083     return true;
11084 
11085   // Performing the fold is worse if c1 is simm12 while c1*c2 is not.
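  // For example (illustrative): in (x + 2000) * 3, the constant 2000 fits an
  // ADDI immediate but 2000 * 3 = 6000 does not, so folding the add into the
  // multiply would require an extra LUI/ADDI to materialize 6000.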
11086   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
11087   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
11088   const APInt &C1 = C1Node->getAPIntValue();
11089   const APInt &C2 = C2Node->getAPIntValue();
11090   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
11091     return false;
11092 
11093   // Default to true and let the DAGCombiner decide.
11094   return true;
11095 }
11096 
11097 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
11098     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
11099     bool *Fast) const {
11100   if (!VT.isVector())
11101     return false;
11102 
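  // Only element-aligned vector accesses are treated as fast here; accesses
  // aligned to less than the element size are rejected.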
11103   EVT ElemVT = VT.getVectorElementType();
11104   if (Alignment >= ElemVT.getStoreSize()) {
11105     if (Fast)
11106       *Fast = true;
11107     return true;
11108   }
11109 
11110   return false;
11111 }
11112 
11113 bool RISCVTargetLowering::splitValueIntoRegisterParts(
11114     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
11115     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
11116   bool IsABIRegCopy = CC.hasValue();
11117   EVT ValueVT = Val.getValueType();
11118   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11119     // Cast the f16 to i16, extend to i32, pad with ones to make a float NaN,
11120     // and cast to f32.
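    // For example (illustrative): the f16 bit pattern 0x3C00 (1.0) is passed
    // as the NaN-boxed f32 bit pattern 0xFFFF3C00.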
11121     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
11122     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
11123     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
11124                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
11125     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
11126     Parts[0] = Val;
11127     return true;
11128   }
11129 
11130   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11131     LLVMContext &Context = *DAG.getContext();
11132     EVT ValueEltVT = ValueVT.getVectorElementType();
11133     EVT PartEltVT = PartVT.getVectorElementType();
11134     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11135     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11136     if (PartVTBitSize % ValueVTBitSize == 0) {
11137       assert(PartVTBitSize >= ValueVTBitSize);
11138       // If the element types are different, bitcast to the same element type of
11139       // PartVT first.
11140       // For example, to copy a <vscale x 1 x i8> value into
11141       // <vscale x 4 x i16>, we first widen <vscale x 1 x i8> to
11142       // <vscale x 8 x i8> with an INSERT_SUBVECTOR, and then bitcast the
11143       // result to <vscale x 4 x i16>.
11144       if (ValueEltVT != PartEltVT) {
11145         if (PartVTBitSize > ValueVTBitSize) {
11146           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11147           assert(Count != 0 && "The number of elements should not be zero.");
11148           EVT SameEltTypeVT =
11149               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11150           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
11151                             DAG.getUNDEF(SameEltTypeVT), Val,
11152                             DAG.getVectorIdxConstant(0, DL));
11153         }
11154         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
11155       } else {
11156         Val =
11157             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
11158                         Val, DAG.getVectorIdxConstant(0, DL));
11159       }
11160       Parts[0] = Val;
11161       return true;
11162     }
11163   }
11164   return false;
11165 }
11166 
11167 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
11168     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
11169     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
11170   bool IsABIRegCopy = CC.hasValue();
11171   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11172     SDValue Val = Parts[0];
11173 
11174     // Cast the f32 to i32, truncate to i16, and cast back to f16.
11175     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
11176     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
11177     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
11178     return Val;
11179   }
11180 
11181   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11182     LLVMContext &Context = *DAG.getContext();
11183     SDValue Val = Parts[0];
11184     EVT ValueEltVT = ValueVT.getVectorElementType();
11185     EVT PartEltVT = PartVT.getVectorElementType();
11186     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11187     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11188     if (PartVTBitSize % ValueVTBitSize == 0) {
11189       assert(PartVTBitSize >= ValueVTBitSize);
11190       EVT SameEltTypeVT = ValueVT;
11191       // If the element types are different, convert it to the same element type
11192       // of PartVT.
11193       // For example, to copy a <vscale x 1 x i8> value out of
11194       // <vscale x 4 x i16>, we first bitcast <vscale x 4 x i16> to
11195       // <vscale x 8 x i8>, and then extract the <vscale x 1 x i8>
11196       // subvector from it.
11197       if (ValueEltVT != PartEltVT) {
11198         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11199         assert(Count != 0 && "The number of elements should not be zero.");
11200         SameEltTypeVT =
11201             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11202         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
11203       }
11204       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
11205                         DAG.getVectorIdxConstant(0, DL));
11206       return Val;
11207     }
11208   }
11209   return SDValue();
11210 }
11211 
11212 SDValue
11213 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
11214                                    SelectionDAG &DAG,
11215                                    SmallVectorImpl<SDNode *> &Created) const {
11216   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
11217   if (isIntDivCheap(N->getValueType(0), Attr))
11218     return SDValue(N, 0); // Lower SDIV as SDIV
11219 
11220   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
11221          "Unexpected divisor!");
11222 
11223   // Conditional move is needed, so do the transformation iff Zbt is enabled.
11224   if (!Subtarget.hasStdExtZbt())
11225     return SDValue();
11226 
11227   // When |Divisor| >= 2^12, it isn't profitable to do this transformation.
11228   // In addition, the transformed sequence puts more instructions on the
11229   // critical path when dividing by 2, so keep the original DAG for these cases.
11230   unsigned Lg2 = Divisor.countTrailingZeros();
11231   if (Lg2 == 1 || Lg2 >= 12)
11232     return SDValue();
11233 
11234   // fold (sdiv X, pow2)
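  // For example (illustrative): sdiv x, 8 becomes
  //   (x + ((x < 0) ? 7 : 0)) >> 3   (arithmetic shift)
  // with the conditional add selected as a Zbt conditional move; a negative
  // divisor additionally negates the result.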
11235   EVT VT = N->getValueType(0);
11236   if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
11237     return SDValue();
11238 
11239   SDLoc DL(N);
11240   SDValue N0 = N->getOperand(0);
11241   SDValue Zero = DAG.getConstant(0, DL, VT);
11242   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
11243 
11244   // Add (N0 < 0) ? Pow2 - 1 : 0;
11245   SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
11246   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
11247   SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
11248 
11249   Created.push_back(Cmp.getNode());
11250   Created.push_back(Add.getNode());
11251   Created.push_back(Sel.getNode());
11252 
11253   // Divide by pow2.
11254   SDValue SRA =
11255       DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
11256 
11257   // If we're dividing by a positive value, we're done.  Otherwise, we must
11258   // negate the result.
11259   if (Divisor.isNonNegative())
11260     return SRA;
11261 
11262   Created.push_back(SRA.getNode());
11263   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
11264 }
11265 
11266 #define GET_REGISTER_MATCHER
11267 #include "RISCVGenAsmMatcher.inc"
11268 
11269 Register
11270 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
11271                                        const MachineFunction &MF) const {
11272   Register Reg = MatchRegisterAltName(RegName);
11273   if (Reg == RISCV::NoRegister)
11274     Reg = MatchRegisterName(RegName);
11275   if (Reg == RISCV::NoRegister)
11276     report_fatal_error(
11277         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
11278   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
11279   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
11280     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
11281                              StringRef(RegName) + "\"."));
11282   return Reg;
11283 }
11284 
11285 namespace llvm {
11286 namespace RISCVVIntrinsicsTable {
11287 
11288 #define GET_RISCVVIntrinsicsTable_IMPL
11289 #include "RISCVGenSearchableTables.inc"
11290 
11291 } // namespace RISCVVIntrinsicsTable
11292 
11293 } // namespace llvm
11294