1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineJumpTableInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/CodeGen/ValueTypes.h"
31 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/IR/DiagnosticPrinter.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/IntrinsicsRISCV.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/KnownBits.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/raw_ostream.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "riscv-lower"
45 
46 STATISTIC(NumTailCalls, "Number of tail calls");
47 
48 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
49                                          const RISCVSubtarget &STI)
50     : TargetLowering(TM), Subtarget(STI) {
51 
52   if (Subtarget.isRV32E())
53     report_fatal_error("Codegen not yet implemented for RV32E");
54 
55   RISCVABI::ABI ABI = Subtarget.getTargetABI();
56   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
57 
58   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
59       !Subtarget.hasStdExtF()) {
60     errs() << "Hard-float 'f' ABI can't be used for a target that "
61                 "doesn't support the F instruction set extension (ignoring "
62                           "target-abi)\n";
63     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
64   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
65              !Subtarget.hasStdExtD()) {
66     errs() << "Hard-float 'd' ABI can't be used for a target that "
67               "doesn't support the D instruction set extension (ignoring "
68               "target-abi)\n";
69     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
70   }
71 
72   switch (ABI) {
73   default:
74     report_fatal_error("Don't know how to lower this ABI");
75   case RISCVABI::ABI_ILP32:
76   case RISCVABI::ABI_ILP32F:
77   case RISCVABI::ABI_ILP32D:
78   case RISCVABI::ABI_LP64:
79   case RISCVABI::ABI_LP64F:
80   case RISCVABI::ABI_LP64D:
81     break;
82   }
83 
84   MVT XLenVT = Subtarget.getXLenVT();
85 
86   // Set up the register classes.
87   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
88 
89   if (Subtarget.hasStdExtZfh())
90     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
91   if (Subtarget.hasStdExtF())
92     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
93   if (Subtarget.hasStdExtD())
94     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
95 
96   static const MVT::SimpleValueType BoolVecVTs[] = {
97       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
98       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
99   static const MVT::SimpleValueType IntVecVTs[] = {
100       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
101       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
102       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
103       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
104       MVT::nxv4i64, MVT::nxv8i64};
105   static const MVT::SimpleValueType F16VecVTs[] = {
106       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
107       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
108   static const MVT::SimpleValueType F32VecVTs[] = {
109       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
110   static const MVT::SimpleValueType F64VecVTs[] = {
111       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
112 
113   if (Subtarget.hasVInstructions()) {
114     auto addRegClassForRVV = [this](MVT VT) {
115       unsigned Size = VT.getSizeInBits().getKnownMinValue();
116       assert(Size <= 512 && isPowerOf2_32(Size));
117       const TargetRegisterClass *RC;
118       if (Size <= 64)
119         RC = &RISCV::VRRegClass;
120       else if (Size == 128)
121         RC = &RISCV::VRM2RegClass;
122       else if (Size == 256)
123         RC = &RISCV::VRM4RegClass;
124       else
125         RC = &RISCV::VRM8RegClass;
126 
127       addRegisterClass(VT, RC);
128     };
129 
130     for (MVT VT : BoolVecVTs)
131       addRegClassForRVV(VT);
132     for (MVT VT : IntVecVTs) {
133       if (VT.getVectorElementType() == MVT::i64 &&
134           !Subtarget.hasVInstructionsI64())
135         continue;
136       addRegClassForRVV(VT);
137     }
138 
139     if (Subtarget.hasVInstructionsF16())
140       for (MVT VT : F16VecVTs)
141         addRegClassForRVV(VT);
142 
143     if (Subtarget.hasVInstructionsF32())
144       for (MVT VT : F32VecVTs)
145         addRegClassForRVV(VT);
146 
147     if (Subtarget.hasVInstructionsF64())
148       for (MVT VT : F64VecVTs)
149         addRegClassForRVV(VT);
150 
151     if (Subtarget.useRVVForFixedLengthVectors()) {
152       auto addRegClassForFixedVectors = [this](MVT VT) {
153         MVT ContainerVT = getContainerForFixedLengthVector(VT);
154         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
155         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
156         addRegisterClass(VT, TRI.getRegClass(RCID));
157       };
158       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
159         if (useRVVForFixedLengthVectorVT(VT))
160           addRegClassForFixedVectors(VT);
161 
162       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
163         if (useRVVForFixedLengthVectorVT(VT))
164           addRegClassForFixedVectors(VT);
165     }
166   }
167 
168   // Compute derived properties from the register classes.
169   computeRegisterProperties(STI.getRegisterInfo());
170 
171   setStackPointerRegisterToSaveRestore(RISCV::X2);
172 
173   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
174     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
175 
176   // TODO: add all necessary setOperationAction calls.
177   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
178 
179   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
180   setOperationAction(ISD::BR_CC, XLenVT, Expand);
181   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
182   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
183 
184   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
185   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
186 
187   setOperationAction(ISD::VASTART, MVT::Other, Custom);
188   setOperationAction(ISD::VAARG, MVT::Other, Expand);
189   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
190   setOperationAction(ISD::VAEND, MVT::Other, Expand);
191 
192   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
193   if (!Subtarget.hasStdExtZbb()) {
194     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
195     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
196   }
197 
198   if (Subtarget.is64Bit()) {
199     setOperationAction(ISD::ADD, MVT::i32, Custom);
200     setOperationAction(ISD::SUB, MVT::i32, Custom);
201     setOperationAction(ISD::SHL, MVT::i32, Custom);
202     setOperationAction(ISD::SRA, MVT::i32, Custom);
203     setOperationAction(ISD::SRL, MVT::i32, Custom);
204 
205     setOperationAction(ISD::UADDO, MVT::i32, Custom);
206     setOperationAction(ISD::USUBO, MVT::i32, Custom);
207     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
208     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
209   } else {
210     setLibcallName(RTLIB::SHL_I128, nullptr);
211     setLibcallName(RTLIB::SRL_I128, nullptr);
212     setLibcallName(RTLIB::SRA_I128, nullptr);
213     setLibcallName(RTLIB::MUL_I128, nullptr);
214     setLibcallName(RTLIB::MULO_I64, nullptr);
215   }
216 
217   if (!Subtarget.hasStdExtM()) {
218     setOperationAction(ISD::MUL, XLenVT, Expand);
219     setOperationAction(ISD::MULHS, XLenVT, Expand);
220     setOperationAction(ISD::MULHU, XLenVT, Expand);
221     setOperationAction(ISD::SDIV, XLenVT, Expand);
222     setOperationAction(ISD::UDIV, XLenVT, Expand);
223     setOperationAction(ISD::SREM, XLenVT, Expand);
224     setOperationAction(ISD::UREM, XLenVT, Expand);
225   } else {
226     if (Subtarget.is64Bit()) {
227       setOperationAction(ISD::MUL, MVT::i32, Custom);
228       setOperationAction(ISD::MUL, MVT::i128, Custom);
229 
230       setOperationAction(ISD::SDIV, MVT::i8, Custom);
231       setOperationAction(ISD::UDIV, MVT::i8, Custom);
232       setOperationAction(ISD::UREM, MVT::i8, Custom);
233       setOperationAction(ISD::SDIV, MVT::i16, Custom);
234       setOperationAction(ISD::UDIV, MVT::i16, Custom);
235       setOperationAction(ISD::UREM, MVT::i16, Custom);
236       setOperationAction(ISD::SDIV, MVT::i32, Custom);
237       setOperationAction(ISD::UDIV, MVT::i32, Custom);
238       setOperationAction(ISD::UREM, MVT::i32, Custom);
239     } else {
240       setOperationAction(ISD::MUL, MVT::i64, Custom);
241     }
242   }
243 
244   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
245   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
246   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
247   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
248 
249   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
250   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
251   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
252 
253   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
254       Subtarget.hasStdExtZbkb()) {
255     if (Subtarget.is64Bit()) {
256       setOperationAction(ISD::ROTL, MVT::i32, Custom);
257       setOperationAction(ISD::ROTR, MVT::i32, Custom);
258     }
259   } else {
260     setOperationAction(ISD::ROTL, XLenVT, Expand);
261     setOperationAction(ISD::ROTR, XLenVT, Expand);
262   }
263 
264   if (Subtarget.hasStdExtZbp()) {
265     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
266     // more combining.
267     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
268     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
269     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
270     // BSWAP i8 doesn't exist.
271     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
272     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
273 
274     if (Subtarget.is64Bit()) {
275       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
276       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
277     }
278   } else {
279     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
280     // pattern match it directly in isel.
281     setOperationAction(ISD::BSWAP, XLenVT,
282                        (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
283                            ? Legal
284                            : Expand);
285     // Zbkb can use rev8+brev8 to implement bitreverse.
286     setOperationAction(ISD::BITREVERSE, XLenVT,
287                        Subtarget.hasStdExtZbkb() ? Custom : Expand);
288   }
289 
290   if (Subtarget.hasStdExtZbb()) {
291     setOperationAction(ISD::SMIN, XLenVT, Legal);
292     setOperationAction(ISD::SMAX, XLenVT, Legal);
293     setOperationAction(ISD::UMIN, XLenVT, Legal);
294     setOperationAction(ISD::UMAX, XLenVT, Legal);
295 
296     if (Subtarget.is64Bit()) {
297       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
298       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
299       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
300       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
301     }
302   } else {
303     setOperationAction(ISD::CTTZ, XLenVT, Expand);
304     setOperationAction(ISD::CTLZ, XLenVT, Expand);
305     setOperationAction(ISD::CTPOP, XLenVT, Expand);
306   }
307 
308   if (Subtarget.hasStdExtZbt()) {
309     setOperationAction(ISD::FSHL, XLenVT, Custom);
310     setOperationAction(ISD::FSHR, XLenVT, Custom);
311     setOperationAction(ISD::SELECT, XLenVT, Legal);
312 
313     if (Subtarget.is64Bit()) {
314       setOperationAction(ISD::FSHL, MVT::i32, Custom);
315       setOperationAction(ISD::FSHR, MVT::i32, Custom);
316     }
317   } else {
318     setOperationAction(ISD::SELECT, XLenVT, Custom);
319   }
320 
321   static const ISD::CondCode FPCCToExpand[] = {
322       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
323       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
324       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
325 
326   static const ISD::NodeType FPOpToExpand[] = {
327       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
328       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
329 
330   if (Subtarget.hasStdExtZfh())
331     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
332 
333   if (Subtarget.hasStdExtZfh()) {
334     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
335     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
336     setOperationAction(ISD::LRINT, MVT::f16, Legal);
337     setOperationAction(ISD::LLRINT, MVT::f16, Legal);
338     setOperationAction(ISD::LROUND, MVT::f16, Legal);
339     setOperationAction(ISD::LLROUND, MVT::f16, Legal);
340     setOperationAction(ISD::STRICT_LRINT, MVT::f16, Legal);
341     setOperationAction(ISD::STRICT_LLRINT, MVT::f16, Legal);
342     setOperationAction(ISD::STRICT_LROUND, MVT::f16, Legal);
343     setOperationAction(ISD::STRICT_LLROUND, MVT::f16, Legal);
344     setOperationAction(ISD::STRICT_FADD, MVT::f16, Legal);
345     setOperationAction(ISD::STRICT_FMA, MVT::f16, Legal);
346     setOperationAction(ISD::STRICT_FSUB, MVT::f16, Legal);
347     setOperationAction(ISD::STRICT_FMUL, MVT::f16, Legal);
348     setOperationAction(ISD::STRICT_FDIV, MVT::f16, Legal);
349     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
350     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
351     setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Legal);
352     setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Legal);
353     setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Legal);
354     for (auto CC : FPCCToExpand)
355       setCondCodeAction(CC, MVT::f16, Expand);
356     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
357     setOperationAction(ISD::SELECT, MVT::f16, Custom);
358     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
359 
360     setOperationAction(ISD::FREM,       MVT::f16, Promote);
361     setOperationAction(ISD::FCEIL,      MVT::f16, Promote);
362     setOperationAction(ISD::FFLOOR,     MVT::f16, Promote);
363     setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
364     setOperationAction(ISD::FRINT,      MVT::f16, Promote);
365     setOperationAction(ISD::FROUND,     MVT::f16, Promote);
366     setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote);
367     setOperationAction(ISD::FTRUNC,     MVT::f16, Promote);
368     setOperationAction(ISD::FPOW,       MVT::f16, Promote);
369     setOperationAction(ISD::FPOWI,      MVT::f16, Promote);
370     setOperationAction(ISD::FCOS,       MVT::f16, Promote);
371     setOperationAction(ISD::FSIN,       MVT::f16, Promote);
372     setOperationAction(ISD::FSINCOS,    MVT::f16, Promote);
373     setOperationAction(ISD::FEXP,       MVT::f16, Promote);
374     setOperationAction(ISD::FEXP2,      MVT::f16, Promote);
375     setOperationAction(ISD::FLOG,       MVT::f16, Promote);
376     setOperationAction(ISD::FLOG2,      MVT::f16, Promote);
377     setOperationAction(ISD::FLOG10,     MVT::f16, Promote);
378 
379     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
380     // complete support for all operations in LegalizeDAG.
381 
382     // We need to custom promote this.
383     if (Subtarget.is64Bit())
384       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
385   }
386 
387   if (Subtarget.hasStdExtF()) {
388     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
389     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
390     setOperationAction(ISD::LRINT, MVT::f32, Legal);
391     setOperationAction(ISD::LLRINT, MVT::f32, Legal);
392     setOperationAction(ISD::LROUND, MVT::f32, Legal);
393     setOperationAction(ISD::LLROUND, MVT::f32, Legal);
394     setOperationAction(ISD::STRICT_LRINT, MVT::f32, Legal);
395     setOperationAction(ISD::STRICT_LLRINT, MVT::f32, Legal);
396     setOperationAction(ISD::STRICT_LROUND, MVT::f32, Legal);
397     setOperationAction(ISD::STRICT_LLROUND, MVT::f32, Legal);
398     setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
399     setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
400     setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
401     setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
402     setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
403     setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
404     setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
405     setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
406     for (auto CC : FPCCToExpand)
407       setCondCodeAction(CC, MVT::f32, Expand);
408     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
409     setOperationAction(ISD::SELECT, MVT::f32, Custom);
410     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
411     for (auto Op : FPOpToExpand)
412       setOperationAction(Op, MVT::f32, Expand);
413     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
414     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
415   }
416 
417   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
418     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
419 
420   if (Subtarget.hasStdExtD()) {
421     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
422     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
423     setOperationAction(ISD::LRINT, MVT::f64, Legal);
424     setOperationAction(ISD::LLRINT, MVT::f64, Legal);
425     setOperationAction(ISD::LROUND, MVT::f64, Legal);
426     setOperationAction(ISD::LLROUND, MVT::f64, Legal);
427     setOperationAction(ISD::STRICT_LRINT, MVT::f64, Legal);
428     setOperationAction(ISD::STRICT_LLRINT, MVT::f64, Legal);
429     setOperationAction(ISD::STRICT_LROUND, MVT::f64, Legal);
430     setOperationAction(ISD::STRICT_LLROUND, MVT::f64, Legal);
431     setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
432     setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
433     setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
434     setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
435     setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
436     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
437     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
438     setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
439     setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
440     setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
441     for (auto CC : FPCCToExpand)
442       setCondCodeAction(CC, MVT::f64, Expand);
443     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
444     setOperationAction(ISD::SELECT, MVT::f64, Custom);
445     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
446     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
447     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
448     for (auto Op : FPOpToExpand)
449       setOperationAction(Op, MVT::f64, Expand);
450     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
451     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
452   }
453 
454   if (Subtarget.is64Bit()) {
455     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
456     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
457     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
458     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
459   }
460 
461   if (Subtarget.hasStdExtF()) {
462     setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
463     setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);
464 
465     setOperationAction(ISD::STRICT_FP_TO_UINT, XLenVT, Legal);
466     setOperationAction(ISD::STRICT_FP_TO_SINT, XLenVT, Legal);
467     setOperationAction(ISD::STRICT_UINT_TO_FP, XLenVT, Legal);
468     setOperationAction(ISD::STRICT_SINT_TO_FP, XLenVT, Legal);
469 
470     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
471     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
472   }
473 
474   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
475   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
476   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
477   setOperationAction(ISD::JumpTable, XLenVT, Custom);
478 
479   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
480 
481   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
482   // Unfortunately this can't be determined just from the ISA naming string.
483   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
484                      Subtarget.is64Bit() ? Legal : Custom);
485 
486   setOperationAction(ISD::TRAP, MVT::Other, Legal);
487   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
488   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
489   if (Subtarget.is64Bit())
490     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
491 
492   if (Subtarget.hasStdExtA()) {
493     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
494     setMinCmpXchgSizeInBits(32);
495   } else {
496     setMaxAtomicSizeInBitsSupported(0);
497   }
498 
499   setBooleanContents(ZeroOrOneBooleanContent);
500 
501   if (Subtarget.hasVInstructions()) {
502     setBooleanVectorContents(ZeroOrOneBooleanContent);
503 
504     setOperationAction(ISD::VSCALE, XLenVT, Custom);
505 
506     // RVV intrinsics may have illegal operands.
507     // We also need to custom legalize vmv.x.s.
508     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
509     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
510     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
511     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
512     if (Subtarget.is64Bit()) {
513       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
514     } else {
515       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
516       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
517     }
518 
519     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
520     setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
521 
522     static const unsigned IntegerVPOps[] = {
523         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
524         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
525         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
526         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
527         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
528         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
529         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
530         ISD::VP_MERGE,       ISD::VP_SELECT};
531 
532     static const unsigned FloatingPointVPOps[] = {
533         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
534         ISD::VP_FDIV,        ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
535         ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,
536         ISD::VP_SELECT};
537 
538     if (!Subtarget.is64Bit()) {
539       // We must custom-lower certain vXi64 operations on RV32 due to the vector
540       // element type being illegal.
541       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
542       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
543 
544       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
545       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
546       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
547       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
548       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
549       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
550       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
551       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
552 
553       setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom);
554       setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom);
555       setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom);
556       setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom);
557       setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom);
558       setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom);
559       setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom);
560       setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom);
561     }
562 
563     for (MVT VT : BoolVecVTs) {
564       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
565 
566       // Mask VTs are custom-expanded into a series of standard nodes
567       setOperationAction(ISD::TRUNCATE, VT, Custom);
568       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
569       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
570       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
571 
572       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
573       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
574 
575       setOperationAction(ISD::SELECT, VT, Custom);
576       setOperationAction(ISD::SELECT_CC, VT, Expand);
577       setOperationAction(ISD::VSELECT, VT, Expand);
578       setOperationAction(ISD::VP_MERGE, VT, Expand);
579       setOperationAction(ISD::VP_SELECT, VT, Expand);
580 
581       setOperationAction(ISD::VP_AND, VT, Custom);
582       setOperationAction(ISD::VP_OR, VT, Custom);
583       setOperationAction(ISD::VP_XOR, VT, Custom);
584 
585       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
586       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
587       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
588 
589       setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
590       setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
591       setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
592 
593       // RVV has native int->float & float->int conversions where the
594       // element type sizes are within one power-of-two of each other. Any
595       // wider distances between type sizes have to be lowered as sequences
596       // which progressively narrow the gap in stages.
597       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
598       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
599       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
600       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
601 
602       // Expand all extending loads to types larger than this, and truncating
603       // stores from types larger than this.
604       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
605         setTruncStoreAction(OtherVT, VT, Expand);
606         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
607         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
608         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
609       }
610     }
611 
612     for (MVT VT : IntVecVTs) {
613       if (VT.getVectorElementType() == MVT::i64 &&
614           !Subtarget.hasVInstructionsI64())
615         continue;
616 
617       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
618       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
619 
620       // Vectors implement MULHS/MULHU.
621       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
622       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
623 
624       // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
625       if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV()) {
626         setOperationAction(ISD::MULHU, VT, Expand);
627         setOperationAction(ISD::MULHS, VT, Expand);
628       }
629 
630       setOperationAction(ISD::SMIN, VT, Legal);
631       setOperationAction(ISD::SMAX, VT, Legal);
632       setOperationAction(ISD::UMIN, VT, Legal);
633       setOperationAction(ISD::UMAX, VT, Legal);
634 
635       setOperationAction(ISD::ROTL, VT, Expand);
636       setOperationAction(ISD::ROTR, VT, Expand);
637 
638       setOperationAction(ISD::CTTZ, VT, Expand);
639       setOperationAction(ISD::CTLZ, VT, Expand);
640       setOperationAction(ISD::CTPOP, VT, Expand);
641 
642       setOperationAction(ISD::BSWAP, VT, Expand);
643 
644       // Custom-lower extensions and truncations from/to mask types.
645       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
646       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
647       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
648 
649       // RVV has native int->float & float->int conversions where the
650       // element type sizes are within one power-of-two of each other. Any
651       // wider distances between type sizes have to be lowered as sequences
652       // which progressively narrow the gap in stages.
653       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
654       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
655       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
656       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
657 
658       setOperationAction(ISD::SADDSAT, VT, Legal);
659       setOperationAction(ISD::UADDSAT, VT, Legal);
660       setOperationAction(ISD::SSUBSAT, VT, Legal);
661       setOperationAction(ISD::USUBSAT, VT, Legal);
662 
663       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
664       // nodes which truncate by one power of two at a time.
665       setOperationAction(ISD::TRUNCATE, VT, Custom);
666 
667       // Custom-lower insert/extract operations to simplify patterns.
668       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
669       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
670 
671       // Custom-lower reduction operations to set up the corresponding custom
672       // nodes' operands.
673       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
674       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
675       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
676       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
677       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
678       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
679       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
680       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
681 
682       for (unsigned VPOpc : IntegerVPOps)
683         setOperationAction(VPOpc, VT, Custom);
684 
685       setOperationAction(ISD::LOAD, VT, Custom);
686       setOperationAction(ISD::STORE, VT, Custom);
687 
688       setOperationAction(ISD::MLOAD, VT, Custom);
689       setOperationAction(ISD::MSTORE, VT, Custom);
690       setOperationAction(ISD::MGATHER, VT, Custom);
691       setOperationAction(ISD::MSCATTER, VT, Custom);
692 
693       setOperationAction(ISD::VP_LOAD, VT, Custom);
694       setOperationAction(ISD::VP_STORE, VT, Custom);
695       setOperationAction(ISD::VP_GATHER, VT, Custom);
696       setOperationAction(ISD::VP_SCATTER, VT, Custom);
697 
698       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
699       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
700       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
701 
702       setOperationAction(ISD::SELECT, VT, Custom);
703       setOperationAction(ISD::SELECT_CC, VT, Expand);
704 
705       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
706       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
707 
708       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
709         setTruncStoreAction(VT, OtherVT, Expand);
710         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
711         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
712         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
713       }
714 
715       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
716       // type that can represent the value exactly.
717       if (VT.getVectorElementType() != MVT::i64) {
718         MVT FloatEltVT =
719             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
720         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
721         if (isTypeLegal(FloatVT)) {
722           setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
723           setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
724         }
725       }
726     }
727 
728     // Expand various CCs to best match the RVV ISA, which natively supports UNE
729     // but no other unordered comparisons, and supports all ordered comparisons
730     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
731     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
732     // and we pattern-match those back to the "original", swapping operands once
733     // more. This way we catch both operations and both "vf" and "fv" forms with
734     // fewer patterns.
735     static const ISD::CondCode VFPCCToExpand[] = {
736         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
737         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
738         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
739     };
740 
741     // Sets common operation actions on RVV floating-point vector types.
742     const auto SetCommonVFPActions = [&](MVT VT) {
743       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
744       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
745       // sizes are within one power-of-two of each other. Therefore conversions
746       // between vXf16 and vXf64 must be lowered as sequences which convert via
747       // vXf32.
748       setOperationAction(ISD::FP_ROUND, VT, Custom);
749       setOperationAction(ISD::FP_EXTEND, VT, Custom);
750       // Custom-lower insert/extract operations to simplify patterns.
751       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
752       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
753       // Expand various condition codes (explained above).
754       for (auto CC : VFPCCToExpand)
755         setCondCodeAction(CC, VT, Expand);
756 
757       setOperationAction(ISD::FMINNUM, VT, Legal);
758       setOperationAction(ISD::FMAXNUM, VT, Legal);
759 
760       setOperationAction(ISD::FTRUNC, VT, Custom);
761       setOperationAction(ISD::FCEIL, VT, Custom);
762       setOperationAction(ISD::FFLOOR, VT, Custom);
763 
764       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
765       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
766       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
767       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
768 
769       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
770 
771       setOperationAction(ISD::LOAD, VT, Custom);
772       setOperationAction(ISD::STORE, VT, Custom);
773 
774       setOperationAction(ISD::MLOAD, VT, Custom);
775       setOperationAction(ISD::MSTORE, VT, Custom);
776       setOperationAction(ISD::MGATHER, VT, Custom);
777       setOperationAction(ISD::MSCATTER, VT, Custom);
778 
779       setOperationAction(ISD::VP_LOAD, VT, Custom);
780       setOperationAction(ISD::VP_STORE, VT, Custom);
781       setOperationAction(ISD::VP_GATHER, VT, Custom);
782       setOperationAction(ISD::VP_SCATTER, VT, Custom);
783 
784       setOperationAction(ISD::SELECT, VT, Custom);
785       setOperationAction(ISD::SELECT_CC, VT, Expand);
786 
787       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
788       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
789       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
790 
791       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
792 
793       for (unsigned VPOpc : FloatingPointVPOps)
794         setOperationAction(VPOpc, VT, Custom);
795     };
796 
797     // Sets common extload/truncstore actions on RVV floating-point vector
798     // types.
799     const auto SetCommonVFPExtLoadTruncStoreActions =
800         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
801           for (auto SmallVT : SmallerVTs) {
802             setTruncStoreAction(VT, SmallVT, Expand);
803             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
804           }
805         };
806 
807     if (Subtarget.hasVInstructionsF16())
808       for (MVT VT : F16VecVTs)
809         SetCommonVFPActions(VT);
810 
811     for (MVT VT : F32VecVTs) {
812       if (Subtarget.hasVInstructionsF32())
813         SetCommonVFPActions(VT);
814       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
815     }
816 
817     for (MVT VT : F64VecVTs) {
818       if (Subtarget.hasVInstructionsF64())
819         SetCommonVFPActions(VT);
820       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
821       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
822     }
823 
824     if (Subtarget.useRVVForFixedLengthVectors()) {
825       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
826         if (!useRVVForFixedLengthVectorVT(VT))
827           continue;
828 
829         // By default everything must be expanded.
830         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
831           setOperationAction(Op, VT, Expand);
832         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
833           setTruncStoreAction(VT, OtherVT, Expand);
834           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
835           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
836           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
837         }
838 
839         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
840         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
841         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
842 
843         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
844         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
845 
846         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
847         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
848 
849         setOperationAction(ISD::LOAD, VT, Custom);
850         setOperationAction(ISD::STORE, VT, Custom);
851 
852         setOperationAction(ISD::SETCC, VT, Custom);
853 
854         setOperationAction(ISD::SELECT, VT, Custom);
855 
856         setOperationAction(ISD::TRUNCATE, VT, Custom);
857 
858         setOperationAction(ISD::BITCAST, VT, Custom);
859 
860         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
861         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
862         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
863 
864         setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
865         setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
866         setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
867 
868         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
869         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
870         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
871         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
872 
873         // Operations below are different for between masks and other vectors.
874         if (VT.getVectorElementType() == MVT::i1) {
875           setOperationAction(ISD::VP_AND, VT, Custom);
876           setOperationAction(ISD::VP_OR, VT, Custom);
877           setOperationAction(ISD::VP_XOR, VT, Custom);
878           setOperationAction(ISD::AND, VT, Custom);
879           setOperationAction(ISD::OR, VT, Custom);
880           setOperationAction(ISD::XOR, VT, Custom);
881           continue;
882         }
883 
884         // Use SPLAT_VECTOR to prevent type legalization from destroying the
885         // splats when type legalizing i64 scalar on RV32.
886         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
887         // improvements first.
888         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
889           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
890           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
891         }
892 
893         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
894         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
895 
896         setOperationAction(ISD::MLOAD, VT, Custom);
897         setOperationAction(ISD::MSTORE, VT, Custom);
898         setOperationAction(ISD::MGATHER, VT, Custom);
899         setOperationAction(ISD::MSCATTER, VT, Custom);
900 
901         setOperationAction(ISD::VP_LOAD, VT, Custom);
902         setOperationAction(ISD::VP_STORE, VT, Custom);
903         setOperationAction(ISD::VP_GATHER, VT, Custom);
904         setOperationAction(ISD::VP_SCATTER, VT, Custom);
905 
906         setOperationAction(ISD::ADD, VT, Custom);
907         setOperationAction(ISD::MUL, VT, Custom);
908         setOperationAction(ISD::SUB, VT, Custom);
909         setOperationAction(ISD::AND, VT, Custom);
910         setOperationAction(ISD::OR, VT, Custom);
911         setOperationAction(ISD::XOR, VT, Custom);
912         setOperationAction(ISD::SDIV, VT, Custom);
913         setOperationAction(ISD::SREM, VT, Custom);
914         setOperationAction(ISD::UDIV, VT, Custom);
915         setOperationAction(ISD::UREM, VT, Custom);
916         setOperationAction(ISD::SHL, VT, Custom);
917         setOperationAction(ISD::SRA, VT, Custom);
918         setOperationAction(ISD::SRL, VT, Custom);
919 
920         setOperationAction(ISD::SMIN, VT, Custom);
921         setOperationAction(ISD::SMAX, VT, Custom);
922         setOperationAction(ISD::UMIN, VT, Custom);
923         setOperationAction(ISD::UMAX, VT, Custom);
924         setOperationAction(ISD::ABS,  VT, Custom);
925 
926         // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
927         if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV()) {
928           setOperationAction(ISD::MULHS, VT, Custom);
929           setOperationAction(ISD::MULHU, VT, Custom);
930         }
931 
932         setOperationAction(ISD::SADDSAT, VT, Custom);
933         setOperationAction(ISD::UADDSAT, VT, Custom);
934         setOperationAction(ISD::SSUBSAT, VT, Custom);
935         setOperationAction(ISD::USUBSAT, VT, Custom);
936 
937         setOperationAction(ISD::VSELECT, VT, Custom);
938         setOperationAction(ISD::SELECT_CC, VT, Expand);
939 
940         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
941         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
942         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
943 
944         // Custom-lower reduction operations to set up the corresponding custom
945         // nodes' operands.
946         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
947         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
948         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
949         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
950         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
951 
952         for (unsigned VPOpc : IntegerVPOps)
953           setOperationAction(VPOpc, VT, Custom);
954 
955         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
956         // type that can represent the value exactly.
957         if (VT.getVectorElementType() != MVT::i64) {
958           MVT FloatEltVT =
959               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
960           EVT FloatVT =
961               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
962           if (isTypeLegal(FloatVT)) {
963             setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
964             setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
965           }
966         }
967       }
968 
969       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
970         if (!useRVVForFixedLengthVectorVT(VT))
971           continue;
972 
973         // By default everything must be expanded.
974         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
975           setOperationAction(Op, VT, Expand);
976         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
977           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
978           setTruncStoreAction(VT, OtherVT, Expand);
979         }
980 
981         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
982         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
983         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
984 
985         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
986         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
987         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
988         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
989         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
990 
991         setOperationAction(ISD::LOAD, VT, Custom);
992         setOperationAction(ISD::STORE, VT, Custom);
993         setOperationAction(ISD::MLOAD, VT, Custom);
994         setOperationAction(ISD::MSTORE, VT, Custom);
995         setOperationAction(ISD::MGATHER, VT, Custom);
996         setOperationAction(ISD::MSCATTER, VT, Custom);
997 
998         setOperationAction(ISD::VP_LOAD, VT, Custom);
999         setOperationAction(ISD::VP_STORE, VT, Custom);
1000         setOperationAction(ISD::VP_GATHER, VT, Custom);
1001         setOperationAction(ISD::VP_SCATTER, VT, Custom);
1002 
1003         setOperationAction(ISD::FADD, VT, Custom);
1004         setOperationAction(ISD::FSUB, VT, Custom);
1005         setOperationAction(ISD::FMUL, VT, Custom);
1006         setOperationAction(ISD::FDIV, VT, Custom);
1007         setOperationAction(ISD::FNEG, VT, Custom);
1008         setOperationAction(ISD::FABS, VT, Custom);
1009         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1010         setOperationAction(ISD::FSQRT, VT, Custom);
1011         setOperationAction(ISD::FMA, VT, Custom);
1012         setOperationAction(ISD::FMINNUM, VT, Custom);
1013         setOperationAction(ISD::FMAXNUM, VT, Custom);
1014 
1015         setOperationAction(ISD::FP_ROUND, VT, Custom);
1016         setOperationAction(ISD::FP_EXTEND, VT, Custom);
1017 
1018         setOperationAction(ISD::FTRUNC, VT, Custom);
1019         setOperationAction(ISD::FCEIL, VT, Custom);
1020         setOperationAction(ISD::FFLOOR, VT, Custom);
1021 
1022         for (auto CC : VFPCCToExpand)
1023           setCondCodeAction(CC, VT, Expand);
1024 
1025         setOperationAction(ISD::VSELECT, VT, Custom);
1026         setOperationAction(ISD::SELECT, VT, Custom);
1027         setOperationAction(ISD::SELECT_CC, VT, Expand);
1028 
1029         setOperationAction(ISD::BITCAST, VT, Custom);
1030 
1031         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1032         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1033         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1034         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1035 
1036         for (unsigned VPOpc : FloatingPointVPOps)
1037           setOperationAction(VPOpc, VT, Custom);
1038       }
1039 
1040       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
1041       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
1042       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
1043       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
1044       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1045       if (Subtarget.hasStdExtZfh())
1046         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
1047       if (Subtarget.hasStdExtF())
1048         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
1049       if (Subtarget.hasStdExtD())
1050         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
1051     }
1052   }
1053 
1054   // Function alignments.
1055   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
1056   setMinFunctionAlignment(FunctionAlignment);
1057   setPrefFunctionAlignment(FunctionAlignment);
1058 
1059   setMinimumJumpTableEntries(5);
1060 
1061   // Jumps are expensive, compared to logic
1062   setJumpIsExpensive();
1063 
1064   setTargetDAGCombine(ISD::ADD);
1065   setTargetDAGCombine(ISD::SUB);
1066   setTargetDAGCombine(ISD::AND);
1067   setTargetDAGCombine(ISD::OR);
1068   setTargetDAGCombine(ISD::XOR);
1069   setTargetDAGCombine(ISD::ANY_EXTEND);
1070   if (Subtarget.hasStdExtF()) {
1071     setTargetDAGCombine(ISD::ZERO_EXTEND);
1072     setTargetDAGCombine(ISD::FP_TO_SINT);
1073     setTargetDAGCombine(ISD::FP_TO_UINT);
1074     setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
1075     setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
1076   }
1077   if (Subtarget.hasVInstructions()) {
1078     setTargetDAGCombine(ISD::FCOPYSIGN);
1079     setTargetDAGCombine(ISD::MGATHER);
1080     setTargetDAGCombine(ISD::MSCATTER);
1081     setTargetDAGCombine(ISD::VP_GATHER);
1082     setTargetDAGCombine(ISD::VP_SCATTER);
1083     setTargetDAGCombine(ISD::SRA);
1084     setTargetDAGCombine(ISD::SRL);
1085     setTargetDAGCombine(ISD::SHL);
1086     setTargetDAGCombine(ISD::STORE);
1087   }
1088 
1089   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
1090   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
1091 }
1092 
1093 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
1094                                             LLVMContext &Context,
1095                                             EVT VT) const {
1096   if (!VT.isVector())
1097     return getPointerTy(DL);
1098   if (Subtarget.hasVInstructions() &&
1099       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
1100     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
1101   return VT.changeVectorElementTypeToInteger();
1102 }
1103 
1104 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
1105   return Subtarget.getXLenVT();
1106 }
1107 
1108 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1109                                              const CallInst &I,
1110                                              MachineFunction &MF,
1111                                              unsigned Intrinsic) const {
1112   auto &DL = I.getModule()->getDataLayout();
1113   switch (Intrinsic) {
1114   default:
1115     return false;
1116   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
1117   case Intrinsic::riscv_masked_atomicrmw_add_i32:
1118   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
1119   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
1120   case Intrinsic::riscv_masked_atomicrmw_max_i32:
1121   case Intrinsic::riscv_masked_atomicrmw_min_i32:
1122   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
1123   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
1124   case Intrinsic::riscv_masked_cmpxchg_i32: {
1125     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
1126     Info.opc = ISD::INTRINSIC_W_CHAIN;
1127     Info.memVT = MVT::getVT(PtrTy->getPointerElementType());
1128     Info.ptrVal = I.getArgOperand(0);
1129     Info.offset = 0;
1130     Info.align = Align(4);
1131     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
1132                  MachineMemOperand::MOVolatile;
1133     return true;
1134   }
1135   case Intrinsic::riscv_masked_strided_load:
1136     Info.opc = ISD::INTRINSIC_W_CHAIN;
1137     Info.ptrVal = I.getArgOperand(1);
1138     Info.memVT = getValueType(DL, I.getType()->getScalarType());
1139     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
1140     Info.size = MemoryLocation::UnknownSize;
1141     Info.flags |= MachineMemOperand::MOLoad;
1142     return true;
1143   case Intrinsic::riscv_masked_strided_store:
1144     Info.opc = ISD::INTRINSIC_VOID;
1145     Info.ptrVal = I.getArgOperand(1);
1146     Info.memVT =
1147         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
1148     Info.align = Align(
1149         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
1150         8);
1151     Info.size = MemoryLocation::UnknownSize;
1152     Info.flags |= MachineMemOperand::MOStore;
1153     return true;
1154   }
1155 }
1156 
1157 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1158                                                 const AddrMode &AM, Type *Ty,
1159                                                 unsigned AS,
1160                                                 Instruction *I) const {
1161   // No global is ever allowed as a base.
1162   if (AM.BaseGV)
1163     return false;
1164 
1165   // Require a 12-bit signed offset.
1166   if (!isInt<12>(AM.BaseOffs))
1167     return false;
1168 
1169   switch (AM.Scale) {
1170   case 0: // "r+i" or just "i", depending on HasBaseReg.
1171     break;
1172   case 1:
1173     if (!AM.HasBaseReg) // allow "r+i".
1174       break;
1175     return false; // disallow "r+r" or "r+r+i".
1176   default:
1177     return false;
1178   }
1179 
1180   return true;
1181 }
1182 
1183 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1184   return isInt<12>(Imm);
1185 }
1186 
1187 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1188   return isInt<12>(Imm);
1189 }
1190 
1191 // On RV32, 64-bit integers are split into their high and low parts and held
1192 // in two different registers, so the trunc is free since the low register can
1193 // just be used.
1194 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1195   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1196     return false;
1197   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1198   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1199   return (SrcBits == 64 && DestBits == 32);
1200 }
1201 
1202 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1203   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1204       !SrcVT.isInteger() || !DstVT.isInteger())
1205     return false;
1206   unsigned SrcBits = SrcVT.getSizeInBits();
1207   unsigned DestBits = DstVT.getSizeInBits();
1208   return (SrcBits == 64 && DestBits == 32);
1209 }
1210 
1211 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1212   // Zexts are free if they can be combined with a load.
1213   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1214   // poorly with type legalization of compares preferring sext.
1215   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1216     EVT MemVT = LD->getMemoryVT();
1217     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1218         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1219          LD->getExtensionType() == ISD::ZEXTLOAD))
1220       return true;
1221   }
1222 
1223   return TargetLowering::isZExtFree(Val, VT2);
1224 }
1225 
1226 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1227   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1228 }
1229 
1230 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1231   return Subtarget.hasStdExtZbb();
1232 }
1233 
1234 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1235   return Subtarget.hasStdExtZbb();
1236 }
1237 
1238 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1239   EVT VT = Y.getValueType();
1240 
1241   // FIXME: Support vectors once we have tests.
1242   if (VT.isVector())
1243     return false;
1244 
1245   return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
1246           Subtarget.hasStdExtZbkb()) &&
1247          !isa<ConstantSDNode>(Y);
1248 }
1249 
1250 /// Check if sinking \p I's operands to I's basic block is profitable, because
1251 /// the operands can be folded into a target instruction, e.g.
1252 /// splats of scalars can fold into vector instructions.
1253 bool RISCVTargetLowering::shouldSinkOperands(
1254     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1255   using namespace llvm::PatternMatch;
1256 
1257   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1258     return false;
1259 
1260   auto IsSinker = [&](Instruction *I, int Operand) {
1261     switch (I->getOpcode()) {
1262     case Instruction::Add:
1263     case Instruction::Sub:
1264     case Instruction::Mul:
1265     case Instruction::And:
1266     case Instruction::Or:
1267     case Instruction::Xor:
1268     case Instruction::FAdd:
1269     case Instruction::FSub:
1270     case Instruction::FMul:
1271     case Instruction::FDiv:
1272     case Instruction::ICmp:
1273     case Instruction::FCmp:
1274       return true;
1275     case Instruction::Shl:
1276     case Instruction::LShr:
1277     case Instruction::AShr:
1278     case Instruction::UDiv:
1279     case Instruction::SDiv:
1280     case Instruction::URem:
1281     case Instruction::SRem:
1282       return Operand == 1;
1283     case Instruction::Call:
1284       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1285         switch (II->getIntrinsicID()) {
1286         case Intrinsic::fma:
1287           return Operand == 0 || Operand == 1;
1288         // FIXME: Our patterns can only match vx/vf instructions when the splat
1289         // it on the RHS, because TableGen doesn't recognize our VP operations
1290         // as commutative.
1291         case Intrinsic::vp_add:
1292         case Intrinsic::vp_mul:
1293         case Intrinsic::vp_and:
1294         case Intrinsic::vp_or:
1295         case Intrinsic::vp_xor:
1296         case Intrinsic::vp_fadd:
1297         case Intrinsic::vp_fmul:
1298         case Intrinsic::vp_shl:
1299         case Intrinsic::vp_lshr:
1300         case Intrinsic::vp_ashr:
1301         case Intrinsic::vp_udiv:
1302         case Intrinsic::vp_sdiv:
1303         case Intrinsic::vp_urem:
1304         case Intrinsic::vp_srem:
1305           return Operand == 1;
1306         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1307         // explicit patterns for both LHS and RHS (as 'vr' versions).
1308         case Intrinsic::vp_sub:
1309         case Intrinsic::vp_fsub:
1310         case Intrinsic::vp_fdiv:
1311           return Operand == 0 || Operand == 1;
1312         default:
1313           return false;
1314         }
1315       }
1316       return false;
1317     default:
1318       return false;
1319     }
1320   };
1321 
1322   for (auto OpIdx : enumerate(I->operands())) {
1323     if (!IsSinker(I, OpIdx.index()))
1324       continue;
1325 
1326     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1327     // Make sure we are not already sinking this operand
1328     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1329       continue;
1330 
1331     // We are looking for a splat that can be sunk.
1332     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1333                              m_Undef(), m_ZeroMask())))
1334       continue;
1335 
1336     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1337     // and vector registers
1338     for (Use &U : Op->uses()) {
1339       Instruction *Insn = cast<Instruction>(U.getUser());
1340       if (!IsSinker(Insn, U.getOperandNo()))
1341         return false;
1342     }
1343 
1344     Ops.push_back(&Op->getOperandUse(0));
1345     Ops.push_back(&OpIdx.value());
1346   }
1347   return true;
1348 }
1349 
1350 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1351                                        bool ForCodeSize) const {
1352   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1353   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1354     return false;
1355   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1356     return false;
1357   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1358     return false;
1359   return Imm.isZero();
1360 }
1361 
1362 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1363   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1364          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1365          (VT == MVT::f64 && Subtarget.hasStdExtD());
1366 }
1367 
1368 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1369                                                       CallingConv::ID CC,
1370                                                       EVT VT) const {
1371   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1372   // We might still end up using a GPR but that will be decided based on ABI.
1373   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1374   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1375     return MVT::f32;
1376 
1377   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1378 }
1379 
1380 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1381                                                            CallingConv::ID CC,
1382                                                            EVT VT) const {
1383   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1384   // We might still end up using a GPR but that will be decided based on ABI.
1385   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1386   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1387     return 1;
1388 
1389   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1390 }
1391 
1392 // Changes the condition code and swaps operands if necessary, so the SetCC
1393 // operation matches one of the comparisons supported directly by branches
1394 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1395 // with 1/-1.
1396 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1397                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1398   // Convert X > -1 to X >= 0.
1399   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1400     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1401     CC = ISD::SETGE;
1402     return;
1403   }
1404   // Convert X < 1 to 0 >= X.
1405   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1406     RHS = LHS;
1407     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1408     CC = ISD::SETGE;
1409     return;
1410   }
1411 
1412   switch (CC) {
1413   default:
1414     break;
1415   case ISD::SETGT:
1416   case ISD::SETLE:
1417   case ISD::SETUGT:
1418   case ISD::SETULE:
1419     CC = ISD::getSetCCSwappedOperands(CC);
1420     std::swap(LHS, RHS);
1421     break;
1422   }
1423 }
1424 
1425 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1426   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1427   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1428   if (VT.getVectorElementType() == MVT::i1)
1429     KnownSize *= 8;
1430 
1431   switch (KnownSize) {
1432   default:
1433     llvm_unreachable("Invalid LMUL.");
1434   case 8:
1435     return RISCVII::VLMUL::LMUL_F8;
1436   case 16:
1437     return RISCVII::VLMUL::LMUL_F4;
1438   case 32:
1439     return RISCVII::VLMUL::LMUL_F2;
1440   case 64:
1441     return RISCVII::VLMUL::LMUL_1;
1442   case 128:
1443     return RISCVII::VLMUL::LMUL_2;
1444   case 256:
1445     return RISCVII::VLMUL::LMUL_4;
1446   case 512:
1447     return RISCVII::VLMUL::LMUL_8;
1448   }
1449 }
1450 
1451 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1452   switch (LMul) {
1453   default:
1454     llvm_unreachable("Invalid LMUL.");
1455   case RISCVII::VLMUL::LMUL_F8:
1456   case RISCVII::VLMUL::LMUL_F4:
1457   case RISCVII::VLMUL::LMUL_F2:
1458   case RISCVII::VLMUL::LMUL_1:
1459     return RISCV::VRRegClassID;
1460   case RISCVII::VLMUL::LMUL_2:
1461     return RISCV::VRM2RegClassID;
1462   case RISCVII::VLMUL::LMUL_4:
1463     return RISCV::VRM4RegClassID;
1464   case RISCVII::VLMUL::LMUL_8:
1465     return RISCV::VRM8RegClassID;
1466   }
1467 }
1468 
1469 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1470   RISCVII::VLMUL LMUL = getLMUL(VT);
1471   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1472       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1473       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1474       LMUL == RISCVII::VLMUL::LMUL_1) {
1475     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1476                   "Unexpected subreg numbering");
1477     return RISCV::sub_vrm1_0 + Index;
1478   }
1479   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1480     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1481                   "Unexpected subreg numbering");
1482     return RISCV::sub_vrm2_0 + Index;
1483   }
1484   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1485     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1486                   "Unexpected subreg numbering");
1487     return RISCV::sub_vrm4_0 + Index;
1488   }
1489   llvm_unreachable("Invalid vector type.");
1490 }
1491 
1492 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1493   if (VT.getVectorElementType() == MVT::i1)
1494     return RISCV::VRRegClassID;
1495   return getRegClassIDForLMUL(getLMUL(VT));
1496 }
1497 
1498 // Attempt to decompose a subvector insert/extract between VecVT and
1499 // SubVecVT via subregister indices. Returns the subregister index that
1500 // can perform the subvector insert/extract with the given element index, as
1501 // well as the index corresponding to any leftover subvectors that must be
1502 // further inserted/extracted within the register class for SubVecVT.
1503 std::pair<unsigned, unsigned>
1504 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1505     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1506     const RISCVRegisterInfo *TRI) {
1507   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1508                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1509                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1510                 "Register classes not ordered");
1511   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1512   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1513   // Try to compose a subregister index that takes us from the incoming
1514   // LMUL>1 register class down to the outgoing one. At each step we half
1515   // the LMUL:
1516   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1517   // Note that this is not guaranteed to find a subregister index, such as
1518   // when we are extracting from one VR type to another.
1519   unsigned SubRegIdx = RISCV::NoSubRegister;
1520   for (const unsigned RCID :
1521        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1522     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1523       VecVT = VecVT.getHalfNumVectorElementsVT();
1524       bool IsHi =
1525           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1526       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1527                                             getSubregIndexByMVT(VecVT, IsHi));
1528       if (IsHi)
1529         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1530     }
1531   return {SubRegIdx, InsertExtractIdx};
1532 }
1533 
1534 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1535 // stores for those types.
1536 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1537   return !Subtarget.useRVVForFixedLengthVectors() ||
1538          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1539 }
1540 
1541 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1542   if (ScalarTy->isPointerTy())
1543     return true;
1544 
1545   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1546       ScalarTy->isIntegerTy(32))
1547     return true;
1548 
1549   if (ScalarTy->isIntegerTy(64))
1550     return Subtarget.hasVInstructionsI64();
1551 
1552   if (ScalarTy->isHalfTy())
1553     return Subtarget.hasVInstructionsF16();
1554   if (ScalarTy->isFloatTy())
1555     return Subtarget.hasVInstructionsF32();
1556   if (ScalarTy->isDoubleTy())
1557     return Subtarget.hasVInstructionsF64();
1558 
1559   return false;
1560 }
1561 
1562 static SDValue getVLOperand(SDValue Op) {
1563   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1564           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
1565          "Unexpected opcode");
1566   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
1567   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
1568   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1569       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
1570   if (!II)
1571     return SDValue();
1572   return Op.getOperand(II->VLOperand + 1 + HasChain);
1573 }
1574 
1575 static bool useRVVForFixedLengthVectorVT(MVT VT,
1576                                          const RISCVSubtarget &Subtarget) {
1577   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1578   if (!Subtarget.useRVVForFixedLengthVectors())
1579     return false;
1580 
1581   // We only support a set of vector types with a consistent maximum fixed size
1582   // across all supported vector element types to avoid legalization issues.
1583   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1584   // fixed-length vector type we support is 1024 bytes.
1585   if (VT.getFixedSizeInBits() > 1024 * 8)
1586     return false;
1587 
1588   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1589 
1590   MVT EltVT = VT.getVectorElementType();
1591 
1592   // Don't use RVV for vectors we cannot scalarize if required.
1593   switch (EltVT.SimpleTy) {
1594   // i1 is supported but has different rules.
1595   default:
1596     return false;
1597   case MVT::i1:
1598     // Masks can only use a single register.
1599     if (VT.getVectorNumElements() > MinVLen)
1600       return false;
1601     MinVLen /= 8;
1602     break;
1603   case MVT::i8:
1604   case MVT::i16:
1605   case MVT::i32:
1606     break;
1607   case MVT::i64:
1608     if (!Subtarget.hasVInstructionsI64())
1609       return false;
1610     break;
1611   case MVT::f16:
1612     if (!Subtarget.hasVInstructionsF16())
1613       return false;
1614     break;
1615   case MVT::f32:
1616     if (!Subtarget.hasVInstructionsF32())
1617       return false;
1618     break;
1619   case MVT::f64:
1620     if (!Subtarget.hasVInstructionsF64())
1621       return false;
1622     break;
1623   }
1624 
1625   // Reject elements larger than ELEN.
1626   if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
1627     return false;
1628 
1629   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1630   // Don't use RVV for types that don't fit.
1631   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1632     return false;
1633 
1634   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1635   // the base fixed length RVV support in place.
1636   if (!VT.isPow2VectorType())
1637     return false;
1638 
1639   return true;
1640 }
1641 
1642 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1643   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1644 }
1645 
1646 // Return the largest legal scalable vector type that matches VT's element type.
1647 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1648                                             const RISCVSubtarget &Subtarget) {
1649   // This may be called before legal types are setup.
1650   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1651           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1652          "Expected legal fixed length vector!");
1653 
1654   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1655   unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();
1656 
1657   MVT EltVT = VT.getVectorElementType();
1658   switch (EltVT.SimpleTy) {
1659   default:
1660     llvm_unreachable("unexpected element type for RVV container");
1661   case MVT::i1:
1662   case MVT::i8:
1663   case MVT::i16:
1664   case MVT::i32:
1665   case MVT::i64:
1666   case MVT::f16:
1667   case MVT::f32:
1668   case MVT::f64: {
1669     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1670     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1671     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1672     unsigned NumElts =
1673         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1674     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1675     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1676     return MVT::getScalableVectorVT(EltVT, NumElts);
1677   }
1678   }
1679 }
1680 
1681 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1682                                             const RISCVSubtarget &Subtarget) {
1683   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1684                                           Subtarget);
1685 }
1686 
1687 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1688   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1689 }
1690 
1691 // Grow V to consume an entire RVV register.
1692 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1693                                        const RISCVSubtarget &Subtarget) {
1694   assert(VT.isScalableVector() &&
1695          "Expected to convert into a scalable vector!");
1696   assert(V.getValueType().isFixedLengthVector() &&
1697          "Expected a fixed length vector operand!");
1698   SDLoc DL(V);
1699   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1700   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1701 }
1702 
1703 // Shrink V so it's just big enough to maintain a VT's worth of data.
1704 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1705                                          const RISCVSubtarget &Subtarget) {
1706   assert(VT.isFixedLengthVector() &&
1707          "Expected to convert into a fixed length vector!");
1708   assert(V.getValueType().isScalableVector() &&
1709          "Expected a scalable vector operand!");
1710   SDLoc DL(V);
1711   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1712   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1713 }
1714 
1715 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1716 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1717 // the vector type that it is contained in.
1718 static std::pair<SDValue, SDValue>
1719 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1720                 const RISCVSubtarget &Subtarget) {
1721   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1722   MVT XLenVT = Subtarget.getXLenVT();
1723   SDValue VL = VecVT.isFixedLengthVector()
1724                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1725                    : DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1726   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1727   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1728   return {Mask, VL};
1729 }
1730 
1731 // As above but assuming the given type is a scalable vector type.
1732 static std::pair<SDValue, SDValue>
1733 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1734                         const RISCVSubtarget &Subtarget) {
1735   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1736   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1737 }
1738 
1739 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1740 // of either is (currently) supported. This can get us into an infinite loop
1741 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1742 // as a ..., etc.
1743 // Until either (or both) of these can reliably lower any node, reporting that
1744 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1745 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1746 // which is not desirable.
1747 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1748     EVT VT, unsigned DefinedValues) const {
1749   return false;
1750 }
1751 
1752 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1753   // Only splats are currently supported.
1754   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1755     return true;
1756 
1757   return false;
1758 }
1759 
1760 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1761                                   const RISCVSubtarget &Subtarget) {
1762   // RISCV FP-to-int conversions saturate to the destination register size, but
1763   // don't produce 0 for nan. We can use a conversion instruction and fix the
1764   // nan case with a compare and a select.
1765   SDValue Src = Op.getOperand(0);
1766 
1767   EVT DstVT = Op.getValueType();
1768   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1769 
1770   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1771   unsigned Opc;
1772   if (SatVT == DstVT)
1773     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1774   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1775     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1776   else
1777     return SDValue();
1778   // FIXME: Support other SatVTs by clamping before or after the conversion.
1779 
1780   SDLoc DL(Op);
1781   SDValue FpToInt = DAG.getNode(
1782       Opc, DL, DstVT, Src,
1783       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1784 
1785   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1786   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1787 }
1788 
1789 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1790 // and back. Taking care to avoid converting values that are nan or already
1791 // correct.
1792 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1793 // have FRM dependencies modeled yet.
1794 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1795   MVT VT = Op.getSimpleValueType();
1796   assert(VT.isVector() && "Unexpected type");
1797 
1798   SDLoc DL(Op);
1799 
1800   // Freeze the source since we are increasing the number of uses.
1801   SDValue Src = DAG.getNode(ISD::FREEZE, DL, VT, Op.getOperand(0));
1802 
1803   // Truncate to integer and convert back to FP.
1804   MVT IntVT = VT.changeVectorElementTypeToInteger();
1805   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1806   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1807 
1808   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1809 
1810   if (Op.getOpcode() == ISD::FCEIL) {
1811     // If the truncated value is the greater than or equal to the original
1812     // value, we've computed the ceil. Otherwise, we went the wrong way and
1813     // need to increase by 1.
1814     // FIXME: This should use a masked operation. Handle here or in isel?
1815     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1816                                  DAG.getConstantFP(1.0, DL, VT));
1817     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1818     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1819   } else if (Op.getOpcode() == ISD::FFLOOR) {
1820     // If the truncated value is the less than or equal to the original value,
1821     // we've computed the floor. Otherwise, we went the wrong way and need to
1822     // decrease by 1.
1823     // FIXME: This should use a masked operation. Handle here or in isel?
1824     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1825                                  DAG.getConstantFP(1.0, DL, VT));
1826     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1827     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1828   }
1829 
1830   // Restore the original sign so that -0.0 is preserved.
1831   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1832 
1833   // Determine the largest integer that can be represented exactly. This and
1834   // values larger than it don't have any fractional bits so don't need to
1835   // be converted.
1836   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1837   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1838   APFloat MaxVal = APFloat(FltSem);
1839   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1840                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1841   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1842 
1843   // If abs(Src) was larger than MaxVal or nan, keep it.
1844   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1845   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1846   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1847 }
1848 
1849 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1850                                  const RISCVSubtarget &Subtarget) {
1851   MVT VT = Op.getSimpleValueType();
1852   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1853 
1854   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1855 
1856   SDLoc DL(Op);
1857   SDValue Mask, VL;
1858   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1859 
1860   unsigned Opc =
1861       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1862   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1863   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1864 }
1865 
1866 struct VIDSequence {
1867   int64_t StepNumerator;
1868   unsigned StepDenominator;
1869   int64_t Addend;
1870 };
1871 
1872 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1873 // to the (non-zero) step S and start value X. This can be then lowered as the
1874 // RVV sequence (VID * S) + X, for example.
1875 // The step S is represented as an integer numerator divided by a positive
1876 // denominator. Note that the implementation currently only identifies
1877 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1878 // cannot detect 2/3, for example.
1879 // Note that this method will also match potentially unappealing index
1880 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1881 // determine whether this is worth generating code for.
1882 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1883   unsigned NumElts = Op.getNumOperands();
1884   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1885   if (!Op.getValueType().isInteger())
1886     return None;
1887 
1888   Optional<unsigned> SeqStepDenom;
1889   Optional<int64_t> SeqStepNum, SeqAddend;
1890   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1891   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1892   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1893     // Assume undef elements match the sequence; we just have to be careful
1894     // when interpolating across them.
1895     if (Op.getOperand(Idx).isUndef())
1896       continue;
1897     // The BUILD_VECTOR must be all constants.
1898     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1899       return None;
1900 
1901     uint64_t Val = Op.getConstantOperandVal(Idx) &
1902                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1903 
1904     if (PrevElt) {
1905       // Calculate the step since the last non-undef element, and ensure
1906       // it's consistent across the entire sequence.
1907       unsigned IdxDiff = Idx - PrevElt->second;
1908       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1909 
1910       // A zero-value value difference means that we're somewhere in the middle
1911       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1912       // step change before evaluating the sequence.
1913       if (ValDiff != 0) {
1914         int64_t Remainder = ValDiff % IdxDiff;
1915         // Normalize the step if it's greater than 1.
1916         if (Remainder != ValDiff) {
1917           // The difference must cleanly divide the element span.
1918           if (Remainder != 0)
1919             return None;
1920           ValDiff /= IdxDiff;
1921           IdxDiff = 1;
1922         }
1923 
1924         if (!SeqStepNum)
1925           SeqStepNum = ValDiff;
1926         else if (ValDiff != SeqStepNum)
1927           return None;
1928 
1929         if (!SeqStepDenom)
1930           SeqStepDenom = IdxDiff;
1931         else if (IdxDiff != *SeqStepDenom)
1932           return None;
1933       }
1934     }
1935 
1936     // Record and/or check any addend.
1937     if (SeqStepNum && SeqStepDenom) {
1938       uint64_t ExpectedVal =
1939           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1940       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1941       if (!SeqAddend)
1942         SeqAddend = Addend;
1943       else if (SeqAddend != Addend)
1944         return None;
1945     }
1946 
1947     // Record this non-undef element for later.
1948     if (!PrevElt || PrevElt->first != Val)
1949       PrevElt = std::make_pair(Val, Idx);
1950   }
1951   // We need to have logged both a step and an addend for this to count as
1952   // a legal index sequence.
1953   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
1954     return None;
1955 
1956   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1957 }
1958 
1959 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1960                                  const RISCVSubtarget &Subtarget) {
1961   MVT VT = Op.getSimpleValueType();
1962   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1963 
1964   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1965 
1966   SDLoc DL(Op);
1967   SDValue Mask, VL;
1968   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1969 
1970   MVT XLenVT = Subtarget.getXLenVT();
1971   unsigned NumElts = Op.getNumOperands();
1972 
1973   if (VT.getVectorElementType() == MVT::i1) {
1974     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1975       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1976       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1977     }
1978 
1979     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1980       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1981       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1982     }
1983 
1984     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1985     // scalar integer chunks whose bit-width depends on the number of mask
1986     // bits and XLEN.
1987     // First, determine the most appropriate scalar integer type to use. This
1988     // is at most XLenVT, but may be shrunk to a smaller vector element type
1989     // according to the size of the final vector - use i8 chunks rather than
1990     // XLenVT if we're producing a v8i1. This results in more consistent
1991     // codegen across RV32 and RV64.
1992     unsigned NumViaIntegerBits =
1993         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1994     NumViaIntegerBits = std::min(NumViaIntegerBits,
1995                                  Subtarget.getMaxELENForFixedLengthVectors());
1996     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1997       // If we have to use more than one INSERT_VECTOR_ELT then this
1998       // optimization is likely to increase code size; avoid peforming it in
1999       // such a case. We can use a load from a constant pool in this case.
2000       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
2001         return SDValue();
2002       // Now we can create our integer vector type. Note that it may be larger
2003       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
2004       MVT IntegerViaVecVT =
2005           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
2006                            divideCeil(NumElts, NumViaIntegerBits));
2007 
2008       uint64_t Bits = 0;
2009       unsigned BitPos = 0, IntegerEltIdx = 0;
2010       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
2011 
2012       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
2013         // Once we accumulate enough bits to fill our scalar type, insert into
2014         // our vector and clear our accumulated data.
2015         if (I != 0 && I % NumViaIntegerBits == 0) {
2016           if (NumViaIntegerBits <= 32)
2017             Bits = SignExtend64(Bits, 32);
2018           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2019           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
2020                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2021           Bits = 0;
2022           BitPos = 0;
2023           IntegerEltIdx++;
2024         }
2025         SDValue V = Op.getOperand(I);
2026         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
2027         Bits |= ((uint64_t)BitValue << BitPos);
2028       }
2029 
2030       // Insert the (remaining) scalar value into position in our integer
2031       // vector type.
2032       if (NumViaIntegerBits <= 32)
2033         Bits = SignExtend64(Bits, 32);
2034       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2035       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
2036                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2037 
2038       if (NumElts < NumViaIntegerBits) {
2039         // If we're producing a smaller vector than our minimum legal integer
2040         // type, bitcast to the equivalent (known-legal) mask type, and extract
2041         // our final mask.
2042         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
2043         Vec = DAG.getBitcast(MVT::v8i1, Vec);
2044         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
2045                           DAG.getConstant(0, DL, XLenVT));
2046       } else {
2047         // Else we must have produced an integer type with the same size as the
2048         // mask type; bitcast for the final result.
2049         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
2050         Vec = DAG.getBitcast(VT, Vec);
2051       }
2052 
2053       return Vec;
2054     }
2055 
2056     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
2057     // vector type, we have a legal equivalently-sized i8 type, so we can use
2058     // that.
2059     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
2060     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
2061 
2062     SDValue WideVec;
2063     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2064       // For a splat, perform a scalar truncate before creating the wider
2065       // vector.
2066       assert(Splat.getValueType() == XLenVT &&
2067              "Unexpected type for i1 splat value");
2068       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2069                           DAG.getConstant(1, DL, XLenVT));
2070       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2071     } else {
2072       SmallVector<SDValue, 8> Ops(Op->op_values());
2073       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2074       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2075       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2076     }
2077 
2078     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2079   }
2080 
2081   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2082     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2083                                         : RISCVISD::VMV_V_X_VL;
2084     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
2085     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2086   }
2087 
2088   // Try and match index sequences, which we can lower to the vid instruction
2089   // with optional modifications. An all-undef vector is matched by
2090   // getSplatValue, above.
2091   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2092     int64_t StepNumerator = SimpleVID->StepNumerator;
2093     unsigned StepDenominator = SimpleVID->StepDenominator;
2094     int64_t Addend = SimpleVID->Addend;
2095 
2096     assert(StepNumerator != 0 && "Invalid step");
2097     bool Negate = false;
2098     int64_t SplatStepVal = StepNumerator;
2099     unsigned StepOpcode = ISD::MUL;
2100     if (StepNumerator != 1) {
2101       if (isPowerOf2_64(std::abs(StepNumerator))) {
2102         Negate = StepNumerator < 0;
2103         StepOpcode = ISD::SHL;
2104         SplatStepVal = Log2_64(std::abs(StepNumerator));
2105       }
2106     }
2107 
2108     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
2109     // threshold since it's the immediate value many RVV instructions accept.
2110     // There is no vmul.vi instruction so ensure multiply constant can fit in
2111     // a single addi instruction.
2112     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2113          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2114         isPowerOf2_32(StepDenominator) && isInt<5>(Addend)) {
2115       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2116       // Convert right out of the scalable type so we can use standard ISD
2117       // nodes for the rest of the computation. If we used scalable types with
2118       // these, we'd lose the fixed-length vector info and generate worse
2119       // vsetvli code.
2120       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2121       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2122           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2123         SDValue SplatStep = DAG.getSplatVector(
2124             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2125         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2126       }
2127       if (StepDenominator != 1) {
2128         SDValue SplatStep = DAG.getSplatVector(
2129             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2130         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2131       }
2132       if (Addend != 0 || Negate) {
2133         SDValue SplatAddend =
2134             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
2135         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
2136       }
2137       return VID;
2138     }
2139   }
2140 
2141   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2142   // when re-interpreted as a vector with a larger element type. For example,
2143   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2144   // could be instead splat as
2145   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2146   // TODO: This optimization could also work on non-constant splats, but it
2147   // would require bit-manipulation instructions to construct the splat value.
2148   SmallVector<SDValue> Sequence;
2149   unsigned EltBitSize = VT.getScalarSizeInBits();
2150   const auto *BV = cast<BuildVectorSDNode>(Op);
2151   if (VT.isInteger() && EltBitSize < 64 &&
2152       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2153       BV->getRepeatedSequence(Sequence) &&
2154       (Sequence.size() * EltBitSize) <= 64) {
2155     unsigned SeqLen = Sequence.size();
2156     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2157     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2158     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2159             ViaIntVT == MVT::i64) &&
2160            "Unexpected sequence type");
2161 
2162     unsigned EltIdx = 0;
2163     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2164     uint64_t SplatValue = 0;
2165     // Construct the amalgamated value which can be splatted as this larger
2166     // vector type.
2167     for (const auto &SeqV : Sequence) {
2168       if (!SeqV.isUndef())
2169         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2170                        << (EltIdx * EltBitSize));
2171       EltIdx++;
2172     }
2173 
2174     // On RV64, sign-extend from 32 to 64 bits where possible in order to
2175     // achieve better constant materializion.
2176     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2177       SplatValue = SignExtend64(SplatValue, 32);
2178 
2179     // Since we can't introduce illegal i64 types at this stage, we can only
2180     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2181     // way we can use RVV instructions to splat.
2182     assert((ViaIntVT.bitsLE(XLenVT) ||
2183             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2184            "Unexpected bitcast sequence");
2185     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2186       SDValue ViaVL =
2187           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2188       MVT ViaContainerVT =
2189           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2190       SDValue Splat =
2191           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2192                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2193       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2194       return DAG.getBitcast(VT, Splat);
2195     }
2196   }
2197 
2198   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2199   // which constitute a large proportion of the elements. In such cases we can
2200   // splat a vector with the dominant element and make up the shortfall with
2201   // INSERT_VECTOR_ELTs.
2202   // Note that this includes vectors of 2 elements by association. The
2203   // upper-most element is the "dominant" one, allowing us to use a splat to
2204   // "insert" the upper element, and an insert of the lower element at position
2205   // 0, which improves codegen.
2206   SDValue DominantValue;
2207   unsigned MostCommonCount = 0;
2208   DenseMap<SDValue, unsigned> ValueCounts;
2209   unsigned NumUndefElts =
2210       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2211 
2212   // Track the number of scalar loads we know we'd be inserting, estimated as
2213   // any non-zero floating-point constant. Other kinds of element are either
2214   // already in registers or are materialized on demand. The threshold at which
2215   // a vector load is more desirable than several scalar materializion and
2216   // vector-insertion instructions is not known.
2217   unsigned NumScalarLoads = 0;
2218 
2219   for (SDValue V : Op->op_values()) {
2220     if (V.isUndef())
2221       continue;
2222 
2223     ValueCounts.insert(std::make_pair(V, 0));
2224     unsigned &Count = ValueCounts[V];
2225 
2226     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2227       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2228 
2229     // Is this value dominant? In case of a tie, prefer the highest element as
2230     // it's cheaper to insert near the beginning of a vector than it is at the
2231     // end.
2232     if (++Count >= MostCommonCount) {
2233       DominantValue = V;
2234       MostCommonCount = Count;
2235     }
2236   }
2237 
2238   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2239   unsigned NumDefElts = NumElts - NumUndefElts;
2240   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2241 
2242   // Don't perform this optimization when optimizing for size, since
2243   // materializing elements and inserting them tends to cause code bloat.
2244   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2245       ((MostCommonCount > DominantValueCountThreshold) ||
2246        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2247     // Start by splatting the most common element.
2248     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2249 
2250     DenseSet<SDValue> Processed{DominantValue};
2251     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2252     for (const auto &OpIdx : enumerate(Op->ops())) {
2253       const SDValue &V = OpIdx.value();
2254       if (V.isUndef() || !Processed.insert(V).second)
2255         continue;
2256       if (ValueCounts[V] == 1) {
2257         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2258                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2259       } else {
2260         // Blend in all instances of this value using a VSELECT, using a
2261         // mask where each bit signals whether that element is the one
2262         // we're after.
2263         SmallVector<SDValue> Ops;
2264         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2265           return DAG.getConstant(V == V1, DL, XLenVT);
2266         });
2267         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2268                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2269                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2270       }
2271     }
2272 
2273     return Vec;
2274   }
2275 
2276   return SDValue();
2277 }
2278 
2279 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
2280                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
2281   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2282     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2283     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2284     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2285     // node in order to try and match RVV vector/scalar instructions.
2286     if ((LoC >> 31) == HiC)
2287       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
2288 
2289     // If vl is equal to VLMax and Hi constant is equal to Lo, we could use
2290     // vmv.v.x whose EEW = 32 to lower it.
2291     auto *Const = dyn_cast<ConstantSDNode>(VL);
2292     if (LoC == HiC && Const && Const->getSExtValue() == RISCV::VLMaxSentinel) {
2293       MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
2294       // TODO: if vl <= min(VLMAX), we can also do this. But we could not
2295       // access the subtarget here now.
2296       auto InterVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterVT, Lo, VL);
2297       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2298     }
2299   }
2300 
2301   // Fall back to a stack store and stride x0 vector load.
2302   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
2303 }
2304 
2305 // Called by type legalization to handle splat of i64 on RV32.
2306 // FIXME: We can optimize this when the type has sign or zero bits in one
2307 // of the halves.
2308 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
2309                                    SDValue VL, SelectionDAG &DAG) {
2310   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2311   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2312                            DAG.getConstant(0, DL, MVT::i32));
2313   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2314                            DAG.getConstant(1, DL, MVT::i32));
2315   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
2316 }
2317 
2318 // This function lowers a splat of a scalar operand Splat with the vector
2319 // length VL. It ensures the final sequence is type legal, which is useful when
2320 // lowering a splat after type legalization.
2321 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
2322                                 SelectionDAG &DAG,
2323                                 const RISCVSubtarget &Subtarget) {
2324   if (VT.isFloatingPoint()) {
2325     // If VL is 1, we could use vfmv.s.f.
2326     if (isOneConstant(VL))
2327       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, DAG.getUNDEF(VT),
2328                          Scalar, VL);
2329     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
2330   }
2331 
2332   MVT XLenVT = Subtarget.getXLenVT();
2333 
2334   // Simplest case is that the operand needs to be promoted to XLenVT.
2335   if (Scalar.getValueType().bitsLE(XLenVT)) {
2336     // If the operand is a constant, sign extend to increase our chances
2337     // of being able to use a .vi instruction. ANY_EXTEND would become a
2338     // a zero extend and the simm5 check in isel would fail.
2339     // FIXME: Should we ignore the upper bits in isel instead?
2340     unsigned ExtOpc =
2341         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2342     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2343     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2344     // If VL is 1 and the scalar value won't benefit from immediate, we could
2345     // use vmv.s.x.
2346     if (isOneConstant(VL) &&
2347         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2348       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, DAG.getUNDEF(VT), Scalar,
2349                          VL);
2350     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
2351   }
2352 
2353   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2354          "Unexpected scalar for splat lowering!");
2355 
2356   if (isOneConstant(VL) && isNullConstant(Scalar))
2357     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, DAG.getUNDEF(VT),
2358                        DAG.getConstant(0, DL, XLenVT), VL);
2359 
2360   // Otherwise use the more complicated splatting algorithm.
2361   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
2362 }
2363 
2364 // Is the mask a slidedown that shifts in undefs.
2365 static int matchShuffleAsSlideDown(ArrayRef<int> Mask) {
2366   int Size = Mask.size();
2367 
2368   // Elements shifted in should be undef.
2369   auto CheckUndefs = [&](int Shift) {
2370     for (int i = Size - Shift; i != Size; ++i)
2371       if (Mask[i] >= 0)
2372         return false;
2373     return true;
2374   };
2375 
2376   // Elements should be shifted or undef.
2377   auto MatchShift = [&](int Shift) {
2378     for (int i = 0; i != Size - Shift; ++i)
2379        if (Mask[i] >= 0 && Mask[i] != Shift + i)
2380          return false;
2381     return true;
2382   };
2383 
2384   // Try all possible shifts.
2385   for (int Shift = 1; Shift != Size; ++Shift)
2386     if (CheckUndefs(Shift) && MatchShift(Shift))
2387       return Shift;
2388 
2389   // No match.
2390   return -1;
2391 }
2392 
2393 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
2394                                 const RISCVSubtarget &Subtarget) {
2395   // We need to be able to widen elements to the next larger integer type.
2396   if (VT.getScalarSizeInBits() >= Subtarget.getMaxELENForFixedLengthVectors())
2397     return false;
2398 
2399   int Size = Mask.size();
2400   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
2401 
2402   int Srcs[] = {-1, -1};
2403   for (int i = 0; i != Size; ++i) {
2404     // Ignore undef elements.
2405     if (Mask[i] < 0)
2406       continue;
2407 
2408     // Is this an even or odd element.
2409     int Pol = i % 2;
2410 
2411     // Ensure we consistently use the same source for this element polarity.
2412     int Src = Mask[i] / Size;
2413     if (Srcs[Pol] < 0)
2414       Srcs[Pol] = Src;
2415     if (Srcs[Pol] != Src)
2416       return false;
2417 
2418     // Make sure the element within the source is appropriate for this element
2419     // in the destination.
2420     int Elt = Mask[i] % Size;
2421     if (Elt != i / 2)
2422       return false;
2423   }
2424 
2425   // We need to find a source for each polarity and they can't be the same.
2426   if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
2427     return false;
2428 
2429   // Swap the sources if the second source was in the even polarity.
2430   SwapSources = Srcs[0] > Srcs[1];
2431 
2432   return true;
2433 }
2434 
2435 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2436                                    const RISCVSubtarget &Subtarget) {
2437   SDValue V1 = Op.getOperand(0);
2438   SDValue V2 = Op.getOperand(1);
2439   SDLoc DL(Op);
2440   MVT XLenVT = Subtarget.getXLenVT();
2441   MVT VT = Op.getSimpleValueType();
2442   unsigned NumElts = VT.getVectorNumElements();
2443   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2444 
2445   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2446 
2447   SDValue TrueMask, VL;
2448   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2449 
2450   if (SVN->isSplat()) {
2451     const int Lane = SVN->getSplatIndex();
2452     if (Lane >= 0) {
2453       MVT SVT = VT.getVectorElementType();
2454 
2455       // Turn splatted vector load into a strided load with an X0 stride.
2456       SDValue V = V1;
2457       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2458       // with undef.
2459       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2460       int Offset = Lane;
2461       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2462         int OpElements =
2463             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2464         V = V.getOperand(Offset / OpElements);
2465         Offset %= OpElements;
2466       }
2467 
2468       // We need to ensure the load isn't atomic or volatile.
2469       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2470         auto *Ld = cast<LoadSDNode>(V);
2471         Offset *= SVT.getStoreSize();
2472         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2473                                                    TypeSize::Fixed(Offset), DL);
2474 
2475         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2476         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2477           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2478           SDValue IntID =
2479               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2480           SDValue Ops[] = {Ld->getChain(),
2481                            IntID,
2482                            DAG.getUNDEF(ContainerVT),
2483                            NewAddr,
2484                            DAG.getRegister(RISCV::X0, XLenVT),
2485                            VL};
2486           SDValue NewLoad = DAG.getMemIntrinsicNode(
2487               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2488               DAG.getMachineFunction().getMachineMemOperand(
2489                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2490           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2491           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2492         }
2493 
2494         // Otherwise use a scalar load and splat. This will give the best
2495         // opportunity to fold a splat into the operation. ISel can turn it into
2496         // the x0 strided load if we aren't able to fold away the select.
2497         if (SVT.isFloatingPoint())
2498           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2499                           Ld->getPointerInfo().getWithOffset(Offset),
2500                           Ld->getOriginalAlign(),
2501                           Ld->getMemOperand()->getFlags());
2502         else
2503           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2504                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2505                              Ld->getOriginalAlign(),
2506                              Ld->getMemOperand()->getFlags());
2507         DAG.makeEquivalentMemoryOrdering(Ld, V);
2508 
2509         unsigned Opc =
2510             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2511         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
2512         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2513       }
2514 
2515       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2516       assert(Lane < (int)NumElts && "Unexpected lane!");
2517       SDValue Gather =
2518           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2519                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2520       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2521     }
2522   }
2523 
2524   ArrayRef<int> Mask = SVN->getMask();
2525 
2526   // Try to match as a slidedown.
2527   int SlideAmt = matchShuffleAsSlideDown(Mask);
2528   if (SlideAmt >= 0) {
2529     // TODO: Should we reduce the VL to account for the upper undef elements?
2530     // Requires additional vsetvlis, but might be faster to execute.
2531     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2532     SDValue SlideDown =
2533         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
2534                     DAG.getUNDEF(ContainerVT), V1,
2535                     DAG.getConstant(SlideAmt, DL, XLenVT),
2536                     TrueMask, VL);
2537     return convertFromScalableVector(VT, SlideDown, DAG, Subtarget);
2538   }
2539 
2540   // Detect an interleave shuffle and lower to
2541   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
2542   bool SwapSources;
2543   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2544     // Swap sources if needed.
2545     if (SwapSources)
2546       std::swap(V1, V2);
2547 
2548     // Extract the lower half of the vectors.
2549     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2550     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2551                      DAG.getConstant(0, DL, XLenVT));
2552     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2553                      DAG.getConstant(0, DL, XLenVT));
2554 
2555     // Double the element width and halve the number of elements in an int type.
2556     unsigned EltBits = VT.getScalarSizeInBits();
2557     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2558     MVT WideIntVT =
2559         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2560     // Convert this to a scalable vector. We need to base this on the
2561     // destination size to ensure there's always a type with a smaller LMUL.
2562     MVT WideIntContainerVT =
2563         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2564 
2565     // Convert sources to scalable vectors with the same element count as the
2566     // larger type.
2567     MVT HalfContainerVT = MVT::getVectorVT(
2568         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2569     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2570     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2571 
2572     // Cast sources to integer.
2573     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2574     MVT IntHalfVT =
2575         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2576     V1 = DAG.getBitcast(IntHalfVT, V1);
2577     V2 = DAG.getBitcast(IntHalfVT, V2);
2578 
2579     // Freeze V2 since we use it twice and we need to be sure that the add and
2580     // multiply see the same value.
2581     V2 = DAG.getNode(ISD::FREEZE, DL, IntHalfVT, V2);
2582 
2583     // Recreate TrueMask using the widened type's element count.
2584     MVT MaskVT =
2585         MVT::getVectorVT(MVT::i1, HalfContainerVT.getVectorElementCount());
2586     TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2587 
2588     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2589     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2590                               V2, TrueMask, VL);
2591     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2592     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2593                                      DAG.getAllOnesConstant(DL, XLenVT));
2594     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2595                                    V2, Multiplier, TrueMask, VL);
2596     // Add the new copies to our previous addition giving us 2^eltbits copies of
2597     // V2. This is equivalent to shifting V2 left by eltbits. This should
2598     // combine with the vwmulu.vv above to form vwmaccu.vv.
2599     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2600                       TrueMask, VL);
2601     // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
2602     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2603     // vector VT.
2604     ContainerVT =
2605         MVT::getVectorVT(VT.getVectorElementType(),
2606                          WideIntContainerVT.getVectorElementCount() * 2);
2607     Add = DAG.getBitcast(ContainerVT, Add);
2608     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2609   }
2610 
2611   // Detect shuffles which can be re-expressed as vector selects; these are
2612   // shuffles in which each element in the destination is taken from an element
2613   // at the corresponding index in either source vectors.
2614   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2615     int MaskIndex = MaskIdx.value();
2616     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2617   });
2618 
2619   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2620 
2621   SmallVector<SDValue> MaskVals;
2622   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2623   // merged with a second vrgather.
2624   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2625 
2626   // By default we preserve the original operand order, and use a mask to
2627   // select LHS as true and RHS as false. However, since RVV vector selects may
2628   // feature splats but only on the LHS, we may choose to invert our mask and
2629   // instead select between RHS and LHS.
2630   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2631   bool InvertMask = IsSelect == SwapOps;
2632 
2633   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2634   // half.
2635   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2636 
2637   // Now construct the mask that will be used by the vselect or blended
2638   // vrgather operation. For vrgathers, construct the appropriate indices into
2639   // each vector.
2640   for (int MaskIndex : Mask) {
2641     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2642     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2643     if (!IsSelect) {
2644       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2645       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2646                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2647                                      : DAG.getUNDEF(XLenVT));
2648       GatherIndicesRHS.push_back(
2649           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2650                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2651       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2652         ++LHSIndexCounts[MaskIndex];
2653       if (!IsLHSOrUndefIndex)
2654         ++RHSIndexCounts[MaskIndex - NumElts];
2655     }
2656   }
2657 
2658   if (SwapOps) {
2659     std::swap(V1, V2);
2660     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2661   }
2662 
2663   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2664   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2665   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2666 
2667   if (IsSelect)
2668     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2669 
2670   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2671     // On such a large vector we're unable to use i8 as the index type.
2672     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2673     // may involve vector splitting if we're already at LMUL=8, or our
2674     // user-supplied maximum fixed-length LMUL.
2675     return SDValue();
2676   }
2677 
2678   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2679   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2680   MVT IndexVT = VT.changeTypeToInteger();
2681   // Since we can't introduce illegal index types at this stage, use i16 and
2682   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2683   // than XLenVT.
2684   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2685     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2686     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2687   }
2688 
2689   MVT IndexContainerVT =
2690       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2691 
2692   SDValue Gather;
2693   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2694   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2695   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2696     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
2697   } else {
2698     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2699     // If only one index is used, we can use a "splat" vrgather.
2700     // TODO: We can splat the most-common index and fix-up any stragglers, if
2701     // that's beneficial.
2702     if (LHSIndexCounts.size() == 1) {
2703       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2704       Gather =
2705           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2706                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2707     } else {
2708       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2709       LHSIndices =
2710           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2711 
2712       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2713                            TrueMask, VL);
2714     }
2715   }
2716 
2717   // If a second vector operand is used by this shuffle, blend it in with an
2718   // additional vrgather.
2719   if (!V2.isUndef()) {
2720     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2721     // If only one index is used, we can use a "splat" vrgather.
2722     // TODO: We can splat the most-common index and fix-up any stragglers, if
2723     // that's beneficial.
2724     if (RHSIndexCounts.size() == 1) {
2725       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2726       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2727                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2728     } else {
2729       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2730       RHSIndices =
2731           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2732       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2733                        VL);
2734     }
2735 
2736     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2737     SelectMask =
2738         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2739 
2740     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2741                          Gather, VL);
2742   }
2743 
2744   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2745 }
2746 
2747 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2748                                      SDLoc DL, SelectionDAG &DAG,
2749                                      const RISCVSubtarget &Subtarget) {
2750   if (VT.isScalableVector())
2751     return DAG.getFPExtendOrRound(Op, DL, VT);
2752   assert(VT.isFixedLengthVector() &&
2753          "Unexpected value type for RVV FP extend/round lowering");
2754   SDValue Mask, VL;
2755   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2756   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2757                         ? RISCVISD::FP_EXTEND_VL
2758                         : RISCVISD::FP_ROUND_VL;
2759   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2760 }
2761 
2762 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2763 // the exponent.
2764 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2765   MVT VT = Op.getSimpleValueType();
2766   unsigned EltSize = VT.getScalarSizeInBits();
2767   SDValue Src = Op.getOperand(0);
2768   SDLoc DL(Op);
2769 
2770   // We need a FP type that can represent the value.
2771   // TODO: Use f16 for i8 when possible?
2772   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2773   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2774 
2775   // Legal types should have been checked in the RISCVTargetLowering
2776   // constructor.
2777   // TODO: Splitting may make sense in some cases.
2778   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2779          "Expected legal float type!");
2780 
2781   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2782   // The trailing zero count is equal to log2 of this single bit value.
2783   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2784     SDValue Neg =
2785         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2786     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2787   }
2788 
2789   // We have a legal FP type, convert to it.
2790   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2791   // Bitcast to integer and shift the exponent to the LSB.
2792   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2793   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2794   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2795   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2796                               DAG.getConstant(ShiftAmt, DL, IntVT));
2797   // Truncate back to original type to allow vnsrl.
2798   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2799   // The exponent contains log2 of the value in biased form.
2800   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2801 
2802   // For trailing zeros, we just need to subtract the bias.
2803   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2804     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2805                        DAG.getConstant(ExponentBias, DL, VT));
2806 
2807   // For leading zeros, we need to remove the bias and convert from log2 to
2808   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2809   unsigned Adjust = ExponentBias + (EltSize - 1);
2810   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2811 }
2812 
2813 // While RVV has alignment restrictions, we should always be able to load as a
2814 // legal equivalently-sized byte-typed vector instead. This method is
2815 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
2816 // the load is already correctly-aligned, it returns SDValue().
2817 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2818                                                     SelectionDAG &DAG) const {
2819   auto *Load = cast<LoadSDNode>(Op);
2820   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2821 
2822   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2823                                      Load->getMemoryVT(),
2824                                      *Load->getMemOperand()))
2825     return SDValue();
2826 
2827   SDLoc DL(Op);
2828   MVT VT = Op.getSimpleValueType();
2829   unsigned EltSizeBits = VT.getScalarSizeInBits();
2830   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2831          "Unexpected unaligned RVV load type");
2832   MVT NewVT =
2833       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2834   assert(NewVT.isValid() &&
2835          "Expecting equally-sized RVV vector types to be legal");
2836   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2837                           Load->getPointerInfo(), Load->getOriginalAlign(),
2838                           Load->getMemOperand()->getFlags());
2839   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2840 }
2841 
2842 // While RVV has alignment restrictions, we should always be able to store as a
2843 // legal equivalently-sized byte-typed vector instead. This method is
2844 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
2845 // returns SDValue() if the store is already correctly aligned.
2846 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2847                                                      SelectionDAG &DAG) const {
2848   auto *Store = cast<StoreSDNode>(Op);
2849   assert(Store && Store->getValue().getValueType().isVector() &&
2850          "Expected vector store");
2851 
2852   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2853                                      Store->getMemoryVT(),
2854                                      *Store->getMemOperand()))
2855     return SDValue();
2856 
2857   SDLoc DL(Op);
2858   SDValue StoredVal = Store->getValue();
2859   MVT VT = StoredVal.getSimpleValueType();
2860   unsigned EltSizeBits = VT.getScalarSizeInBits();
2861   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2862          "Unexpected unaligned RVV store type");
2863   MVT NewVT =
2864       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2865   assert(NewVT.isValid() &&
2866          "Expecting equally-sized RVV vector types to be legal");
2867   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2868   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2869                       Store->getPointerInfo(), Store->getOriginalAlign(),
2870                       Store->getMemOperand()->getFlags());
2871 }
2872 
2873 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2874                                             SelectionDAG &DAG) const {
2875   switch (Op.getOpcode()) {
2876   default:
2877     report_fatal_error("unimplemented operand");
2878   case ISD::GlobalAddress:
2879     return lowerGlobalAddress(Op, DAG);
2880   case ISD::BlockAddress:
2881     return lowerBlockAddress(Op, DAG);
2882   case ISD::ConstantPool:
2883     return lowerConstantPool(Op, DAG);
2884   case ISD::JumpTable:
2885     return lowerJumpTable(Op, DAG);
2886   case ISD::GlobalTLSAddress:
2887     return lowerGlobalTLSAddress(Op, DAG);
2888   case ISD::SELECT:
2889     return lowerSELECT(Op, DAG);
2890   case ISD::BRCOND:
2891     return lowerBRCOND(Op, DAG);
2892   case ISD::VASTART:
2893     return lowerVASTART(Op, DAG);
2894   case ISD::FRAMEADDR:
2895     return lowerFRAMEADDR(Op, DAG);
2896   case ISD::RETURNADDR:
2897     return lowerRETURNADDR(Op, DAG);
2898   case ISD::SHL_PARTS:
2899     return lowerShiftLeftParts(Op, DAG);
2900   case ISD::SRA_PARTS:
2901     return lowerShiftRightParts(Op, DAG, true);
2902   case ISD::SRL_PARTS:
2903     return lowerShiftRightParts(Op, DAG, false);
2904   case ISD::BITCAST: {
2905     SDLoc DL(Op);
2906     EVT VT = Op.getValueType();
2907     SDValue Op0 = Op.getOperand(0);
2908     EVT Op0VT = Op0.getValueType();
2909     MVT XLenVT = Subtarget.getXLenVT();
2910     if (VT.isFixedLengthVector()) {
2911       // We can handle fixed length vector bitcasts with a simple replacement
2912       // in isel.
2913       if (Op0VT.isFixedLengthVector())
2914         return Op;
2915       // When bitcasting from scalar to fixed-length vector, insert the scalar
2916       // into a one-element vector of the result type, and perform a vector
2917       // bitcast.
2918       if (!Op0VT.isVector()) {
2919         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2920         if (!isTypeLegal(BVT))
2921           return SDValue();
2922         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2923                                               DAG.getUNDEF(BVT), Op0,
2924                                               DAG.getConstant(0, DL, XLenVT)));
2925       }
2926       return SDValue();
2927     }
2928     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2929     // thus: bitcast the vector to a one-element vector type whose element type
2930     // is the same as the result type, and extract the first element.
2931     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2932       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
2933       if (!isTypeLegal(BVT))
2934         return SDValue();
2935       SDValue BVec = DAG.getBitcast(BVT, Op0);
2936       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2937                          DAG.getConstant(0, DL, XLenVT));
2938     }
2939     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2940       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2941       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2942       return FPConv;
2943     }
2944     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2945         Subtarget.hasStdExtF()) {
2946       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2947       SDValue FPConv =
2948           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2949       return FPConv;
2950     }
2951     return SDValue();
2952   }
2953   case ISD::INTRINSIC_WO_CHAIN:
2954     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2955   case ISD::INTRINSIC_W_CHAIN:
2956     return LowerINTRINSIC_W_CHAIN(Op, DAG);
2957   case ISD::INTRINSIC_VOID:
2958     return LowerINTRINSIC_VOID(Op, DAG);
2959   case ISD::BSWAP:
2960   case ISD::BITREVERSE: {
2961     MVT VT = Op.getSimpleValueType();
2962     SDLoc DL(Op);
2963     if (Subtarget.hasStdExtZbp()) {
2964       // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
2965       // Start with the maximum immediate value which is the bitwidth - 1.
2966       unsigned Imm = VT.getSizeInBits() - 1;
2967       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
2968       if (Op.getOpcode() == ISD::BSWAP)
2969         Imm &= ~0x7U;
2970       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
2971                          DAG.getConstant(Imm, DL, VT));
2972     }
2973     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
2974     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
2975     // Expand bitreverse to a bswap(rev8) followed by brev8.
2976     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
2977     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
2978     // as brev8 by an isel pattern.
2979     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
2980                        DAG.getConstant(7, DL, VT));
2981   }
2982   case ISD::FSHL:
2983   case ISD::FSHR: {
2984     MVT VT = Op.getSimpleValueType();
2985     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
2986     SDLoc DL(Op);
2987     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
2988     // use log(XLen) bits. Mask the shift amount accordingly to prevent
2989     // accidentally setting the extra bit.
2990     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2991     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2992                                 DAG.getConstant(ShAmtWidth, DL, VT));
2993     // fshl and fshr concatenate their operands in the same order. fsr and fsl
2994     // instruction use different orders. fshl will return its first operand for
2995     // shift of zero, fshr will return its second operand. fsl and fsr both
2996     // return rs1 so the ISD nodes need to have different operand orders.
2997     // Shift amount is in rs2.
2998     SDValue Op0 = Op.getOperand(0);
2999     SDValue Op1 = Op.getOperand(1);
3000     unsigned Opc = RISCVISD::FSL;
3001     if (Op.getOpcode() == ISD::FSHR) {
3002       std::swap(Op0, Op1);
3003       Opc = RISCVISD::FSR;
3004     }
3005     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3006   }
3007   case ISD::TRUNCATE: {
3008     SDLoc DL(Op);
3009     MVT VT = Op.getSimpleValueType();
3010     // Only custom-lower vector truncates
3011     if (!VT.isVector())
3012       return Op;
3013 
3014     // Truncates to mask types are handled differently
3015     if (VT.getVectorElementType() == MVT::i1)
3016       return lowerVectorMaskTrunc(Op, DAG);
3017 
3018     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
3019     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
3020     // truncate by one power of two at a time.
3021     MVT DstEltVT = VT.getVectorElementType();
3022 
3023     SDValue Src = Op.getOperand(0);
3024     MVT SrcVT = Src.getSimpleValueType();
3025     MVT SrcEltVT = SrcVT.getVectorElementType();
3026 
3027     assert(DstEltVT.bitsLT(SrcEltVT) &&
3028            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
3029            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
3030            "Unexpected vector truncate lowering");
3031 
3032     MVT ContainerVT = SrcVT;
3033     if (SrcVT.isFixedLengthVector()) {
3034       ContainerVT = getContainerForFixedLengthVector(SrcVT);
3035       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3036     }
3037 
3038     SDValue Result = Src;
3039     SDValue Mask, VL;
3040     std::tie(Mask, VL) =
3041         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
3042     LLVMContext &Context = *DAG.getContext();
3043     const ElementCount Count = ContainerVT.getVectorElementCount();
3044     do {
3045       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
3046       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
3047       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
3048                            Mask, VL);
3049     } while (SrcEltVT != DstEltVT);
3050 
3051     if (SrcVT.isFixedLengthVector())
3052       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
3053 
3054     return Result;
3055   }
3056   case ISD::ANY_EXTEND:
3057   case ISD::ZERO_EXTEND:
3058     if (Op.getOperand(0).getValueType().isVector() &&
3059         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3060       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3061     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3062   case ISD::SIGN_EXTEND:
3063     if (Op.getOperand(0).getValueType().isVector() &&
3064         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3065       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3066     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3067   case ISD::SPLAT_VECTOR_PARTS:
3068     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3069   case ISD::INSERT_VECTOR_ELT:
3070     return lowerINSERT_VECTOR_ELT(Op, DAG);
3071   case ISD::EXTRACT_VECTOR_ELT:
3072     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3073   case ISD::VSCALE: {
3074     MVT VT = Op.getSimpleValueType();
3075     SDLoc DL(Op);
3076     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
3077     // We define our scalable vector types for lmul=1 to use a 64 bit known
3078     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
3079     // vscale as VLENB / 8.
3080     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3081     if (isa<ConstantSDNode>(Op.getOperand(0))) {
3082       // We assume VLENB is a multiple of 8. We manually choose the best shift
3083       // here because SimplifyDemandedBits isn't always able to simplify it.
3084       uint64_t Val = Op.getConstantOperandVal(0);
3085       if (isPowerOf2_64(Val)) {
3086         uint64_t Log2 = Log2_64(Val);
3087         if (Log2 < 3)
3088           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3089                              DAG.getConstant(3 - Log2, DL, VT));
3090         if (Log2 > 3)
3091           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3092                              DAG.getConstant(Log2 - 3, DL, VT));
3093         return VLENB;
3094       }
3095       // If the multiplier is a multiple of 8, scale it down to avoid needing
3096       // to shift the VLENB value.
3097       if ((Val % 8) == 0)
3098         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3099                            DAG.getConstant(Val / 8, DL, VT));
3100     }
3101 
3102     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3103                                  DAG.getConstant(3, DL, VT));
3104     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3105   }
3106   case ISD::FPOWI: {
3107     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
3108     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
3109     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3110         Op.getOperand(1).getValueType() == MVT::i32) {
3111       SDLoc DL(Op);
3112       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3113       SDValue Powi =
3114           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3115       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3116                          DAG.getIntPtrConstant(0, DL));
3117     }
3118     return SDValue();
3119   }
3120   case ISD::FP_EXTEND: {
3121     // RVV can only do fp_extend to types double the size as the source. We
3122     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
3123     // via f32.
3124     SDLoc DL(Op);
3125     MVT VT = Op.getSimpleValueType();
3126     SDValue Src = Op.getOperand(0);
3127     MVT SrcVT = Src.getSimpleValueType();
3128 
3129     // Prepare any fixed-length vector operands.
3130     MVT ContainerVT = VT;
3131     if (SrcVT.isFixedLengthVector()) {
3132       ContainerVT = getContainerForFixedLengthVector(VT);
3133       MVT SrcContainerVT =
3134           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
3135       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3136     }
3137 
3138     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
3139         SrcVT.getVectorElementType() != MVT::f16) {
3140       // For scalable vectors, we only need to close the gap between
3141       // vXf16->vXf64.
3142       if (!VT.isFixedLengthVector())
3143         return Op;
3144       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
3145       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3146       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3147     }
3148 
3149     MVT InterVT = VT.changeVectorElementType(MVT::f32);
3150     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
3151     SDValue IntermediateExtend = getRVVFPExtendOrRound(
3152         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
3153 
3154     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
3155                                            DL, DAG, Subtarget);
3156     if (VT.isFixedLengthVector())
3157       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
3158     return Extend;
3159   }
3160   case ISD::FP_ROUND: {
3161     // RVV can only do fp_round to types half the size as the source. We
3162     // custom-lower f64->f16 rounds via RVV's round-to-odd float
3163     // conversion instruction.
3164     SDLoc DL(Op);
3165     MVT VT = Op.getSimpleValueType();
3166     SDValue Src = Op.getOperand(0);
3167     MVT SrcVT = Src.getSimpleValueType();
3168 
3169     // Prepare any fixed-length vector operands.
3170     MVT ContainerVT = VT;
3171     if (VT.isFixedLengthVector()) {
3172       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3173       ContainerVT =
3174           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3175       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3176     }
3177 
3178     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
3179         SrcVT.getVectorElementType() != MVT::f64) {
3180       // For scalable vectors, we only need to close the gap between
3181       // vXf64<->vXf16.
3182       if (!VT.isFixedLengthVector())
3183         return Op;
3184       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
3185       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3186       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3187     }
3188 
3189     SDValue Mask, VL;
3190     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3191 
3192     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
3193     SDValue IntermediateRound =
3194         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
3195     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
3196                                           DL, DAG, Subtarget);
3197 
3198     if (VT.isFixedLengthVector())
3199       return convertFromScalableVector(VT, Round, DAG, Subtarget);
3200     return Round;
3201   }
3202   case ISD::FP_TO_SINT:
3203   case ISD::FP_TO_UINT:
3204   case ISD::SINT_TO_FP:
3205   case ISD::UINT_TO_FP: {
3206     // RVV can only do fp<->int conversions to types half/double the size as
3207     // the source. We custom-lower any conversions that do two hops into
3208     // sequences.
3209     MVT VT = Op.getSimpleValueType();
3210     if (!VT.isVector())
3211       return Op;
3212     SDLoc DL(Op);
3213     SDValue Src = Op.getOperand(0);
3214     MVT EltVT = VT.getVectorElementType();
3215     MVT SrcVT = Src.getSimpleValueType();
3216     MVT SrcEltVT = SrcVT.getVectorElementType();
3217     unsigned EltSize = EltVT.getSizeInBits();
3218     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3219     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3220            "Unexpected vector element types");
3221 
3222     bool IsInt2FP = SrcEltVT.isInteger();
3223     // Widening conversions
3224     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
3225       if (IsInt2FP) {
3226         // Do a regular integer sign/zero extension then convert to float.
3227         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
3228                                       VT.getVectorElementCount());
3229         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3230                                  ? ISD::ZERO_EXTEND
3231                                  : ISD::SIGN_EXTEND;
3232         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3233         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3234       }
3235       // FP2Int
3236       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3237       // Do one doubling fp_extend then complete the operation by converting
3238       // to int.
3239       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3240       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3241       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3242     }
3243 
3244     // Narrowing conversions
3245     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
3246       if (IsInt2FP) {
3247         // One narrowing int_to_fp, then an fp_round.
3248         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3249         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3250         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3251         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3252       }
3253       // FP2Int
3254       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3255       // representable by the integer, the result is poison.
3256       MVT IVecVT =
3257           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
3258                            VT.getVectorElementCount());
3259       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3260       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3261     }
3262 
3263     // Scalable vectors can exit here. Patterns will handle equally-sized
3264     // conversions halving/doubling ones.
3265     if (!VT.isFixedLengthVector())
3266       return Op;
3267 
3268     // For fixed-length vectors we lower to a custom "VL" node.
3269     unsigned RVVOpc = 0;
3270     switch (Op.getOpcode()) {
3271     default:
3272       llvm_unreachable("Impossible opcode");
3273     case ISD::FP_TO_SINT:
3274       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3275       break;
3276     case ISD::FP_TO_UINT:
3277       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3278       break;
3279     case ISD::SINT_TO_FP:
3280       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3281       break;
3282     case ISD::UINT_TO_FP:
3283       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3284       break;
3285     }
3286 
3287     MVT ContainerVT, SrcContainerVT;
3288     // Derive the reference container type from the larger vector type.
3289     if (SrcEltSize > EltSize) {
3290       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3291       ContainerVT =
3292           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3293     } else {
3294       ContainerVT = getContainerForFixedLengthVector(VT);
3295       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3296     }
3297 
3298     SDValue Mask, VL;
3299     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3300 
3301     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3302     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3303     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3304   }
3305   case ISD::FP_TO_SINT_SAT:
3306   case ISD::FP_TO_UINT_SAT:
3307     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3308   case ISD::FTRUNC:
3309   case ISD::FCEIL:
3310   case ISD::FFLOOR:
3311     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3312   case ISD::VECREDUCE_ADD:
3313   case ISD::VECREDUCE_UMAX:
3314   case ISD::VECREDUCE_SMAX:
3315   case ISD::VECREDUCE_UMIN:
3316   case ISD::VECREDUCE_SMIN:
3317     return lowerVECREDUCE(Op, DAG);
3318   case ISD::VECREDUCE_AND:
3319   case ISD::VECREDUCE_OR:
3320   case ISD::VECREDUCE_XOR:
3321     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3322       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3323     return lowerVECREDUCE(Op, DAG);
3324   case ISD::VECREDUCE_FADD:
3325   case ISD::VECREDUCE_SEQ_FADD:
3326   case ISD::VECREDUCE_FMIN:
3327   case ISD::VECREDUCE_FMAX:
3328     return lowerFPVECREDUCE(Op, DAG);
3329   case ISD::VP_REDUCE_ADD:
3330   case ISD::VP_REDUCE_UMAX:
3331   case ISD::VP_REDUCE_SMAX:
3332   case ISD::VP_REDUCE_UMIN:
3333   case ISD::VP_REDUCE_SMIN:
3334   case ISD::VP_REDUCE_FADD:
3335   case ISD::VP_REDUCE_SEQ_FADD:
3336   case ISD::VP_REDUCE_FMIN:
3337   case ISD::VP_REDUCE_FMAX:
3338     return lowerVPREDUCE(Op, DAG);
3339   case ISD::VP_REDUCE_AND:
3340   case ISD::VP_REDUCE_OR:
3341   case ISD::VP_REDUCE_XOR:
3342     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3343       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3344     return lowerVPREDUCE(Op, DAG);
3345   case ISD::INSERT_SUBVECTOR:
3346     return lowerINSERT_SUBVECTOR(Op, DAG);
3347   case ISD::EXTRACT_SUBVECTOR:
3348     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3349   case ISD::STEP_VECTOR:
3350     return lowerSTEP_VECTOR(Op, DAG);
3351   case ISD::VECTOR_REVERSE:
3352     return lowerVECTOR_REVERSE(Op, DAG);
3353   case ISD::BUILD_VECTOR:
3354     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3355   case ISD::SPLAT_VECTOR:
3356     if (Op.getValueType().getVectorElementType() == MVT::i1)
3357       return lowerVectorMaskSplat(Op, DAG);
3358     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
3359   case ISD::VECTOR_SHUFFLE:
3360     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3361   case ISD::CONCAT_VECTORS: {
3362     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3363     // better than going through the stack, as the default expansion does.
3364     SDLoc DL(Op);
3365     MVT VT = Op.getSimpleValueType();
3366     unsigned NumOpElts =
3367         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3368     SDValue Vec = DAG.getUNDEF(VT);
3369     for (const auto &OpIdx : enumerate(Op->ops())) {
3370       SDValue SubVec = OpIdx.value();
3371       // Don't insert undef subvectors.
3372       if (SubVec.isUndef())
3373         continue;
3374       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3375                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3376     }
3377     return Vec;
3378   }
3379   case ISD::LOAD:
3380     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3381       return V;
3382     if (Op.getValueType().isFixedLengthVector())
3383       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3384     return Op;
3385   case ISD::STORE:
3386     if (auto V = expandUnalignedRVVStore(Op, DAG))
3387       return V;
3388     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3389       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3390     return Op;
3391   case ISD::MLOAD:
3392   case ISD::VP_LOAD:
3393     return lowerMaskedLoad(Op, DAG);
3394   case ISD::MSTORE:
3395   case ISD::VP_STORE:
3396     return lowerMaskedStore(Op, DAG);
3397   case ISD::SETCC:
3398     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3399   case ISD::ADD:
3400     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3401   case ISD::SUB:
3402     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3403   case ISD::MUL:
3404     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3405   case ISD::MULHS:
3406     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3407   case ISD::MULHU:
3408     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3409   case ISD::AND:
3410     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3411                                               RISCVISD::AND_VL);
3412   case ISD::OR:
3413     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3414                                               RISCVISD::OR_VL);
3415   case ISD::XOR:
3416     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3417                                               RISCVISD::XOR_VL);
3418   case ISD::SDIV:
3419     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3420   case ISD::SREM:
3421     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3422   case ISD::UDIV:
3423     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3424   case ISD::UREM:
3425     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3426   case ISD::SHL:
3427   case ISD::SRA:
3428   case ISD::SRL:
3429     if (Op.getSimpleValueType().isFixedLengthVector())
3430       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3431     // This can be called for an i32 shift amount that needs to be promoted.
3432     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3433            "Unexpected custom legalisation");
3434     return SDValue();
3435   case ISD::SADDSAT:
3436     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3437   case ISD::UADDSAT:
3438     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3439   case ISD::SSUBSAT:
3440     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3441   case ISD::USUBSAT:
3442     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3443   case ISD::FADD:
3444     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3445   case ISD::FSUB:
3446     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3447   case ISD::FMUL:
3448     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3449   case ISD::FDIV:
3450     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3451   case ISD::FNEG:
3452     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3453   case ISD::FABS:
3454     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3455   case ISD::FSQRT:
3456     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3457   case ISD::FMA:
3458     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3459   case ISD::SMIN:
3460     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3461   case ISD::SMAX:
3462     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3463   case ISD::UMIN:
3464     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3465   case ISD::UMAX:
3466     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3467   case ISD::FMINNUM:
3468     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3469   case ISD::FMAXNUM:
3470     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3471   case ISD::ABS:
3472     return lowerABS(Op, DAG);
3473   case ISD::CTLZ_ZERO_UNDEF:
3474   case ISD::CTTZ_ZERO_UNDEF:
3475     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3476   case ISD::VSELECT:
3477     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3478   case ISD::FCOPYSIGN:
3479     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3480   case ISD::MGATHER:
3481   case ISD::VP_GATHER:
3482     return lowerMaskedGather(Op, DAG);
3483   case ISD::MSCATTER:
3484   case ISD::VP_SCATTER:
3485     return lowerMaskedScatter(Op, DAG);
3486   case ISD::FLT_ROUNDS_:
3487     return lowerGET_ROUNDING(Op, DAG);
3488   case ISD::SET_ROUNDING:
3489     return lowerSET_ROUNDING(Op, DAG);
3490   case ISD::VP_SELECT:
3491     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3492   case ISD::VP_MERGE:
3493     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3494   case ISD::VP_ADD:
3495     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3496   case ISD::VP_SUB:
3497     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3498   case ISD::VP_MUL:
3499     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3500   case ISD::VP_SDIV:
3501     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3502   case ISD::VP_UDIV:
3503     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3504   case ISD::VP_SREM:
3505     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3506   case ISD::VP_UREM:
3507     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3508   case ISD::VP_AND:
3509     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3510   case ISD::VP_OR:
3511     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3512   case ISD::VP_XOR:
3513     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3514   case ISD::VP_ASHR:
3515     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3516   case ISD::VP_LSHR:
3517     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3518   case ISD::VP_SHL:
3519     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3520   case ISD::VP_FADD:
3521     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3522   case ISD::VP_FSUB:
3523     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3524   case ISD::VP_FMUL:
3525     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3526   case ISD::VP_FDIV:
3527     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3528   }
3529 }
3530 
3531 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3532                              SelectionDAG &DAG, unsigned Flags) {
3533   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3534 }
3535 
3536 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3537                              SelectionDAG &DAG, unsigned Flags) {
3538   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3539                                    Flags);
3540 }
3541 
3542 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3543                              SelectionDAG &DAG, unsigned Flags) {
3544   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3545                                    N->getOffset(), Flags);
3546 }
3547 
3548 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3549                              SelectionDAG &DAG, unsigned Flags) {
3550   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3551 }
3552 
3553 template <class NodeTy>
3554 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3555                                      bool IsLocal) const {
3556   SDLoc DL(N);
3557   EVT Ty = getPointerTy(DAG.getDataLayout());
3558 
3559   if (isPositionIndependent()) {
3560     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3561     if (IsLocal)
3562       // Use PC-relative addressing to access the symbol. This generates the
3563       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3564       // %pcrel_lo(auipc)).
3565       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3566 
3567     // Use PC-relative addressing to access the GOT for this symbol, then load
3568     // the address from the GOT. This generates the pattern (PseudoLA sym),
3569     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3570     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3571   }
3572 
3573   switch (getTargetMachine().getCodeModel()) {
3574   default:
3575     report_fatal_error("Unsupported code model for lowering");
3576   case CodeModel::Small: {
3577     // Generate a sequence for accessing addresses within the first 2 GiB of
3578     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3579     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3580     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3581     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3582     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3583   }
3584   case CodeModel::Medium: {
3585     // Generate a sequence for accessing addresses within any 2GiB range within
3586     // the address space. This generates the pattern (PseudoLLA sym), which
3587     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3588     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3589     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3590   }
3591   }
3592 }
3593 
3594 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3595                                                 SelectionDAG &DAG) const {
3596   SDLoc DL(Op);
3597   EVT Ty = Op.getValueType();
3598   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3599   int64_t Offset = N->getOffset();
3600   MVT XLenVT = Subtarget.getXLenVT();
3601 
3602   const GlobalValue *GV = N->getGlobal();
3603   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3604   SDValue Addr = getAddr(N, DAG, IsLocal);
3605 
3606   // In order to maximise the opportunity for common subexpression elimination,
3607   // emit a separate ADD node for the global address offset instead of folding
3608   // it in the global address node. Later peephole optimisations may choose to
3609   // fold it back in when profitable.
3610   if (Offset != 0)
3611     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3612                        DAG.getConstant(Offset, DL, XLenVT));
3613   return Addr;
3614 }
3615 
3616 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3617                                                SelectionDAG &DAG) const {
3618   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3619 
3620   return getAddr(N, DAG);
3621 }
3622 
3623 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3624                                                SelectionDAG &DAG) const {
3625   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3626 
3627   return getAddr(N, DAG);
3628 }
3629 
3630 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3631                                             SelectionDAG &DAG) const {
3632   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3633 
3634   return getAddr(N, DAG);
3635 }
3636 
3637 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3638                                               SelectionDAG &DAG,
3639                                               bool UseGOT) const {
3640   SDLoc DL(N);
3641   EVT Ty = getPointerTy(DAG.getDataLayout());
3642   const GlobalValue *GV = N->getGlobal();
3643   MVT XLenVT = Subtarget.getXLenVT();
3644 
3645   if (UseGOT) {
3646     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3647     // load the address from the GOT and add the thread pointer. This generates
3648     // the pattern (PseudoLA_TLS_IE sym), which expands to
3649     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3650     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3651     SDValue Load =
3652         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3653 
3654     // Add the thread pointer.
3655     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3656     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3657   }
3658 
3659   // Generate a sequence for accessing the address relative to the thread
3660   // pointer, with the appropriate adjustment for the thread pointer offset.
3661   // This generates the pattern
3662   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3663   SDValue AddrHi =
3664       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3665   SDValue AddrAdd =
3666       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3667   SDValue AddrLo =
3668       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3669 
3670   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3671   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3672   SDValue MNAdd = SDValue(
3673       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3674       0);
3675   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3676 }
3677 
3678 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3679                                                SelectionDAG &DAG) const {
3680   SDLoc DL(N);
3681   EVT Ty = getPointerTy(DAG.getDataLayout());
3682   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3683   const GlobalValue *GV = N->getGlobal();
3684 
3685   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3686   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3687   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3688   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3689   SDValue Load =
3690       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3691 
3692   // Prepare argument list to generate call.
3693   ArgListTy Args;
3694   ArgListEntry Entry;
3695   Entry.Node = Load;
3696   Entry.Ty = CallTy;
3697   Args.push_back(Entry);
3698 
3699   // Setup call to __tls_get_addr.
3700   TargetLowering::CallLoweringInfo CLI(DAG);
3701   CLI.setDebugLoc(DL)
3702       .setChain(DAG.getEntryNode())
3703       .setLibCallee(CallingConv::C, CallTy,
3704                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3705                     std::move(Args));
3706 
3707   return LowerCallTo(CLI).first;
3708 }
3709 
3710 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3711                                                    SelectionDAG &DAG) const {
3712   SDLoc DL(Op);
3713   EVT Ty = Op.getValueType();
3714   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3715   int64_t Offset = N->getOffset();
3716   MVT XLenVT = Subtarget.getXLenVT();
3717 
3718   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3719 
3720   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3721       CallingConv::GHC)
3722     report_fatal_error("In GHC calling convention TLS is not supported");
3723 
3724   SDValue Addr;
3725   switch (Model) {
3726   case TLSModel::LocalExec:
3727     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3728     break;
3729   case TLSModel::InitialExec:
3730     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3731     break;
3732   case TLSModel::LocalDynamic:
3733   case TLSModel::GeneralDynamic:
3734     Addr = getDynamicTLSAddr(N, DAG);
3735     break;
3736   }
3737 
3738   // In order to maximise the opportunity for common subexpression elimination,
3739   // emit a separate ADD node for the global address offset instead of folding
3740   // it in the global address node. Later peephole optimisations may choose to
3741   // fold it back in when profitable.
3742   if (Offset != 0)
3743     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3744                        DAG.getConstant(Offset, DL, XLenVT));
3745   return Addr;
3746 }
3747 
3748 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3749   SDValue CondV = Op.getOperand(0);
3750   SDValue TrueV = Op.getOperand(1);
3751   SDValue FalseV = Op.getOperand(2);
3752   SDLoc DL(Op);
3753   MVT VT = Op.getSimpleValueType();
3754   MVT XLenVT = Subtarget.getXLenVT();
3755 
3756   // Lower vector SELECTs to VSELECTs by splatting the condition.
3757   if (VT.isVector()) {
3758     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3759     SDValue CondSplat = VT.isScalableVector()
3760                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3761                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3762     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3763   }
3764 
3765   // If the result type is XLenVT and CondV is the output of a SETCC node
3766   // which also operated on XLenVT inputs, then merge the SETCC node into the
3767   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3768   // compare+branch instructions. i.e.:
3769   // (select (setcc lhs, rhs, cc), truev, falsev)
3770   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3771   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3772       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3773     SDValue LHS = CondV.getOperand(0);
3774     SDValue RHS = CondV.getOperand(1);
3775     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3776     ISD::CondCode CCVal = CC->get();
3777 
3778     // Special case for a select of 2 constants that have a diffence of 1.
3779     // Normally this is done by DAGCombine, but if the select is introduced by
3780     // type legalization or op legalization, we miss it. Restricting to SETLT
3781     // case for now because that is what signed saturating add/sub need.
3782     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3783     // but we would probably want to swap the true/false values if the condition
3784     // is SETGE/SETLE to avoid an XORI.
3785     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3786         CCVal == ISD::SETLT) {
3787       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3788       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3789       if (TrueVal - 1 == FalseVal)
3790         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3791       if (TrueVal + 1 == FalseVal)
3792         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3793     }
3794 
3795     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3796 
3797     SDValue TargetCC = DAG.getCondCode(CCVal);
3798     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3799     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3800   }
3801 
3802   // Otherwise:
3803   // (select condv, truev, falsev)
3804   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3805   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3806   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3807 
3808   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3809 
3810   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3811 }
3812 
3813 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3814   SDValue CondV = Op.getOperand(1);
3815   SDLoc DL(Op);
3816   MVT XLenVT = Subtarget.getXLenVT();
3817 
3818   if (CondV.getOpcode() == ISD::SETCC &&
3819       CondV.getOperand(0).getValueType() == XLenVT) {
3820     SDValue LHS = CondV.getOperand(0);
3821     SDValue RHS = CondV.getOperand(1);
3822     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3823 
3824     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3825 
3826     SDValue TargetCC = DAG.getCondCode(CCVal);
3827     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3828                        LHS, RHS, TargetCC, Op.getOperand(2));
3829   }
3830 
3831   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3832                      CondV, DAG.getConstant(0, DL, XLenVT),
3833                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3834 }
3835 
3836 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3837   MachineFunction &MF = DAG.getMachineFunction();
3838   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3839 
3840   SDLoc DL(Op);
3841   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3842                                  getPointerTy(MF.getDataLayout()));
3843 
3844   // vastart just stores the address of the VarArgsFrameIndex slot into the
3845   // memory location argument.
3846   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3847   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3848                       MachinePointerInfo(SV));
3849 }
3850 
3851 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3852                                             SelectionDAG &DAG) const {
3853   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3854   MachineFunction &MF = DAG.getMachineFunction();
3855   MachineFrameInfo &MFI = MF.getFrameInfo();
3856   MFI.setFrameAddressIsTaken(true);
3857   Register FrameReg = RI.getFrameRegister(MF);
3858   int XLenInBytes = Subtarget.getXLen() / 8;
3859 
3860   EVT VT = Op.getValueType();
3861   SDLoc DL(Op);
3862   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3863   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3864   while (Depth--) {
3865     int Offset = -(XLenInBytes * 2);
3866     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3867                               DAG.getIntPtrConstant(Offset, DL));
3868     FrameAddr =
3869         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3870   }
3871   return FrameAddr;
3872 }
3873 
3874 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3875                                              SelectionDAG &DAG) const {
3876   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3877   MachineFunction &MF = DAG.getMachineFunction();
3878   MachineFrameInfo &MFI = MF.getFrameInfo();
3879   MFI.setReturnAddressIsTaken(true);
3880   MVT XLenVT = Subtarget.getXLenVT();
3881   int XLenInBytes = Subtarget.getXLen() / 8;
3882 
3883   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3884     return SDValue();
3885 
3886   EVT VT = Op.getValueType();
3887   SDLoc DL(Op);
3888   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3889   if (Depth) {
3890     int Off = -XLenInBytes;
3891     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3892     SDValue Offset = DAG.getConstant(Off, DL, VT);
3893     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3894                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3895                        MachinePointerInfo());
3896   }
3897 
3898   // Return the value of the return address register, marking it an implicit
3899   // live-in.
3900   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3901   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3902 }
3903 
3904 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3905                                                  SelectionDAG &DAG) const {
3906   SDLoc DL(Op);
3907   SDValue Lo = Op.getOperand(0);
3908   SDValue Hi = Op.getOperand(1);
3909   SDValue Shamt = Op.getOperand(2);
3910   EVT VT = Lo.getValueType();
3911 
3912   // if Shamt-XLEN < 0: // Shamt < XLEN
3913   //   Lo = Lo << Shamt
3914   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
3915   // else:
3916   //   Lo = 0
3917   //   Hi = Lo << (Shamt-XLEN)
3918 
3919   SDValue Zero = DAG.getConstant(0, DL, VT);
3920   SDValue One = DAG.getConstant(1, DL, VT);
3921   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3922   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3923   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3924   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3925 
3926   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3927   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3928   SDValue ShiftRightLo =
3929       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3930   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3931   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3932   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3933 
3934   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3935 
3936   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3937   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3938 
3939   SDValue Parts[2] = {Lo, Hi};
3940   return DAG.getMergeValues(Parts, DL);
3941 }
3942 
3943 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3944                                                   bool IsSRA) const {
3945   SDLoc DL(Op);
3946   SDValue Lo = Op.getOperand(0);
3947   SDValue Hi = Op.getOperand(1);
3948   SDValue Shamt = Op.getOperand(2);
3949   EVT VT = Lo.getValueType();
3950 
3951   // SRA expansion:
3952   //   if Shamt-XLEN < 0: // Shamt < XLEN
3953   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3954   //     Hi = Hi >>s Shamt
3955   //   else:
3956   //     Lo = Hi >>s (Shamt-XLEN);
3957   //     Hi = Hi >>s (XLEN-1)
3958   //
3959   // SRL expansion:
3960   //   if Shamt-XLEN < 0: // Shamt < XLEN
3961   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3962   //     Hi = Hi >>u Shamt
3963   //   else:
3964   //     Lo = Hi >>u (Shamt-XLEN);
3965   //     Hi = 0;
3966 
3967   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3968 
3969   SDValue Zero = DAG.getConstant(0, DL, VT);
3970   SDValue One = DAG.getConstant(1, DL, VT);
3971   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3972   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3973   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3974   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3975 
3976   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3977   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3978   SDValue ShiftLeftHi =
3979       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
3980   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3981   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3982   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
3983   SDValue HiFalse =
3984       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
3985 
3986   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3987 
3988   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3989   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3990 
3991   SDValue Parts[2] = {Lo, Hi};
3992   return DAG.getMergeValues(Parts, DL);
3993 }
3994 
3995 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
3996 // legal equivalently-sized i8 type, so we can use that as a go-between.
3997 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
3998                                                   SelectionDAG &DAG) const {
3999   SDLoc DL(Op);
4000   MVT VT = Op.getSimpleValueType();
4001   SDValue SplatVal = Op.getOperand(0);
4002   // All-zeros or all-ones splats are handled specially.
4003   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
4004     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4005     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
4006   }
4007   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
4008     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4009     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
4010   }
4011   MVT XLenVT = Subtarget.getXLenVT();
4012   assert(SplatVal.getValueType() == XLenVT &&
4013          "Unexpected type for i1 splat value");
4014   MVT InterVT = VT.changeVectorElementType(MVT::i8);
4015   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
4016                          DAG.getConstant(1, DL, XLenVT));
4017   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
4018   SDValue Zero = DAG.getConstant(0, DL, InterVT);
4019   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
4020 }
4021 
4022 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
4023 // illegal (currently only vXi64 RV32).
4024 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4025 // them to SPLAT_VECTOR_I64
4026 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4027                                                      SelectionDAG &DAG) const {
4028   SDLoc DL(Op);
4029   MVT VecVT = Op.getSimpleValueType();
4030   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4031          "Unexpected SPLAT_VECTOR_PARTS lowering");
4032 
4033   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4034   SDValue Lo = Op.getOperand(0);
4035   SDValue Hi = Op.getOperand(1);
4036 
4037   if (VecVT.isFixedLengthVector()) {
4038     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4039     SDLoc DL(Op);
4040     SDValue Mask, VL;
4041     std::tie(Mask, VL) =
4042         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4043 
4044     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
4045     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4046   }
4047 
4048   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4049     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4050     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
4051     // If Hi constant is all the same sign bit as Lo, lower this as a custom
4052     // node in order to try and match RVV vector/scalar instructions.
4053     if ((LoC >> 31) == HiC)
4054       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
4055   }
4056 
4057   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
4058   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4059       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4060       Hi.getConstantOperandVal(1) == 31)
4061     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
4062 
4063   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
4064   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
4065                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i64));
4066 }
4067 
4068 // Custom-lower extensions from mask vectors by using a vselect either with 1
4069 // for zero/any-extension or -1 for sign-extension:
4070 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4071 // Note that any-extension is lowered identically to zero-extension.
4072 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4073                                                 int64_t ExtTrueVal) const {
4074   SDLoc DL(Op);
4075   MVT VecVT = Op.getSimpleValueType();
4076   SDValue Src = Op.getOperand(0);
4077   // Only custom-lower extensions from mask types
4078   assert(Src.getValueType().isVector() &&
4079          Src.getValueType().getVectorElementType() == MVT::i1);
4080 
4081   MVT XLenVT = Subtarget.getXLenVT();
4082   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4083   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4084 
4085   if (VecVT.isScalableVector()) {
4086     // Be careful not to introduce illegal scalar types at this stage, and be
4087     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
4088     // illegal and must be expanded. Since we know that the constants are
4089     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
4090     bool IsRV32E64 =
4091         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
4092 
4093     if (!IsRV32E64) {
4094       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
4095       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
4096     } else {
4097       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
4098       SplatTrueVal =
4099           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
4100     }
4101 
4102     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4103   }
4104 
4105   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4106   MVT I1ContainerVT =
4107       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4108 
4109   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4110 
4111   SDValue Mask, VL;
4112   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4113 
4114   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
4115   SplatTrueVal =
4116       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
4117   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4118                                SplatTrueVal, SplatZero, VL);
4119 
4120   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4121 }
4122 
4123 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4124     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4125   MVT ExtVT = Op.getSimpleValueType();
4126   // Only custom-lower extensions from fixed-length vector types.
4127   if (!ExtVT.isFixedLengthVector())
4128     return Op;
4129   MVT VT = Op.getOperand(0).getSimpleValueType();
4130   // Grab the canonical container type for the extended type. Infer the smaller
4131   // type from that to ensure the same number of vector elements, as we know
4132   // the LMUL will be sufficient to hold the smaller type.
4133   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
4134   // Get the extended container type manually to ensure the same number of
4135   // vector elements between source and dest.
4136   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4137                                      ContainerExtVT.getVectorElementCount());
4138 
4139   SDValue Op1 =
4140       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4141 
4142   SDLoc DL(Op);
4143   SDValue Mask, VL;
4144   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4145 
4146   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4147 
4148   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4149 }
4150 
4151 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4152 // setcc operation:
4153 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
4154 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
4155                                                   SelectionDAG &DAG) const {
4156   SDLoc DL(Op);
4157   EVT MaskVT = Op.getValueType();
4158   // Only expect to custom-lower truncations to mask types
4159   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4160          "Unexpected type for vector mask lowering");
4161   SDValue Src = Op.getOperand(0);
4162   MVT VecVT = Src.getSimpleValueType();
4163 
4164   // If this is a fixed vector, we need to convert it to a scalable vector.
4165   MVT ContainerVT = VecVT;
4166   if (VecVT.isFixedLengthVector()) {
4167     ContainerVT = getContainerForFixedLengthVector(VecVT);
4168     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4169   }
4170 
4171   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4172   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4173 
4174   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
4175   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
4176 
4177   if (VecVT.isScalableVector()) {
4178     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
4179     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
4180   }
4181 
4182   SDValue Mask, VL;
4183   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4184 
4185   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4186   SDValue Trunc =
4187       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4188   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4189                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4190   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4191 }
4192 
4193 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4194 // first position of a vector, and that vector is slid up to the insert index.
4195 // By limiting the active vector length to index+1 and merging with the
4196 // original vector (with an undisturbed tail policy for elements >= VL), we
4197 // achieve the desired result of leaving all elements untouched except the one
4198 // at VL-1, which is replaced with the desired value.
4199 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4200                                                     SelectionDAG &DAG) const {
4201   SDLoc DL(Op);
4202   MVT VecVT = Op.getSimpleValueType();
4203   SDValue Vec = Op.getOperand(0);
4204   SDValue Val = Op.getOperand(1);
4205   SDValue Idx = Op.getOperand(2);
4206 
4207   if (VecVT.getVectorElementType() == MVT::i1) {
4208     // FIXME: For now we just promote to an i8 vector and insert into that,
4209     // but this is probably not optimal.
4210     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4211     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4212     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4213     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4214   }
4215 
4216   MVT ContainerVT = VecVT;
4217   // If the operand is a fixed-length vector, convert to a scalable one.
4218   if (VecVT.isFixedLengthVector()) {
4219     ContainerVT = getContainerForFixedLengthVector(VecVT);
4220     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4221   }
4222 
4223   MVT XLenVT = Subtarget.getXLenVT();
4224 
4225   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4226   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4227   // Even i64-element vectors on RV32 can be lowered without scalar
4228   // legalization if the most-significant 32 bits of the value are not affected
4229   // by the sign-extension of the lower 32 bits.
4230   // TODO: We could also catch sign extensions of a 32-bit value.
4231   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4232     const auto *CVal = cast<ConstantSDNode>(Val);
4233     if (isInt<32>(CVal->getSExtValue())) {
4234       IsLegalInsert = true;
4235       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4236     }
4237   }
4238 
4239   SDValue Mask, VL;
4240   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4241 
4242   SDValue ValInVec;
4243 
4244   if (IsLegalInsert) {
4245     unsigned Opc =
4246         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4247     if (isNullConstant(Idx)) {
4248       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4249       if (!VecVT.isFixedLengthVector())
4250         return Vec;
4251       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4252     }
4253     ValInVec =
4254         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4255   } else {
4256     // On RV32, i64-element vectors must be specially handled to place the
4257     // value at element 0, by using two vslide1up instructions in sequence on
4258     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4259     // this.
4260     SDValue One = DAG.getConstant(1, DL, XLenVT);
4261     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4262     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4263     MVT I32ContainerVT =
4264         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4265     SDValue I32Mask =
4266         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4267     // Limit the active VL to two.
4268     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
4269     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
4270     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4271     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
4272                            InsertI64VL);
4273     // First slide in the hi value, then the lo in underneath it.
4274     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
4275                            ValHi, I32Mask, InsertI64VL);
4276     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
4277                            ValLo, I32Mask, InsertI64VL);
4278     // Bitcast back to the right container type.
4279     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4280   }
4281 
4282   // Now that the value is in a vector, slide it into position.
4283   SDValue InsertVL =
4284       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4285   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4286                                 ValInVec, Idx, Mask, InsertVL);
4287   if (!VecVT.isFixedLengthVector())
4288     return Slideup;
4289   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4290 }
4291 
4292 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4293 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4294 // types this is done using VMV_X_S to allow us to glean information about the
4295 // sign bits of the result.
4296 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4297                                                      SelectionDAG &DAG) const {
4298   SDLoc DL(Op);
4299   SDValue Idx = Op.getOperand(1);
4300   SDValue Vec = Op.getOperand(0);
4301   EVT EltVT = Op.getValueType();
4302   MVT VecVT = Vec.getSimpleValueType();
4303   MVT XLenVT = Subtarget.getXLenVT();
4304 
4305   if (VecVT.getVectorElementType() == MVT::i1) {
4306     if (VecVT.isFixedLengthVector()) {
4307       unsigned NumElts = VecVT.getVectorNumElements();
4308       if (NumElts >= 8) {
4309         MVT WideEltVT;
4310         unsigned WidenVecLen;
4311         SDValue ExtractElementIdx;
4312         SDValue ExtractBitIdx;
4313         unsigned MaxEEW = Subtarget.getMaxELENForFixedLengthVectors();
4314         MVT LargestEltVT = MVT::getIntegerVT(
4315             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4316         if (NumElts <= LargestEltVT.getSizeInBits()) {
4317           assert(isPowerOf2_32(NumElts) &&
4318                  "the number of elements should be power of 2");
4319           WideEltVT = MVT::getIntegerVT(NumElts);
4320           WidenVecLen = 1;
4321           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4322           ExtractBitIdx = Idx;
4323         } else {
4324           WideEltVT = LargestEltVT;
4325           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4326           // extract element index = index / element width
4327           ExtractElementIdx = DAG.getNode(
4328               ISD::SRL, DL, XLenVT, Idx,
4329               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4330           // mask bit index = index % element width
4331           ExtractBitIdx = DAG.getNode(
4332               ISD::AND, DL, XLenVT, Idx,
4333               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4334         }
4335         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4336         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4337         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4338                                          Vec, ExtractElementIdx);
4339         // Extract the bit from GPR.
4340         SDValue ShiftRight =
4341             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4342         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4343                            DAG.getConstant(1, DL, XLenVT));
4344       }
4345     }
4346     // Otherwise, promote to an i8 vector and extract from that.
4347     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4348     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4349     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4350   }
4351 
4352   // If this is a fixed vector, we need to convert it to a scalable vector.
4353   MVT ContainerVT = VecVT;
4354   if (VecVT.isFixedLengthVector()) {
4355     ContainerVT = getContainerForFixedLengthVector(VecVT);
4356     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4357   }
4358 
4359   // If the index is 0, the vector is already in the right position.
4360   if (!isNullConstant(Idx)) {
4361     // Use a VL of 1 to avoid processing more elements than we need.
4362     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4363     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4364     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4365     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4366                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4367   }
4368 
4369   if (!EltVT.isInteger()) {
4370     // Floating-point extracts are handled in TableGen.
4371     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4372                        DAG.getConstant(0, DL, XLenVT));
4373   }
4374 
4375   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4376   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4377 }
4378 
4379 // Some RVV intrinsics may claim that they want an integer operand to be
4380 // promoted or expanded.
4381 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
4382                                           const RISCVSubtarget &Subtarget) {
4383   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4384           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4385          "Unexpected opcode");
4386 
4387   if (!Subtarget.hasVInstructions())
4388     return SDValue();
4389 
4390   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4391   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4392   SDLoc DL(Op);
4393 
4394   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4395       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4396   if (!II || !II->hasSplatOperand())
4397     return SDValue();
4398 
4399   unsigned SplatOp = II->SplatOperand + 1 + HasChain;
4400   assert(SplatOp < Op.getNumOperands());
4401 
4402   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4403   SDValue &ScalarOp = Operands[SplatOp];
4404   MVT OpVT = ScalarOp.getSimpleValueType();
4405   MVT XLenVT = Subtarget.getXLenVT();
4406 
4407   // If this isn't a scalar, or its type is XLenVT we're done.
4408   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4409     return SDValue();
4410 
4411   // Simplest case is that the operand needs to be promoted to XLenVT.
4412   if (OpVT.bitsLT(XLenVT)) {
4413     // If the operand is a constant, sign extend to increase our chances
4414     // of being able to use a .vi instruction. ANY_EXTEND would become a
4415     // a zero extend and the simm5 check in isel would fail.
4416     // FIXME: Should we ignore the upper bits in isel instead?
4417     unsigned ExtOpc =
4418         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4419     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4420     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4421   }
4422 
4423   // Use the previous operand to get the vXi64 VT. The result might be a mask
4424   // VT for compares. Using the previous operand assumes that the previous
4425   // operand will never have a smaller element size than a scalar operand and
4426   // that a widening operation never uses SEW=64.
4427   // NOTE: If this fails the below assert, we can probably just find the
4428   // element count from any operand or result and use it to construct the VT.
4429   assert(II->SplatOperand > 0 && "Unexpected splat operand!");
4430   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4431 
4432   // The more complex case is when the scalar is larger than XLenVT.
4433   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4434          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4435 
4436   // If this is a sign-extended 32-bit constant, we can truncate it and rely
4437   // on the instruction to sign-extend since SEW>XLEN.
4438   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
4439     if (isInt<32>(CVal->getSExtValue())) {
4440       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4441       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4442     }
4443   }
4444 
4445   // We need to convert the scalar to a splat vector.
4446   // FIXME: Can we implicitly truncate the scalar if it is known to
4447   // be sign extended?
4448   SDValue VL = getVLOperand(Op);
4449   assert(VL.getValueType() == XLenVT);
4450   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
4451   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4452 }
4453 
4454 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4455                                                      SelectionDAG &DAG) const {
4456   unsigned IntNo = Op.getConstantOperandVal(0);
4457   SDLoc DL(Op);
4458   MVT XLenVT = Subtarget.getXLenVT();
4459 
4460   switch (IntNo) {
4461   default:
4462     break; // Don't custom lower most intrinsics.
4463   case Intrinsic::thread_pointer: {
4464     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4465     return DAG.getRegister(RISCV::X4, PtrVT);
4466   }
4467   case Intrinsic::riscv_orc_b:
4468     // Lower to the GORCI encoding for orc.b.
4469     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
4470                        DAG.getConstant(7, DL, XLenVT));
4471   case Intrinsic::riscv_grev:
4472   case Intrinsic::riscv_gorc: {
4473     unsigned Opc =
4474         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4475     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4476   }
4477   case Intrinsic::riscv_shfl:
4478   case Intrinsic::riscv_unshfl: {
4479     unsigned Opc =
4480         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4481     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4482   }
4483   case Intrinsic::riscv_bcompress:
4484   case Intrinsic::riscv_bdecompress: {
4485     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4486                                                        : RISCVISD::BDECOMPRESS;
4487     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4488   }
4489   case Intrinsic::riscv_bfp:
4490     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4491                        Op.getOperand(2));
4492   case Intrinsic::riscv_fsl:
4493     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4494                        Op.getOperand(2), Op.getOperand(3));
4495   case Intrinsic::riscv_fsr:
4496     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4497                        Op.getOperand(2), Op.getOperand(3));
4498   case Intrinsic::riscv_vmv_x_s:
4499     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4500     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4501                        Op.getOperand(1));
4502   case Intrinsic::riscv_vmv_v_x:
4503     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4504                             Op.getSimpleValueType(), DL, DAG, Subtarget);
4505   case Intrinsic::riscv_vfmv_v_f:
4506     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4507                        Op.getOperand(1), Op.getOperand(2));
4508   case Intrinsic::riscv_vmv_s_x: {
4509     SDValue Scalar = Op.getOperand(2);
4510 
4511     if (Scalar.getValueType().bitsLE(XLenVT)) {
4512       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4513       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4514                          Op.getOperand(1), Scalar, Op.getOperand(3));
4515     }
4516 
4517     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4518 
4519     // This is an i64 value that lives in two scalar registers. We have to
4520     // insert this in a convoluted way. First we build vXi64 splat containing
4521     // the/ two values that we assemble using some bit math. Next we'll use
4522     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4523     // to merge element 0 from our splat into the source vector.
4524     // FIXME: This is probably not the best way to do this, but it is
4525     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4526     // point.
4527     //   sw lo, (a0)
4528     //   sw hi, 4(a0)
4529     //   vlse vX, (a0)
4530     //
4531     //   vid.v      vVid
4532     //   vmseq.vx   mMask, vVid, 0
4533     //   vmerge.vvm vDest, vSrc, vVal, mMask
4534     MVT VT = Op.getSimpleValueType();
4535     SDValue Vec = Op.getOperand(1);
4536     SDValue VL = getVLOperand(Op);
4537 
4538     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
4539     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4540                                       DAG.getConstant(0, DL, MVT::i32), VL);
4541 
4542     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
4543     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4544     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4545     SDValue SelectCond =
4546         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4547                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4548     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4549                        Vec, VL);
4550   }
4551   case Intrinsic::riscv_vslide1up:
4552   case Intrinsic::riscv_vslide1down:
4553   case Intrinsic::riscv_vslide1up_mask:
4554   case Intrinsic::riscv_vslide1down_mask: {
4555     // We need to special case these when the scalar is larger than XLen.
4556     unsigned NumOps = Op.getNumOperands();
4557     bool IsMasked = NumOps == 7;
4558     unsigned OpOffset = IsMasked ? 1 : 0;
4559     SDValue Scalar = Op.getOperand(2 + OpOffset);
4560     if (Scalar.getValueType().bitsLE(XLenVT))
4561       break;
4562 
4563     // Splatting a sign extended constant is fine.
4564     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
4565       if (isInt<32>(CVal->getSExtValue()))
4566         break;
4567 
4568     MVT VT = Op.getSimpleValueType();
4569     assert(VT.getVectorElementType() == MVT::i64 &&
4570            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
4571 
4572     // Convert the vector source to the equivalent nxvXi32 vector.
4573     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4574     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
4575 
4576     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4577                                    DAG.getConstant(0, DL, XLenVT));
4578     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4579                                    DAG.getConstant(1, DL, XLenVT));
4580 
4581     // Double the VL since we halved SEW.
4582     SDValue VL = getVLOperand(Op);
4583     SDValue I32VL =
4584         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4585 
4586     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
4587     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
4588 
4589     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4590     // instructions.
4591     if (IntNo == Intrinsic::riscv_vslide1up ||
4592         IntNo == Intrinsic::riscv_vslide1up_mask) {
4593       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
4594                         I32Mask, I32VL);
4595       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
4596                         I32Mask, I32VL);
4597     } else {
4598       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
4599                         I32Mask, I32VL);
4600       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
4601                         I32Mask, I32VL);
4602     }
4603 
4604     // Convert back to nxvXi64.
4605     Vec = DAG.getBitcast(VT, Vec);
4606 
4607     if (!IsMasked)
4608       return Vec;
4609 
4610     // Apply mask after the operation.
4611     SDValue Mask = Op.getOperand(NumOps - 3);
4612     SDValue MaskedOff = Op.getOperand(1);
4613     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
4614   }
4615   }
4616 
4617   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4618 }
4619 
4620 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4621                                                     SelectionDAG &DAG) const {
4622   unsigned IntNo = Op.getConstantOperandVal(1);
4623   switch (IntNo) {
4624   default:
4625     break;
4626   case Intrinsic::riscv_masked_strided_load: {
4627     SDLoc DL(Op);
4628     MVT XLenVT = Subtarget.getXLenVT();
4629 
4630     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4631     // the selection of the masked intrinsics doesn't do this for us.
4632     SDValue Mask = Op.getOperand(5);
4633     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4634 
4635     MVT VT = Op->getSimpleValueType(0);
4636     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4637 
4638     SDValue PassThru = Op.getOperand(2);
4639     if (!IsUnmasked) {
4640       MVT MaskVT =
4641           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4642       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4643       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4644     }
4645 
4646     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4647 
4648     SDValue IntID = DAG.getTargetConstant(
4649         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4650         XLenVT);
4651 
4652     auto *Load = cast<MemIntrinsicSDNode>(Op);
4653     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4654     if (IsUnmasked)
4655       Ops.push_back(DAG.getUNDEF(ContainerVT));
4656     else
4657       Ops.push_back(PassThru);
4658     Ops.push_back(Op.getOperand(3)); // Ptr
4659     Ops.push_back(Op.getOperand(4)); // Stride
4660     if (!IsUnmasked)
4661       Ops.push_back(Mask);
4662     Ops.push_back(VL);
4663     if (!IsUnmasked) {
4664       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4665       Ops.push_back(Policy);
4666     }
4667 
4668     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4669     SDValue Result =
4670         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4671                                 Load->getMemoryVT(), Load->getMemOperand());
4672     SDValue Chain = Result.getValue(1);
4673     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4674     return DAG.getMergeValues({Result, Chain}, DL);
4675   }
4676   }
4677 
4678   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4679 }
4680 
4681 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4682                                                  SelectionDAG &DAG) const {
4683   unsigned IntNo = Op.getConstantOperandVal(1);
4684   switch (IntNo) {
4685   default:
4686     break;
4687   case Intrinsic::riscv_masked_strided_store: {
4688     SDLoc DL(Op);
4689     MVT XLenVT = Subtarget.getXLenVT();
4690 
4691     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4692     // the selection of the masked intrinsics doesn't do this for us.
4693     SDValue Mask = Op.getOperand(5);
4694     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4695 
4696     SDValue Val = Op.getOperand(2);
4697     MVT VT = Val.getSimpleValueType();
4698     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4699 
4700     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4701     if (!IsUnmasked) {
4702       MVT MaskVT =
4703           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4704       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4705     }
4706 
4707     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4708 
4709     SDValue IntID = DAG.getTargetConstant(
4710         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4711         XLenVT);
4712 
4713     auto *Store = cast<MemIntrinsicSDNode>(Op);
4714     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4715     Ops.push_back(Val);
4716     Ops.push_back(Op.getOperand(3)); // Ptr
4717     Ops.push_back(Op.getOperand(4)); // Stride
4718     if (!IsUnmasked)
4719       Ops.push_back(Mask);
4720     Ops.push_back(VL);
4721 
4722     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4723                                    Ops, Store->getMemoryVT(),
4724                                    Store->getMemOperand());
4725   }
4726   }
4727 
4728   return SDValue();
4729 }
4730 
4731 static MVT getLMUL1VT(MVT VT) {
4732   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4733          "Unexpected vector MVT");
4734   return MVT::getScalableVectorVT(
4735       VT.getVectorElementType(),
4736       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4737 }
4738 
4739 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4740   switch (ISDOpcode) {
4741   default:
4742     llvm_unreachable("Unhandled reduction");
4743   case ISD::VECREDUCE_ADD:
4744     return RISCVISD::VECREDUCE_ADD_VL;
4745   case ISD::VECREDUCE_UMAX:
4746     return RISCVISD::VECREDUCE_UMAX_VL;
4747   case ISD::VECREDUCE_SMAX:
4748     return RISCVISD::VECREDUCE_SMAX_VL;
4749   case ISD::VECREDUCE_UMIN:
4750     return RISCVISD::VECREDUCE_UMIN_VL;
4751   case ISD::VECREDUCE_SMIN:
4752     return RISCVISD::VECREDUCE_SMIN_VL;
4753   case ISD::VECREDUCE_AND:
4754     return RISCVISD::VECREDUCE_AND_VL;
4755   case ISD::VECREDUCE_OR:
4756     return RISCVISD::VECREDUCE_OR_VL;
4757   case ISD::VECREDUCE_XOR:
4758     return RISCVISD::VECREDUCE_XOR_VL;
4759   }
4760 }
4761 
4762 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
4763                                                          SelectionDAG &DAG,
4764                                                          bool IsVP) const {
4765   SDLoc DL(Op);
4766   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
4767   MVT VecVT = Vec.getSimpleValueType();
4768   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
4769           Op.getOpcode() == ISD::VECREDUCE_OR ||
4770           Op.getOpcode() == ISD::VECREDUCE_XOR ||
4771           Op.getOpcode() == ISD::VP_REDUCE_AND ||
4772           Op.getOpcode() == ISD::VP_REDUCE_OR ||
4773           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
4774          "Unexpected reduction lowering");
4775 
4776   MVT XLenVT = Subtarget.getXLenVT();
4777   assert(Op.getValueType() == XLenVT &&
4778          "Expected reduction output to be legalized to XLenVT");
4779 
4780   MVT ContainerVT = VecVT;
4781   if (VecVT.isFixedLengthVector()) {
4782     ContainerVT = getContainerForFixedLengthVector(VecVT);
4783     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4784   }
4785 
4786   SDValue Mask, VL;
4787   if (IsVP) {
4788     Mask = Op.getOperand(2);
4789     VL = Op.getOperand(3);
4790   } else {
4791     std::tie(Mask, VL) =
4792         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4793   }
4794 
4795   unsigned BaseOpc;
4796   ISD::CondCode CC;
4797   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4798 
4799   switch (Op.getOpcode()) {
4800   default:
4801     llvm_unreachable("Unhandled reduction");
4802   case ISD::VECREDUCE_AND:
4803   case ISD::VP_REDUCE_AND: {
4804     // vcpop ~x == 0
4805     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
4806     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
4807     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4808     CC = ISD::SETEQ;
4809     BaseOpc = ISD::AND;
4810     break;
4811   }
4812   case ISD::VECREDUCE_OR:
4813   case ISD::VP_REDUCE_OR:
4814     // vcpop x != 0
4815     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4816     CC = ISD::SETNE;
4817     BaseOpc = ISD::OR;
4818     break;
4819   case ISD::VECREDUCE_XOR:
4820   case ISD::VP_REDUCE_XOR: {
4821     // ((vcpop x) & 1) != 0
4822     SDValue One = DAG.getConstant(1, DL, XLenVT);
4823     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4824     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
4825     CC = ISD::SETNE;
4826     BaseOpc = ISD::XOR;
4827     break;
4828   }
4829   }
4830 
4831   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
4832 
4833   if (!IsVP)
4834     return SetCC;
4835 
4836   // Now include the start value in the operation.
4837   // Note that we must return the start value when no elements are operated
4838   // upon. The vcpop instructions we've emitted in each case above will return
4839   // 0 for an inactive vector, and so we've already received the neutral value:
4840   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
4841   // can simply include the start value.
4842   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
4843 }
4844 
4845 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
4846                                             SelectionDAG &DAG) const {
4847   SDLoc DL(Op);
4848   SDValue Vec = Op.getOperand(0);
4849   EVT VecEVT = Vec.getValueType();
4850 
4851   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
4852 
4853   // Due to ordering in legalize types we may have a vector type that needs to
4854   // be split. Do that manually so we can get down to a legal type.
4855   while (getTypeAction(*DAG.getContext(), VecEVT) ==
4856          TargetLowering::TypeSplitVector) {
4857     SDValue Lo, Hi;
4858     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
4859     VecEVT = Lo.getValueType();
4860     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
4861   }
4862 
4863   // TODO: The type may need to be widened rather than split. Or widened before
4864   // it can be split.
4865   if (!isTypeLegal(VecEVT))
4866     return SDValue();
4867 
4868   MVT VecVT = VecEVT.getSimpleVT();
4869   MVT VecEltVT = VecVT.getVectorElementType();
4870   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
4871 
4872   MVT ContainerVT = VecVT;
4873   if (VecVT.isFixedLengthVector()) {
4874     ContainerVT = getContainerForFixedLengthVector(VecVT);
4875     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4876   }
4877 
4878   MVT M1VT = getLMUL1VT(ContainerVT);
4879   MVT XLenVT = Subtarget.getXLenVT();
4880 
4881   SDValue Mask, VL;
4882   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4883 
4884   SDValue NeutralElem =
4885       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
4886   SDValue IdentitySplat = lowerScalarSplat(
4887       NeutralElem, DAG.getConstant(1, DL, XLenVT), M1VT, DL, DAG, Subtarget);
4888   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
4889                                   IdentitySplat, Mask, VL);
4890   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
4891                              DAG.getConstant(0, DL, XLenVT));
4892   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
4893 }
4894 
4895 // Given a reduction op, this function returns the matching reduction opcode,
4896 // the vector SDValue and the scalar SDValue required to lower this to a
4897 // RISCVISD node.
4898 static std::tuple<unsigned, SDValue, SDValue>
4899 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
4900   SDLoc DL(Op);
4901   auto Flags = Op->getFlags();
4902   unsigned Opcode = Op.getOpcode();
4903   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
4904   switch (Opcode) {
4905   default:
4906     llvm_unreachable("Unhandled reduction");
4907   case ISD::VECREDUCE_FADD: {
4908     // Use positive zero if we can. It is cheaper to materialize.
4909     SDValue Zero =
4910         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
4911     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
4912   }
4913   case ISD::VECREDUCE_SEQ_FADD:
4914     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
4915                            Op.getOperand(0));
4916   case ISD::VECREDUCE_FMIN:
4917     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
4918                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4919   case ISD::VECREDUCE_FMAX:
4920     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
4921                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4922   }
4923 }
4924 
4925 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
4926                                               SelectionDAG &DAG) const {
4927   SDLoc DL(Op);
4928   MVT VecEltVT = Op.getSimpleValueType();
4929 
4930   unsigned RVVOpcode;
4931   SDValue VectorVal, ScalarVal;
4932   std::tie(RVVOpcode, VectorVal, ScalarVal) =
4933       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
4934   MVT VecVT = VectorVal.getSimpleValueType();
4935 
4936   MVT ContainerVT = VecVT;
4937   if (VecVT.isFixedLengthVector()) {
4938     ContainerVT = getContainerForFixedLengthVector(VecVT);
4939     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
4940   }
4941 
4942   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
4943   MVT XLenVT = Subtarget.getXLenVT();
4944 
4945   SDValue Mask, VL;
4946   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4947 
4948   SDValue ScalarSplat = lowerScalarSplat(
4949       ScalarVal, DAG.getConstant(1, DL, XLenVT), M1VT, DL, DAG, Subtarget);
4950   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
4951                                   VectorVal, ScalarSplat, Mask, VL);
4952   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
4953                      DAG.getConstant(0, DL, XLenVT));
4954 }
4955 
4956 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
4957   switch (ISDOpcode) {
4958   default:
4959     llvm_unreachable("Unhandled reduction");
4960   case ISD::VP_REDUCE_ADD:
4961     return RISCVISD::VECREDUCE_ADD_VL;
4962   case ISD::VP_REDUCE_UMAX:
4963     return RISCVISD::VECREDUCE_UMAX_VL;
4964   case ISD::VP_REDUCE_SMAX:
4965     return RISCVISD::VECREDUCE_SMAX_VL;
4966   case ISD::VP_REDUCE_UMIN:
4967     return RISCVISD::VECREDUCE_UMIN_VL;
4968   case ISD::VP_REDUCE_SMIN:
4969     return RISCVISD::VECREDUCE_SMIN_VL;
4970   case ISD::VP_REDUCE_AND:
4971     return RISCVISD::VECREDUCE_AND_VL;
4972   case ISD::VP_REDUCE_OR:
4973     return RISCVISD::VECREDUCE_OR_VL;
4974   case ISD::VP_REDUCE_XOR:
4975     return RISCVISD::VECREDUCE_XOR_VL;
4976   case ISD::VP_REDUCE_FADD:
4977     return RISCVISD::VECREDUCE_FADD_VL;
4978   case ISD::VP_REDUCE_SEQ_FADD:
4979     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
4980   case ISD::VP_REDUCE_FMAX:
4981     return RISCVISD::VECREDUCE_FMAX_VL;
4982   case ISD::VP_REDUCE_FMIN:
4983     return RISCVISD::VECREDUCE_FMIN_VL;
4984   }
4985 }
4986 
4987 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
4988                                            SelectionDAG &DAG) const {
4989   SDLoc DL(Op);
4990   SDValue Vec = Op.getOperand(1);
4991   EVT VecEVT = Vec.getValueType();
4992 
4993   // TODO: The type may need to be widened rather than split. Or widened before
4994   // it can be split.
4995   if (!isTypeLegal(VecEVT))
4996     return SDValue();
4997 
4998   MVT VecVT = VecEVT.getSimpleVT();
4999   MVT VecEltVT = VecVT.getVectorElementType();
5000   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5001 
5002   MVT ContainerVT = VecVT;
5003   if (VecVT.isFixedLengthVector()) {
5004     ContainerVT = getContainerForFixedLengthVector(VecVT);
5005     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5006   }
5007 
5008   SDValue VL = Op.getOperand(3);
5009   SDValue Mask = Op.getOperand(2);
5010 
5011   MVT M1VT = getLMUL1VT(ContainerVT);
5012   MVT XLenVT = Subtarget.getXLenVT();
5013   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5014 
5015   SDValue StartSplat =
5016       lowerScalarSplat(Op.getOperand(0), DAG.getConstant(1, DL, XLenVT), M1VT,
5017                        DL, DAG, Subtarget);
5018   SDValue Reduction =
5019       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5020   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5021                              DAG.getConstant(0, DL, XLenVT));
5022   if (!VecVT.isInteger())
5023     return Elt0;
5024   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5025 }
5026 
5027 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5028                                                    SelectionDAG &DAG) const {
5029   SDValue Vec = Op.getOperand(0);
5030   SDValue SubVec = Op.getOperand(1);
5031   MVT VecVT = Vec.getSimpleValueType();
5032   MVT SubVecVT = SubVec.getSimpleValueType();
5033 
5034   SDLoc DL(Op);
5035   MVT XLenVT = Subtarget.getXLenVT();
5036   unsigned OrigIdx = Op.getConstantOperandVal(2);
5037   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5038 
5039   // We don't have the ability to slide mask vectors up indexed by their i1
5040   // elements; the smallest we can do is i8. Often we are able to bitcast to
5041   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5042   // into a scalable one, we might not necessarily have enough scalable
5043   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
5044   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5045       (OrigIdx != 0 || !Vec.isUndef())) {
5046     if (VecVT.getVectorMinNumElements() >= 8 &&
5047         SubVecVT.getVectorMinNumElements() >= 8) {
5048       assert(OrigIdx % 8 == 0 && "Invalid index");
5049       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5050              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5051              "Unexpected mask vector lowering");
5052       OrigIdx /= 8;
5053       SubVecVT =
5054           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5055                            SubVecVT.isScalableVector());
5056       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5057                                VecVT.isScalableVector());
5058       Vec = DAG.getBitcast(VecVT, Vec);
5059       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5060     } else {
5061       // We can't slide this mask vector up indexed by its i1 elements.
5062       // This poses a problem when we wish to insert a scalable vector which
5063       // can't be re-expressed as a larger type. Just choose the slow path and
5064       // extend to a larger type, then truncate back down.
5065       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5066       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5067       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5068       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5069       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5070                         Op.getOperand(2));
5071       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5072       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5073     }
5074   }
5075 
5076   // If the subvector vector is a fixed-length type, we cannot use subregister
5077   // manipulation to simplify the codegen; we don't know which register of a
5078   // LMUL group contains the specific subvector as we only know the minimum
5079   // register size. Therefore we must slide the vector group up the full
5080   // amount.
5081   if (SubVecVT.isFixedLengthVector()) {
5082     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5083       return Op;
5084     MVT ContainerVT = VecVT;
5085     if (VecVT.isFixedLengthVector()) {
5086       ContainerVT = getContainerForFixedLengthVector(VecVT);
5087       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5088     }
5089     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5090                          DAG.getUNDEF(ContainerVT), SubVec,
5091                          DAG.getConstant(0, DL, XLenVT));
5092     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5093       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5094       return DAG.getBitcast(Op.getValueType(), SubVec);
5095     }
5096     SDValue Mask =
5097         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5098     // Set the vector length to only the number of elements we care about. Note
5099     // that for slideup this includes the offset.
5100     SDValue VL =
5101         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5102     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5103     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5104                                   SubVec, SlideupAmt, Mask, VL);
5105     if (VecVT.isFixedLengthVector())
5106       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5107     return DAG.getBitcast(Op.getValueType(), Slideup);
5108   }
5109 
5110   unsigned SubRegIdx, RemIdx;
5111   std::tie(SubRegIdx, RemIdx) =
5112       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5113           VecVT, SubVecVT, OrigIdx, TRI);
5114 
5115   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5116   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5117                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5118                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5119 
5120   // 1. If the Idx has been completely eliminated and this subvector's size is
5121   // a vector register or a multiple thereof, or the surrounding elements are
5122   // undef, then this is a subvector insert which naturally aligns to a vector
5123   // register. These can easily be handled using subregister manipulation.
5124   // 2. If the subvector is smaller than a vector register, then the insertion
5125   // must preserve the undisturbed elements of the register. We do this by
5126   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5127   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5128   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5129   // LMUL=1 type back into the larger vector (resolving to another subregister
5130   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
5131   // to avoid allocating a large register group to hold our subvector.
5132   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5133     return Op;
5134 
5135   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
5136   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
5137   // (in our case undisturbed). This means we can set up a subvector insertion
5138   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5139   // size of the subvector.
5140   MVT InterSubVT = VecVT;
5141   SDValue AlignedExtract = Vec;
5142   unsigned AlignedIdx = OrigIdx - RemIdx;
5143   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5144     InterSubVT = getLMUL1VT(VecVT);
5145     // Extract a subvector equal to the nearest full vector register type. This
5146     // should resolve to a EXTRACT_SUBREG instruction.
5147     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5148                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5149   }
5150 
5151   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5152   // For scalable vectors this must be further multiplied by vscale.
5153   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5154 
5155   SDValue Mask, VL;
5156   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5157 
5158   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5159   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5160   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5161   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5162 
5163   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5164                        DAG.getUNDEF(InterSubVT), SubVec,
5165                        DAG.getConstant(0, DL, XLenVT));
5166 
5167   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5168                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5169 
5170   // If required, insert this subvector back into the correct vector register.
5171   // This should resolve to an INSERT_SUBREG instruction.
5172   if (VecVT.bitsGT(InterSubVT))
5173     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5174                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5175 
5176   // We might have bitcast from a mask type: cast back to the original type if
5177   // required.
5178   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5179 }
5180 
5181 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5182                                                     SelectionDAG &DAG) const {
5183   SDValue Vec = Op.getOperand(0);
5184   MVT SubVecVT = Op.getSimpleValueType();
5185   MVT VecVT = Vec.getSimpleValueType();
5186 
5187   SDLoc DL(Op);
5188   MVT XLenVT = Subtarget.getXLenVT();
5189   unsigned OrigIdx = Op.getConstantOperandVal(1);
5190   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5191 
5192   // We don't have the ability to slide mask vectors down indexed by their i1
5193   // elements; the smallest we can do is i8. Often we are able to bitcast to
5194   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5195   // from a scalable one, we might not necessarily have enough scalable
5196   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
5197   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5198     if (VecVT.getVectorMinNumElements() >= 8 &&
5199         SubVecVT.getVectorMinNumElements() >= 8) {
5200       assert(OrigIdx % 8 == 0 && "Invalid index");
5201       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5202              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5203              "Unexpected mask vector lowering");
5204       OrigIdx /= 8;
5205       SubVecVT =
5206           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5207                            SubVecVT.isScalableVector());
5208       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5209                                VecVT.isScalableVector());
5210       Vec = DAG.getBitcast(VecVT, Vec);
5211     } else {
5212       // We can't slide this mask vector down, indexed by its i1 elements.
5213       // This poses a problem when we wish to extract a scalable vector which
5214       // can't be re-expressed as a larger type. Just choose the slow path and
5215       // extend to a larger type, then truncate back down.
5216       // TODO: We could probably improve this when extracting certain fixed
5217       // from fixed, where we can extract as i8 and shift the correct element
5218       // right to reach the desired subvector?
5219       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5220       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5221       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5222       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5223                         Op.getOperand(1));
5224       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5225       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5226     }
5227   }
5228 
5229   // If the subvector vector is a fixed-length type, we cannot use subregister
5230   // manipulation to simplify the codegen; we don't know which register of a
5231   // LMUL group contains the specific subvector as we only know the minimum
5232   // register size. Therefore we must slide the vector group down the full
5233   // amount.
5234   if (SubVecVT.isFixedLengthVector()) {
5235     // With an index of 0 this is a cast-like subvector, which can be performed
5236     // with subregister operations.
5237     if (OrigIdx == 0)
5238       return Op;
5239     MVT ContainerVT = VecVT;
5240     if (VecVT.isFixedLengthVector()) {
5241       ContainerVT = getContainerForFixedLengthVector(VecVT);
5242       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5243     }
5244     SDValue Mask =
5245         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5246     // Set the vector length to only the number of elements we care about. This
5247     // avoids sliding down elements we're going to discard straight away.
5248     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5249     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5250     SDValue Slidedown =
5251         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5252                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5253     // Now we can use a cast-like subvector extract to get the result.
5254     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5255                             DAG.getConstant(0, DL, XLenVT));
5256     return DAG.getBitcast(Op.getValueType(), Slidedown);
5257   }
5258 
5259   unsigned SubRegIdx, RemIdx;
5260   std::tie(SubRegIdx, RemIdx) =
5261       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5262           VecVT, SubVecVT, OrigIdx, TRI);
5263 
5264   // If the Idx has been completely eliminated then this is a subvector extract
5265   // which naturally aligns to a vector register. These can easily be handled
5266   // using subregister manipulation.
5267   if (RemIdx == 0)
5268     return Op;
5269 
5270   // Else we must shift our vector register directly to extract the subvector.
5271   // Do this using VSLIDEDOWN.
5272 
5273   // If the vector type is an LMUL-group type, extract a subvector equal to the
5274   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
5275   // instruction.
5276   MVT InterSubVT = VecVT;
5277   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5278     InterSubVT = getLMUL1VT(VecVT);
5279     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5280                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5281   }
5282 
5283   // Slide this vector register down by the desired number of elements in order
5284   // to place the desired subvector starting at element 0.
5285   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5286   // For scalable vectors this must be further multiplied by vscale.
5287   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5288 
5289   SDValue Mask, VL;
5290   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5291   SDValue Slidedown =
5292       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5293                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5294 
5295   // Now the vector is in the right position, extract our final subvector. This
5296   // should resolve to a COPY.
5297   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5298                           DAG.getConstant(0, DL, XLenVT));
5299 
5300   // We might have bitcast from a mask type: cast back to the original type if
5301   // required.
5302   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5303 }
5304 
5305 // Lower step_vector to the vid instruction. Any non-identity step value must
5306 // be accounted for my manual expansion.
5307 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5308                                               SelectionDAG &DAG) const {
5309   SDLoc DL(Op);
5310   MVT VT = Op.getSimpleValueType();
5311   MVT XLenVT = Subtarget.getXLenVT();
5312   SDValue Mask, VL;
5313   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5314   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5315   uint64_t StepValImm = Op.getConstantOperandVal(0);
5316   if (StepValImm != 1) {
5317     if (isPowerOf2_64(StepValImm)) {
5318       SDValue StepVal =
5319           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
5320                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5321       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5322     } else {
5323       SDValue StepVal = lowerScalarSplat(
5324           DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT,
5325           DL, DAG, Subtarget);
5326       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5327     }
5328   }
5329   return StepVec;
5330 }
5331 
5332 // Implement vector_reverse using vrgather.vv with indices determined by
5333 // subtracting the id of each element from (VLMAX-1). This will convert
5334 // the indices like so:
5335 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5336 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5337 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5338                                                  SelectionDAG &DAG) const {
5339   SDLoc DL(Op);
5340   MVT VecVT = Op.getSimpleValueType();
5341   unsigned EltSize = VecVT.getScalarSizeInBits();
5342   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5343 
5344   unsigned MaxVLMAX = 0;
5345   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5346   if (VectorBitsMax != 0)
5347     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
5348 
5349   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5350   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5351 
5352   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5353   // to use vrgatherei16.vv.
5354   // TODO: It's also possible to use vrgatherei16.vv for other types to
5355   // decrease register width for the index calculation.
5356   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
5357     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
5358     // Reverse each half, then reassemble them in reverse order.
5359     // NOTE: It's also possible that after splitting that VLMAX no longer
5360     // requires vrgatherei16.vv.
5361     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5362       SDValue Lo, Hi;
5363       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5364       EVT LoVT, HiVT;
5365       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5366       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5367       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5368       // Reassemble the low and high pieces reversed.
5369       // FIXME: This is a CONCAT_VECTORS.
5370       SDValue Res =
5371           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5372                       DAG.getIntPtrConstant(0, DL));
5373       return DAG.getNode(
5374           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5375           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5376     }
5377 
5378     // Just promote the int type to i16 which will double the LMUL.
5379     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5380     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5381   }
5382 
5383   MVT XLenVT = Subtarget.getXLenVT();
5384   SDValue Mask, VL;
5385   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5386 
5387   // Calculate VLMAX-1 for the desired SEW.
5388   unsigned MinElts = VecVT.getVectorMinNumElements();
5389   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5390                               DAG.getConstant(MinElts, DL, XLenVT));
5391   SDValue VLMinus1 =
5392       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5393 
5394   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5395   bool IsRV32E64 =
5396       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5397   SDValue SplatVL;
5398   if (!IsRV32E64)
5399     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5400   else
5401     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
5402 
5403   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5404   SDValue Indices =
5405       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5406 
5407   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5408 }
5409 
5410 SDValue
5411 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5412                                                      SelectionDAG &DAG) const {
5413   SDLoc DL(Op);
5414   auto *Load = cast<LoadSDNode>(Op);
5415 
5416   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5417                                         Load->getMemoryVT(),
5418                                         *Load->getMemOperand()) &&
5419          "Expecting a correctly-aligned load");
5420 
5421   MVT VT = Op.getSimpleValueType();
5422   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5423 
5424   SDValue VL =
5425       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5426 
5427   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5428   SDValue NewLoad = DAG.getMemIntrinsicNode(
5429       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
5430       Load->getMemoryVT(), Load->getMemOperand());
5431 
5432   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5433   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5434 }
5435 
5436 SDValue
5437 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5438                                                       SelectionDAG &DAG) const {
5439   SDLoc DL(Op);
5440   auto *Store = cast<StoreSDNode>(Op);
5441 
5442   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5443                                         Store->getMemoryVT(),
5444                                         *Store->getMemOperand()) &&
5445          "Expecting a correctly-aligned store");
5446 
5447   SDValue StoreVal = Store->getValue();
5448   MVT VT = StoreVal.getSimpleValueType();
5449 
5450   // If the size less than a byte, we need to pad with zeros to make a byte.
5451   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5452     VT = MVT::v8i1;
5453     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5454                            DAG.getConstant(0, DL, VT), StoreVal,
5455                            DAG.getIntPtrConstant(0, DL));
5456   }
5457 
5458   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5459 
5460   SDValue VL =
5461       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5462 
5463   SDValue NewValue =
5464       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5465   return DAG.getMemIntrinsicNode(
5466       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
5467       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
5468       Store->getMemoryVT(), Store->getMemOperand());
5469 }
5470 
5471 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5472                                              SelectionDAG &DAG) const {
5473   SDLoc DL(Op);
5474   MVT VT = Op.getSimpleValueType();
5475 
5476   const auto *MemSD = cast<MemSDNode>(Op);
5477   EVT MemVT = MemSD->getMemoryVT();
5478   MachineMemOperand *MMO = MemSD->getMemOperand();
5479   SDValue Chain = MemSD->getChain();
5480   SDValue BasePtr = MemSD->getBasePtr();
5481 
5482   SDValue Mask, PassThru, VL;
5483   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5484     Mask = VPLoad->getMask();
5485     PassThru = DAG.getUNDEF(VT);
5486     VL = VPLoad->getVectorLength();
5487   } else {
5488     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5489     Mask = MLoad->getMask();
5490     PassThru = MLoad->getPassThru();
5491   }
5492 
5493   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5494 
5495   MVT XLenVT = Subtarget.getXLenVT();
5496 
5497   MVT ContainerVT = VT;
5498   if (VT.isFixedLengthVector()) {
5499     ContainerVT = getContainerForFixedLengthVector(VT);
5500     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5501     if (!IsUnmasked) {
5502       MVT MaskVT =
5503           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5504       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5505     }
5506   }
5507 
5508   if (!VL)
5509     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5510 
5511   unsigned IntID =
5512       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5513   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5514   if (IsUnmasked)
5515     Ops.push_back(DAG.getUNDEF(ContainerVT));
5516   else
5517     Ops.push_back(PassThru);
5518   Ops.push_back(BasePtr);
5519   if (!IsUnmasked)
5520     Ops.push_back(Mask);
5521   Ops.push_back(VL);
5522   if (!IsUnmasked)
5523     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5524 
5525   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5526 
5527   SDValue Result =
5528       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5529   Chain = Result.getValue(1);
5530 
5531   if (VT.isFixedLengthVector())
5532     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5533 
5534   return DAG.getMergeValues({Result, Chain}, DL);
5535 }
5536 
5537 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5538                                               SelectionDAG &DAG) const {
5539   SDLoc DL(Op);
5540 
5541   const auto *MemSD = cast<MemSDNode>(Op);
5542   EVT MemVT = MemSD->getMemoryVT();
5543   MachineMemOperand *MMO = MemSD->getMemOperand();
5544   SDValue Chain = MemSD->getChain();
5545   SDValue BasePtr = MemSD->getBasePtr();
5546   SDValue Val, Mask, VL;
5547 
5548   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5549     Val = VPStore->getValue();
5550     Mask = VPStore->getMask();
5551     VL = VPStore->getVectorLength();
5552   } else {
5553     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5554     Val = MStore->getValue();
5555     Mask = MStore->getMask();
5556   }
5557 
5558   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5559 
5560   MVT VT = Val.getSimpleValueType();
5561   MVT XLenVT = Subtarget.getXLenVT();
5562 
5563   MVT ContainerVT = VT;
5564   if (VT.isFixedLengthVector()) {
5565     ContainerVT = getContainerForFixedLengthVector(VT);
5566 
5567     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5568     if (!IsUnmasked) {
5569       MVT MaskVT =
5570           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5571       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5572     }
5573   }
5574 
5575   if (!VL)
5576     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5577 
5578   unsigned IntID =
5579       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5580   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5581   Ops.push_back(Val);
5582   Ops.push_back(BasePtr);
5583   if (!IsUnmasked)
5584     Ops.push_back(Mask);
5585   Ops.push_back(VL);
5586 
5587   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5588                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5589 }
5590 
5591 SDValue
5592 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5593                                                       SelectionDAG &DAG) const {
5594   MVT InVT = Op.getOperand(0).getSimpleValueType();
5595   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5596 
5597   MVT VT = Op.getSimpleValueType();
5598 
5599   SDValue Op1 =
5600       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5601   SDValue Op2 =
5602       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5603 
5604   SDLoc DL(Op);
5605   SDValue VL =
5606       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5607 
5608   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5609   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5610 
5611   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5612                             Op.getOperand(2), Mask, VL);
5613 
5614   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5615 }
5616 
5617 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5618     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5619   MVT VT = Op.getSimpleValueType();
5620 
5621   if (VT.getVectorElementType() == MVT::i1)
5622     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5623 
5624   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5625 }
5626 
5627 SDValue
5628 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5629                                                       SelectionDAG &DAG) const {
5630   unsigned Opc;
5631   switch (Op.getOpcode()) {
5632   default: llvm_unreachable("Unexpected opcode!");
5633   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5634   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5635   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5636   }
5637 
5638   return lowerToScalableOp(Op, DAG, Opc);
5639 }
5640 
5641 // Lower vector ABS to smax(X, sub(0, X)).
5642 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5643   SDLoc DL(Op);
5644   MVT VT = Op.getSimpleValueType();
5645   SDValue X = Op.getOperand(0);
5646 
5647   assert(VT.isFixedLengthVector() && "Unexpected type");
5648 
5649   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5650   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5651 
5652   SDValue Mask, VL;
5653   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5654 
5655   SDValue SplatZero =
5656       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5657                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5658   SDValue NegX =
5659       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5660   SDValue Max =
5661       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5662 
5663   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5664 }
5665 
5666 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5667     SDValue Op, SelectionDAG &DAG) const {
5668   SDLoc DL(Op);
5669   MVT VT = Op.getSimpleValueType();
5670   SDValue Mag = Op.getOperand(0);
5671   SDValue Sign = Op.getOperand(1);
5672   assert(Mag.getValueType() == Sign.getValueType() &&
5673          "Can only handle COPYSIGN with matching types.");
5674 
5675   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5676   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5677   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5678 
5679   SDValue Mask, VL;
5680   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5681 
5682   SDValue CopySign =
5683       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5684 
5685   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5686 }
5687 
5688 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
5689     SDValue Op, SelectionDAG &DAG) const {
5690   MVT VT = Op.getSimpleValueType();
5691   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5692 
5693   MVT I1ContainerVT =
5694       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5695 
5696   SDValue CC =
5697       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
5698   SDValue Op1 =
5699       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5700   SDValue Op2 =
5701       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
5702 
5703   SDLoc DL(Op);
5704   SDValue Mask, VL;
5705   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5706 
5707   SDValue Select =
5708       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
5709 
5710   return convertFromScalableVector(VT, Select, DAG, Subtarget);
5711 }
5712 
5713 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
5714                                                unsigned NewOpc,
5715                                                bool HasMask) const {
5716   MVT VT = Op.getSimpleValueType();
5717   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5718 
5719   // Create list of operands by converting existing ones to scalable types.
5720   SmallVector<SDValue, 6> Ops;
5721   for (const SDValue &V : Op->op_values()) {
5722     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5723 
5724     // Pass through non-vector operands.
5725     if (!V.getValueType().isVector()) {
5726       Ops.push_back(V);
5727       continue;
5728     }
5729 
5730     // "cast" fixed length vector to a scalable vector.
5731     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
5732            "Only fixed length vectors are supported!");
5733     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5734   }
5735 
5736   SDLoc DL(Op);
5737   SDValue Mask, VL;
5738   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5739   if (HasMask)
5740     Ops.push_back(Mask);
5741   Ops.push_back(VL);
5742 
5743   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
5744   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
5745 }
5746 
5747 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
5748 // * Operands of each node are assumed to be in the same order.
5749 // * The EVL operand is promoted from i32 to i64 on RV64.
5750 // * Fixed-length vectors are converted to their scalable-vector container
5751 //   types.
5752 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
5753                                        unsigned RISCVISDOpc) const {
5754   SDLoc DL(Op);
5755   MVT VT = Op.getSimpleValueType();
5756   SmallVector<SDValue, 4> Ops;
5757 
5758   for (const auto &OpIdx : enumerate(Op->ops())) {
5759     SDValue V = OpIdx.value();
5760     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5761     // Pass through operands which aren't fixed-length vectors.
5762     if (!V.getValueType().isFixedLengthVector()) {
5763       Ops.push_back(V);
5764       continue;
5765     }
5766     // "cast" fixed length vector to a scalable vector.
5767     MVT OpVT = V.getSimpleValueType();
5768     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
5769     assert(useRVVForFixedLengthVectorVT(OpVT) &&
5770            "Only fixed length vectors are supported!");
5771     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5772   }
5773 
5774   if (!VT.isFixedLengthVector())
5775     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
5776 
5777   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5778 
5779   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
5780 
5781   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
5782 }
5783 
5784 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
5785                                             unsigned MaskOpc,
5786                                             unsigned VecOpc) const {
5787   MVT VT = Op.getSimpleValueType();
5788   if (VT.getVectorElementType() != MVT::i1)
5789     return lowerVPOp(Op, DAG, VecOpc);
5790 
5791   // It is safe to drop mask parameter as masked-off elements are undef.
5792   SDValue Op1 = Op->getOperand(0);
5793   SDValue Op2 = Op->getOperand(1);
5794   SDValue VL = Op->getOperand(3);
5795 
5796   MVT ContainerVT = VT;
5797   const bool IsFixed = VT.isFixedLengthVector();
5798   if (IsFixed) {
5799     ContainerVT = getContainerForFixedLengthVector(VT);
5800     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
5801     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
5802   }
5803 
5804   SDLoc DL(Op);
5805   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
5806   if (!IsFixed)
5807     return Val;
5808   return convertFromScalableVector(VT, Val, DAG, Subtarget);
5809 }
5810 
5811 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
5812 // matched to a RVV indexed load. The RVV indexed load instructions only
5813 // support the "unsigned unscaled" addressing mode; indices are implicitly
5814 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
5815 // signed or scaled indexing is extended to the XLEN value type and scaled
5816 // accordingly.
5817 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
5818                                                SelectionDAG &DAG) const {
5819   SDLoc DL(Op);
5820   MVT VT = Op.getSimpleValueType();
5821 
5822   const auto *MemSD = cast<MemSDNode>(Op.getNode());
5823   EVT MemVT = MemSD->getMemoryVT();
5824   MachineMemOperand *MMO = MemSD->getMemOperand();
5825   SDValue Chain = MemSD->getChain();
5826   SDValue BasePtr = MemSD->getBasePtr();
5827 
5828   ISD::LoadExtType LoadExtType;
5829   SDValue Index, Mask, PassThru, VL;
5830 
5831   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
5832     Index = VPGN->getIndex();
5833     Mask = VPGN->getMask();
5834     PassThru = DAG.getUNDEF(VT);
5835     VL = VPGN->getVectorLength();
5836     // VP doesn't support extending loads.
5837     LoadExtType = ISD::NON_EXTLOAD;
5838   } else {
5839     // Else it must be a MGATHER.
5840     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
5841     Index = MGN->getIndex();
5842     Mask = MGN->getMask();
5843     PassThru = MGN->getPassThru();
5844     LoadExtType = MGN->getExtensionType();
5845   }
5846 
5847   MVT IndexVT = Index.getSimpleValueType();
5848   MVT XLenVT = Subtarget.getXLenVT();
5849 
5850   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
5851          "Unexpected VTs!");
5852   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
5853   // Targets have to explicitly opt-in for extending vector loads.
5854   assert(LoadExtType == ISD::NON_EXTLOAD &&
5855          "Unexpected extending MGATHER/VP_GATHER");
5856   (void)LoadExtType;
5857 
5858   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5859   // the selection of the masked intrinsics doesn't do this for us.
5860   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5861 
5862   MVT ContainerVT = VT;
5863   if (VT.isFixedLengthVector()) {
5864     // We need to use the larger of the result and index type to determine the
5865     // scalable type to use so we don't increase LMUL for any operand/result.
5866     if (VT.bitsGE(IndexVT)) {
5867       ContainerVT = getContainerForFixedLengthVector(VT);
5868       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
5869                                  ContainerVT.getVectorElementCount());
5870     } else {
5871       IndexVT = getContainerForFixedLengthVector(IndexVT);
5872       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
5873                                      IndexVT.getVectorElementCount());
5874     }
5875 
5876     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
5877 
5878     if (!IsUnmasked) {
5879       MVT MaskVT =
5880           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5881       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5882       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5883     }
5884   }
5885 
5886   if (!VL)
5887     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5888 
5889   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
5890     IndexVT = IndexVT.changeVectorElementType(XLenVT);
5891     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
5892                                    VL);
5893     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
5894                         TrueMask, VL);
5895   }
5896 
5897   unsigned IntID =
5898       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
5899   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5900   if (IsUnmasked)
5901     Ops.push_back(DAG.getUNDEF(ContainerVT));
5902   else
5903     Ops.push_back(PassThru);
5904   Ops.push_back(BasePtr);
5905   Ops.push_back(Index);
5906   if (!IsUnmasked)
5907     Ops.push_back(Mask);
5908   Ops.push_back(VL);
5909   if (!IsUnmasked)
5910     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5911 
5912   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5913   SDValue Result =
5914       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5915   Chain = Result.getValue(1);
5916 
5917   if (VT.isFixedLengthVector())
5918     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5919 
5920   return DAG.getMergeValues({Result, Chain}, DL);
5921 }
5922 
5923 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
5924 // matched to a RVV indexed store. The RVV indexed store instructions only
5925 // support the "unsigned unscaled" addressing mode; indices are implicitly
5926 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
5927 // signed or scaled indexing is extended to the XLEN value type and scaled
5928 // accordingly.
5929 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
5930                                                 SelectionDAG &DAG) const {
5931   SDLoc DL(Op);
5932   const auto *MemSD = cast<MemSDNode>(Op.getNode());
5933   EVT MemVT = MemSD->getMemoryVT();
5934   MachineMemOperand *MMO = MemSD->getMemOperand();
5935   SDValue Chain = MemSD->getChain();
5936   SDValue BasePtr = MemSD->getBasePtr();
5937 
5938   bool IsTruncatingStore = false;
5939   SDValue Index, Mask, Val, VL;
5940 
5941   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
5942     Index = VPSN->getIndex();
5943     Mask = VPSN->getMask();
5944     Val = VPSN->getValue();
5945     VL = VPSN->getVectorLength();
5946     // VP doesn't support truncating stores.
5947     IsTruncatingStore = false;
5948   } else {
5949     // Else it must be a MSCATTER.
5950     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
5951     Index = MSN->getIndex();
5952     Mask = MSN->getMask();
5953     Val = MSN->getValue();
5954     IsTruncatingStore = MSN->isTruncatingStore();
5955   }
5956 
5957   MVT VT = Val.getSimpleValueType();
5958   MVT IndexVT = Index.getSimpleValueType();
5959   MVT XLenVT = Subtarget.getXLenVT();
5960 
5961   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
5962          "Unexpected VTs!");
5963   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
5964   // Targets have to explicitly opt-in for extending vector loads and
5965   // truncating vector stores.
5966   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
5967   (void)IsTruncatingStore;
5968 
5969   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5970   // the selection of the masked intrinsics doesn't do this for us.
5971   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5972 
5973   MVT ContainerVT = VT;
5974   if (VT.isFixedLengthVector()) {
5975     // We need to use the larger of the value and index type to determine the
5976     // scalable type to use so we don't increase LMUL for any operand/result.
5977     if (VT.bitsGE(IndexVT)) {
5978       ContainerVT = getContainerForFixedLengthVector(VT);
5979       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
5980                                  ContainerVT.getVectorElementCount());
5981     } else {
5982       IndexVT = getContainerForFixedLengthVector(IndexVT);
5983       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
5984                                      IndexVT.getVectorElementCount());
5985     }
5986 
5987     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
5988     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5989 
5990     if (!IsUnmasked) {
5991       MVT MaskVT =
5992           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5993       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5994     }
5995   }
5996 
5997   if (!VL)
5998     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5999 
6000   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6001     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6002     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6003                                    VL);
6004     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6005                         TrueMask, VL);
6006   }
6007 
6008   unsigned IntID =
6009       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
6010   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6011   Ops.push_back(Val);
6012   Ops.push_back(BasePtr);
6013   Ops.push_back(Index);
6014   if (!IsUnmasked)
6015     Ops.push_back(Mask);
6016   Ops.push_back(VL);
6017 
6018   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
6019                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
6020 }
6021 
6022 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
6023                                                SelectionDAG &DAG) const {
6024   const MVT XLenVT = Subtarget.getXLenVT();
6025   SDLoc DL(Op);
6026   SDValue Chain = Op->getOperand(0);
6027   SDValue SysRegNo = DAG.getTargetConstant(
6028       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6029   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
6030   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
6031 
6032   // Encoding used for rounding mode in RISCV differs from that used in
6033   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
6034   // table, which consists of a sequence of 4-bit fields, each representing
6035   // corresponding FLT_ROUNDS mode.
6036   static const int Table =
6037       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
6038       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
6039       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
6040       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
6041       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
6042 
6043   SDValue Shift =
6044       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
6045   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6046                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6047   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6048                                DAG.getConstant(7, DL, XLenVT));
6049 
6050   return DAG.getMergeValues({Masked, Chain}, DL);
6051 }
6052 
6053 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
6054                                                SelectionDAG &DAG) const {
6055   const MVT XLenVT = Subtarget.getXLenVT();
6056   SDLoc DL(Op);
6057   SDValue Chain = Op->getOperand(0);
6058   SDValue RMValue = Op->getOperand(1);
6059   SDValue SysRegNo = DAG.getTargetConstant(
6060       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6061 
6062   // Encoding used for rounding mode in RISCV differs from that used in
6063   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
6064   // a table, which consists of a sequence of 4-bit fields, each representing
6065   // corresponding RISCV mode.
6066   static const unsigned Table =
6067       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
6068       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
6069       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
6070       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
6071       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
6072 
6073   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
6074                               DAG.getConstant(2, DL, XLenVT));
6075   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6076                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6077   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6078                         DAG.getConstant(0x7, DL, XLenVT));
6079   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
6080                      RMValue);
6081 }
6082 
6083 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
6084   switch (IntNo) {
6085   default:
6086     llvm_unreachable("Unexpected Intrinsic");
6087   case Intrinsic::riscv_grev:
6088     return RISCVISD::GREVW;
6089   case Intrinsic::riscv_gorc:
6090     return RISCVISD::GORCW;
6091   case Intrinsic::riscv_bcompress:
6092     return RISCVISD::BCOMPRESSW;
6093   case Intrinsic::riscv_bdecompress:
6094     return RISCVISD::BDECOMPRESSW;
6095   case Intrinsic::riscv_bfp:
6096     return RISCVISD::BFPW;
6097   case Intrinsic::riscv_fsl:
6098     return RISCVISD::FSLW;
6099   case Intrinsic::riscv_fsr:
6100     return RISCVISD::FSRW;
6101   }
6102 }
6103 
6104 // Converts the given intrinsic to a i64 operation with any extension.
6105 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
6106                                          unsigned IntNo) {
6107   SDLoc DL(N);
6108   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
6109   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6110   SDValue NewOp2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6111   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp1, NewOp2);
6112   // ReplaceNodeResults requires we maintain the same type for the return value.
6113   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6114 }
6115 
6116 // Returns the opcode of the target-specific SDNode that implements the 32-bit
6117 // form of the given Opcode.
6118 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
6119   switch (Opcode) {
6120   default:
6121     llvm_unreachable("Unexpected opcode");
6122   case ISD::SHL:
6123     return RISCVISD::SLLW;
6124   case ISD::SRA:
6125     return RISCVISD::SRAW;
6126   case ISD::SRL:
6127     return RISCVISD::SRLW;
6128   case ISD::SDIV:
6129     return RISCVISD::DIVW;
6130   case ISD::UDIV:
6131     return RISCVISD::DIVUW;
6132   case ISD::UREM:
6133     return RISCVISD::REMUW;
6134   case ISD::ROTL:
6135     return RISCVISD::ROLW;
6136   case ISD::ROTR:
6137     return RISCVISD::RORW;
6138   case RISCVISD::GREV:
6139     return RISCVISD::GREVW;
6140   case RISCVISD::GORC:
6141     return RISCVISD::GORCW;
6142   }
6143 }
6144 
6145 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
6146 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
6147 // otherwise be promoted to i64, making it difficult to select the
6148 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
6149 // type i8/i16/i32 is lost.
6150 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
6151                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
6152   SDLoc DL(N);
6153   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6154   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
6155   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
6156   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6157   // ReplaceNodeResults requires we maintain the same type for the return value.
6158   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6159 }
6160 
6161 // Converts the given 32-bit operation to a i64 operation with signed extension
6162 // semantic to reduce the signed extension instructions.
6163 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
6164   SDLoc DL(N);
6165   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6166   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6167   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
6168   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6169                                DAG.getValueType(MVT::i32));
6170   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
6171 }
6172 
6173 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
6174                                              SmallVectorImpl<SDValue> &Results,
6175                                              SelectionDAG &DAG) const {
6176   SDLoc DL(N);
6177   switch (N->getOpcode()) {
6178   default:
6179     llvm_unreachable("Don't know how to custom type legalize this operation!");
6180   case ISD::STRICT_FP_TO_SINT:
6181   case ISD::STRICT_FP_TO_UINT:
6182   case ISD::FP_TO_SINT:
6183   case ISD::FP_TO_UINT: {
6184     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6185            "Unexpected custom legalisation");
6186     bool IsStrict = N->isStrictFPOpcode();
6187     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
6188                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
6189     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
6190     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
6191         TargetLowering::TypeSoftenFloat) {
6192       if (!isTypeLegal(Op0.getValueType()))
6193         return;
6194       if (IsStrict) {
6195         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
6196                                 : RISCVISD::STRICT_FCVT_WU_RV64;
6197         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
6198         SDValue Res = DAG.getNode(
6199             Opc, DL, VTs, N->getOperand(0), Op0,
6200             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6201         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6202         Results.push_back(Res.getValue(1));
6203         return;
6204       }
6205       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
6206       SDValue Res =
6207           DAG.getNode(Opc, DL, MVT::i64, Op0,
6208                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6209       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6210       return;
6211     }
6212     // If the FP type needs to be softened, emit a library call using the 'si'
6213     // version. If we left it to default legalization we'd end up with 'di'. If
6214     // the FP type doesn't need to be softened just let generic type
6215     // legalization promote the result type.
6216     RTLIB::Libcall LC;
6217     if (IsSigned)
6218       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
6219     else
6220       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
6221     MakeLibCallOptions CallOptions;
6222     EVT OpVT = Op0.getValueType();
6223     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
6224     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
6225     SDValue Result;
6226     std::tie(Result, Chain) =
6227         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
6228     Results.push_back(Result);
6229     if (IsStrict)
6230       Results.push_back(Chain);
6231     break;
6232   }
6233   case ISD::READCYCLECOUNTER: {
6234     assert(!Subtarget.is64Bit() &&
6235            "READCYCLECOUNTER only has custom type legalization on riscv32");
6236 
6237     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
6238     SDValue RCW =
6239         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
6240 
6241     Results.push_back(
6242         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6243     Results.push_back(RCW.getValue(2));
6244     break;
6245   }
6246   case ISD::MUL: {
6247     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6248     unsigned XLen = Subtarget.getXLen();
6249     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
6250     if (Size > XLen) {
6251       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6252       SDValue LHS = N->getOperand(0);
6253       SDValue RHS = N->getOperand(1);
6254       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6255 
6256       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6257       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6258       // We need exactly one side to be unsigned.
6259       if (LHSIsU == RHSIsU)
6260         return;
6261 
6262       auto MakeMULPair = [&](SDValue S, SDValue U) {
6263         MVT XLenVT = Subtarget.getXLenVT();
6264         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6265         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6266         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6267         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6268         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6269       };
6270 
6271       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6272       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6273 
6274       // The other operand should be signed, but still prefer MULH when
6275       // possible.
6276       if (RHSIsU && LHSIsS && !RHSIsS)
6277         Results.push_back(MakeMULPair(LHS, RHS));
6278       else if (LHSIsU && RHSIsS && !LHSIsS)
6279         Results.push_back(MakeMULPair(RHS, LHS));
6280 
6281       return;
6282     }
6283     LLVM_FALLTHROUGH;
6284   }
6285   case ISD::ADD:
6286   case ISD::SUB:
6287     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6288            "Unexpected custom legalisation");
6289     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6290     break;
6291   case ISD::SHL:
6292   case ISD::SRA:
6293   case ISD::SRL:
6294     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6295            "Unexpected custom legalisation");
6296     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6297       Results.push_back(customLegalizeToWOp(N, DAG));
6298       break;
6299     }
6300 
6301     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6302     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6303     // shift amount.
6304     if (N->getOpcode() == ISD::SHL) {
6305       SDLoc DL(N);
6306       SDValue NewOp0 =
6307           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6308       SDValue NewOp1 =
6309           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6310       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6311       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6312                                    DAG.getValueType(MVT::i32));
6313       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6314     }
6315 
6316     break;
6317   case ISD::ROTL:
6318   case ISD::ROTR:
6319     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6320            "Unexpected custom legalisation");
6321     Results.push_back(customLegalizeToWOp(N, DAG));
6322     break;
6323   case ISD::CTTZ:
6324   case ISD::CTTZ_ZERO_UNDEF:
6325   case ISD::CTLZ:
6326   case ISD::CTLZ_ZERO_UNDEF: {
6327     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6328            "Unexpected custom legalisation");
6329 
6330     SDValue NewOp0 =
6331         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6332     bool IsCTZ =
6333         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6334     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6335     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6336     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6337     return;
6338   }
6339   case ISD::SDIV:
6340   case ISD::UDIV:
6341   case ISD::UREM: {
6342     MVT VT = N->getSimpleValueType(0);
6343     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6344            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6345            "Unexpected custom legalisation");
6346     // Don't promote division/remainder by constant since we should expand those
6347     // to multiply by magic constant.
6348     // FIXME: What if the expansion is disabled for minsize.
6349     if (N->getOperand(1).getOpcode() == ISD::Constant)
6350       return;
6351 
6352     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6353     // the upper 32 bits. For other types we need to sign or zero extend
6354     // based on the opcode.
6355     unsigned ExtOpc = ISD::ANY_EXTEND;
6356     if (VT != MVT::i32)
6357       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6358                                            : ISD::ZERO_EXTEND;
6359 
6360     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6361     break;
6362   }
6363   case ISD::UADDO:
6364   case ISD::USUBO: {
6365     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6366            "Unexpected custom legalisation");
6367     bool IsAdd = N->getOpcode() == ISD::UADDO;
6368     // Create an ADDW or SUBW.
6369     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6370     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6371     SDValue Res =
6372         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6373     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6374                       DAG.getValueType(MVT::i32));
6375 
6376     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
6377     // Since the inputs are sign extended from i32, this is equivalent to
6378     // comparing the lower 32 bits.
6379     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6380     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
6381                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
6382 
6383     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6384     Results.push_back(Overflow);
6385     return;
6386   }
6387   case ISD::UADDSAT:
6388   case ISD::USUBSAT: {
6389     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6390            "Unexpected custom legalisation");
6391     if (Subtarget.hasStdExtZbb()) {
6392       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6393       // sign extend allows overflow of the lower 32 bits to be detected on
6394       // the promoted size.
6395       SDValue LHS =
6396           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6397       SDValue RHS =
6398           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6399       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6400       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6401       return;
6402     }
6403 
6404     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6405     // promotion for UADDO/USUBO.
6406     Results.push_back(expandAddSubSat(N, DAG));
6407     return;
6408   }
6409   case ISD::BITCAST: {
6410     EVT VT = N->getValueType(0);
6411     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
6412     SDValue Op0 = N->getOperand(0);
6413     EVT Op0VT = Op0.getValueType();
6414     MVT XLenVT = Subtarget.getXLenVT();
6415     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
6416       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
6417       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
6418     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
6419                Subtarget.hasStdExtF()) {
6420       SDValue FPConv =
6421           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
6422       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
6423     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
6424                isTypeLegal(Op0VT)) {
6425       // Custom-legalize bitcasts from fixed-length vector types to illegal
6426       // scalar types in order to improve codegen. Bitcast the vector to a
6427       // one-element vector type whose element type is the same as the result
6428       // type, and extract the first element.
6429       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
6430       if (isTypeLegal(BVT)) {
6431         SDValue BVec = DAG.getBitcast(BVT, Op0);
6432         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
6433                                       DAG.getConstant(0, DL, XLenVT)));
6434       }
6435     }
6436     break;
6437   }
6438   case RISCVISD::GREV:
6439   case RISCVISD::GORC: {
6440     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6441            "Unexpected custom legalisation");
6442     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6443     // This is similar to customLegalizeToWOp, except that we pass the second
6444     // operand (a TargetConstant) straight through: it is already of type
6445     // XLenVT.
6446     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6447     SDValue NewOp0 =
6448         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6449     SDValue NewOp1 =
6450         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6451     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6452     // ReplaceNodeResults requires we maintain the same type for the return
6453     // value.
6454     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6455     break;
6456   }
6457   case RISCVISD::SHFL: {
6458     // There is no SHFLIW instruction, but we can just promote the operation.
6459     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6460            "Unexpected custom legalisation");
6461     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6462     SDValue NewOp0 =
6463         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6464     SDValue NewOp1 =
6465         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6466     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
6467     // ReplaceNodeResults requires we maintain the same type for the return
6468     // value.
6469     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6470     break;
6471   }
6472   case ISD::BSWAP:
6473   case ISD::BITREVERSE: {
6474     MVT VT = N->getSimpleValueType(0);
6475     MVT XLenVT = Subtarget.getXLenVT();
6476     assert((VT == MVT::i8 || VT == MVT::i16 ||
6477             (VT == MVT::i32 && Subtarget.is64Bit())) &&
6478            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
6479     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
6480     unsigned Imm = VT.getSizeInBits() - 1;
6481     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
6482     if (N->getOpcode() == ISD::BSWAP)
6483       Imm &= ~0x7U;
6484     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
6485     SDValue GREVI =
6486         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
6487     // ReplaceNodeResults requires we maintain the same type for the return
6488     // value.
6489     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
6490     break;
6491   }
6492   case ISD::FSHL:
6493   case ISD::FSHR: {
6494     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6495            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
6496     SDValue NewOp0 =
6497         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6498     SDValue NewOp1 =
6499         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6500     SDValue NewShAmt =
6501         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6502     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
6503     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
6504     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
6505                            DAG.getConstant(0x1f, DL, MVT::i64));
6506     // fshl and fshr concatenate their operands in the same order. fsrw and fslw
6507     // instruction use different orders. fshl will return its first operand for
6508     // shift of zero, fshr will return its second operand. fsl and fsr both
6509     // return rs1 so the ISD nodes need to have different operand orders.
6510     // Shift amount is in rs2.
6511     unsigned Opc = RISCVISD::FSLW;
6512     if (N->getOpcode() == ISD::FSHR) {
6513       std::swap(NewOp0, NewOp1);
6514       Opc = RISCVISD::FSRW;
6515     }
6516     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
6517     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
6518     break;
6519   }
6520   case ISD::EXTRACT_VECTOR_ELT: {
6521     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
6522     // type is illegal (currently only vXi64 RV32).
6523     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
6524     // transferred to the destination register. We issue two of these from the
6525     // upper- and lower- halves of the SEW-bit vector element, slid down to the
6526     // first element.
6527     SDValue Vec = N->getOperand(0);
6528     SDValue Idx = N->getOperand(1);
6529 
6530     // The vector type hasn't been legalized yet so we can't issue target
6531     // specific nodes if it needs legalization.
6532     // FIXME: We would manually legalize if it's important.
6533     if (!isTypeLegal(Vec.getValueType()))
6534       return;
6535 
6536     MVT VecVT = Vec.getSimpleValueType();
6537 
6538     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
6539            VecVT.getVectorElementType() == MVT::i64 &&
6540            "Unexpected EXTRACT_VECTOR_ELT legalization");
6541 
6542     // If this is a fixed vector, we need to convert it to a scalable vector.
6543     MVT ContainerVT = VecVT;
6544     if (VecVT.isFixedLengthVector()) {
6545       ContainerVT = getContainerForFixedLengthVector(VecVT);
6546       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
6547     }
6548 
6549     MVT XLenVT = Subtarget.getXLenVT();
6550 
6551     // Use a VL of 1 to avoid processing more elements than we need.
6552     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6553     SDValue VL = DAG.getConstant(1, DL, XLenVT);
6554     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6555 
6556     // Unless the index is known to be 0, we must slide the vector down to get
6557     // the desired element into index 0.
6558     if (!isNullConstant(Idx)) {
6559       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
6560                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
6561     }
6562 
6563     // Extract the lower XLEN bits of the correct vector element.
6564     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6565 
6566     // To extract the upper XLEN bits of the vector element, shift the first
6567     // element right by 32 bits and re-extract the lower XLEN bits.
6568     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6569                                      DAG.getConstant(32, DL, XLenVT), VL);
6570     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
6571                                  ThirtyTwoV, Mask, VL);
6572 
6573     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6574 
6575     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6576     break;
6577   }
6578   case ISD::INTRINSIC_WO_CHAIN: {
6579     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
6580     switch (IntNo) {
6581     default:
6582       llvm_unreachable(
6583           "Don't know how to custom type legalize this intrinsic!");
6584     case Intrinsic::riscv_grev:
6585     case Intrinsic::riscv_gorc:
6586     case Intrinsic::riscv_bcompress:
6587     case Intrinsic::riscv_bdecompress:
6588     case Intrinsic::riscv_bfp: {
6589       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6590              "Unexpected custom legalisation");
6591       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
6592       break;
6593     }
6594     case Intrinsic::riscv_fsl:
6595     case Intrinsic::riscv_fsr: {
6596       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6597              "Unexpected custom legalisation");
6598       SDValue NewOp1 =
6599           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6600       SDValue NewOp2 =
6601           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6602       SDValue NewOp3 =
6603           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3));
6604       unsigned Opc = getRISCVWOpcodeByIntr(IntNo);
6605       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2, NewOp3);
6606       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6607       break;
6608     }
6609     case Intrinsic::riscv_orc_b: {
6610       // Lower to the GORCI encoding for orc.b with the operand extended.
6611       SDValue NewOp =
6612           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6613       // If Zbp is enabled, use GORCIW which will sign extend the result.
6614       unsigned Opc =
6615           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
6616       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
6617                                 DAG.getConstant(7, DL, MVT::i64));
6618       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6619       return;
6620     }
6621     case Intrinsic::riscv_shfl:
6622     case Intrinsic::riscv_unshfl: {
6623       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6624              "Unexpected custom legalisation");
6625       SDValue NewOp1 =
6626           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6627       SDValue NewOp2 =
6628           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6629       unsigned Opc =
6630           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
6631       if (isa<ConstantSDNode>(N->getOperand(2))) {
6632         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
6633                              DAG.getConstant(0xf, DL, MVT::i64));
6634         Opc =
6635             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
6636       }
6637       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6638       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6639       break;
6640     }
6641     case Intrinsic::riscv_vmv_x_s: {
6642       EVT VT = N->getValueType(0);
6643       MVT XLenVT = Subtarget.getXLenVT();
6644       if (VT.bitsLT(XLenVT)) {
6645         // Simple case just extract using vmv.x.s and truncate.
6646         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
6647                                       Subtarget.getXLenVT(), N->getOperand(1));
6648         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
6649         return;
6650       }
6651 
6652       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
6653              "Unexpected custom legalization");
6654 
6655       // We need to do the move in two steps.
6656       SDValue Vec = N->getOperand(1);
6657       MVT VecVT = Vec.getSimpleValueType();
6658 
6659       // First extract the lower XLEN bits of the element.
6660       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6661 
6662       // To extract the upper XLEN bits of the vector element, shift the first
6663       // element right by 32 bits and re-extract the lower XLEN bits.
6664       SDValue VL = DAG.getConstant(1, DL, XLenVT);
6665       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
6666       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6667       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
6668                                        DAG.getConstant(32, DL, XLenVT), VL);
6669       SDValue LShr32 =
6670           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
6671       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6672 
6673       Results.push_back(
6674           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6675       break;
6676     }
6677     }
6678     break;
6679   }
6680   case ISD::VECREDUCE_ADD:
6681   case ISD::VECREDUCE_AND:
6682   case ISD::VECREDUCE_OR:
6683   case ISD::VECREDUCE_XOR:
6684   case ISD::VECREDUCE_SMAX:
6685   case ISD::VECREDUCE_UMAX:
6686   case ISD::VECREDUCE_SMIN:
6687   case ISD::VECREDUCE_UMIN:
6688     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
6689       Results.push_back(V);
6690     break;
6691   case ISD::VP_REDUCE_ADD:
6692   case ISD::VP_REDUCE_AND:
6693   case ISD::VP_REDUCE_OR:
6694   case ISD::VP_REDUCE_XOR:
6695   case ISD::VP_REDUCE_SMAX:
6696   case ISD::VP_REDUCE_UMAX:
6697   case ISD::VP_REDUCE_SMIN:
6698   case ISD::VP_REDUCE_UMIN:
6699     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
6700       Results.push_back(V);
6701     break;
6702   case ISD::FLT_ROUNDS_: {
6703     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
6704     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
6705     Results.push_back(Res.getValue(0));
6706     Results.push_back(Res.getValue(1));
6707     break;
6708   }
6709   }
6710 }
6711 
6712 // A structure to hold one of the bit-manipulation patterns below. Together, a
6713 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
6714 //   (or (and (shl x, 1), 0xAAAAAAAA),
6715 //       (and (srl x, 1), 0x55555555))
6716 struct RISCVBitmanipPat {
6717   SDValue Op;
6718   unsigned ShAmt;
6719   bool IsSHL;
6720 
6721   bool formsPairWith(const RISCVBitmanipPat &Other) const {
6722     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
6723   }
6724 };
6725 
6726 // Matches patterns of the form
6727 //   (and (shl x, C2), (C1 << C2))
6728 //   (and (srl x, C2), C1)
6729 //   (shl (and x, C1), C2)
6730 //   (srl (and x, (C1 << C2)), C2)
6731 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
6732 // The expected masks for each shift amount are specified in BitmanipMasks where
6733 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
6734 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
6735 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
6736 // XLen is 64.
6737 static Optional<RISCVBitmanipPat>
6738 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
6739   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
6740          "Unexpected number of masks");
6741   Optional<uint64_t> Mask;
6742   // Optionally consume a mask around the shift operation.
6743   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
6744     Mask = Op.getConstantOperandVal(1);
6745     Op = Op.getOperand(0);
6746   }
6747   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
6748     return None;
6749   bool IsSHL = Op.getOpcode() == ISD::SHL;
6750 
6751   if (!isa<ConstantSDNode>(Op.getOperand(1)))
6752     return None;
6753   uint64_t ShAmt = Op.getConstantOperandVal(1);
6754 
6755   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6756   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
6757     return None;
6758   // If we don't have enough masks for 64 bit, then we must be trying to
6759   // match SHFL so we're only allowed to shift 1/4 of the width.
6760   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
6761     return None;
6762 
6763   SDValue Src = Op.getOperand(0);
6764 
6765   // The expected mask is shifted left when the AND is found around SHL
6766   // patterns.
6767   //   ((x >> 1) & 0x55555555)
6768   //   ((x << 1) & 0xAAAAAAAA)
6769   bool SHLExpMask = IsSHL;
6770 
6771   if (!Mask) {
6772     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
6773     // the mask is all ones: consume that now.
6774     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
6775       Mask = Src.getConstantOperandVal(1);
6776       Src = Src.getOperand(0);
6777       // The expected mask is now in fact shifted left for SRL, so reverse the
6778       // decision.
6779       //   ((x & 0xAAAAAAAA) >> 1)
6780       //   ((x & 0x55555555) << 1)
6781       SHLExpMask = !SHLExpMask;
6782     } else {
6783       // Use a default shifted mask of all-ones if there's no AND, truncated
6784       // down to the expected width. This simplifies the logic later on.
6785       Mask = maskTrailingOnes<uint64_t>(Width);
6786       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
6787     }
6788   }
6789 
6790   unsigned MaskIdx = Log2_32(ShAmt);
6791   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
6792 
6793   if (SHLExpMask)
6794     ExpMask <<= ShAmt;
6795 
6796   if (Mask != ExpMask)
6797     return None;
6798 
6799   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
6800 }
6801 
6802 // Matches any of the following bit-manipulation patterns:
6803 //   (and (shl x, 1), (0x55555555 << 1))
6804 //   (and (srl x, 1), 0x55555555)
6805 //   (shl (and x, 0x55555555), 1)
6806 //   (srl (and x, (0x55555555 << 1)), 1)
6807 // where the shift amount and mask may vary thus:
6808 //   [1]  = 0x55555555 / 0xAAAAAAAA
6809 //   [2]  = 0x33333333 / 0xCCCCCCCC
6810 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
6811 //   [8]  = 0x00FF00FF / 0xFF00FF00
6812 //   [16] = 0x0000FFFF / 0xFFFFFFFF
6813 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
6814 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
6815   // These are the unshifted masks which we use to match bit-manipulation
6816   // patterns. They may be shifted left in certain circumstances.
6817   static const uint64_t BitmanipMasks[] = {
6818       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
6819       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
6820 
6821   return matchRISCVBitmanipPat(Op, BitmanipMasks);
6822 }
6823 
6824 // Match the following pattern as a GREVI(W) operation
6825 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
6826 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
6827                                const RISCVSubtarget &Subtarget) {
6828   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6829   EVT VT = Op.getValueType();
6830 
6831   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6832     auto LHS = matchGREVIPat(Op.getOperand(0));
6833     auto RHS = matchGREVIPat(Op.getOperand(1));
6834     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
6835       SDLoc DL(Op);
6836       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
6837                          DAG.getConstant(LHS->ShAmt, DL, VT));
6838     }
6839   }
6840   return SDValue();
6841 }
6842 
6843 // Matches any the following pattern as a GORCI(W) operation
6844 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
6845 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
6846 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
6847 // Note that with the variant of 3.,
6848 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
6849 // the inner pattern will first be matched as GREVI and then the outer
6850 // pattern will be matched to GORC via the first rule above.
6851 // 4.  (or (rotl/rotr x, bitwidth/2), x)
6852 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
6853                                const RISCVSubtarget &Subtarget) {
6854   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6855   EVT VT = Op.getValueType();
6856 
6857   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6858     SDLoc DL(Op);
6859     SDValue Op0 = Op.getOperand(0);
6860     SDValue Op1 = Op.getOperand(1);
6861 
6862     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
6863       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
6864           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
6865           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
6866         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
6867       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
6868       if ((Reverse.getOpcode() == ISD::ROTL ||
6869            Reverse.getOpcode() == ISD::ROTR) &&
6870           Reverse.getOperand(0) == X &&
6871           isa<ConstantSDNode>(Reverse.getOperand(1))) {
6872         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
6873         if (RotAmt == (VT.getSizeInBits() / 2))
6874           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
6875                              DAG.getConstant(RotAmt, DL, VT));
6876       }
6877       return SDValue();
6878     };
6879 
6880     // Check for either commutable permutation of (or (GREVI x, shamt), x)
6881     if (SDValue V = MatchOROfReverse(Op0, Op1))
6882       return V;
6883     if (SDValue V = MatchOROfReverse(Op1, Op0))
6884       return V;
6885 
6886     // OR is commutable so canonicalize its OR operand to the left
6887     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
6888       std::swap(Op0, Op1);
6889     if (Op0.getOpcode() != ISD::OR)
6890       return SDValue();
6891     SDValue OrOp0 = Op0.getOperand(0);
6892     SDValue OrOp1 = Op0.getOperand(1);
6893     auto LHS = matchGREVIPat(OrOp0);
6894     // OR is commutable so swap the operands and try again: x might have been
6895     // on the left
6896     if (!LHS) {
6897       std::swap(OrOp0, OrOp1);
6898       LHS = matchGREVIPat(OrOp0);
6899     }
6900     auto RHS = matchGREVIPat(Op1);
6901     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
6902       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
6903                          DAG.getConstant(LHS->ShAmt, DL, VT));
6904     }
6905   }
6906   return SDValue();
6907 }
6908 
6909 // Matches any of the following bit-manipulation patterns:
6910 //   (and (shl x, 1), (0x22222222 << 1))
6911 //   (and (srl x, 1), 0x22222222)
6912 //   (shl (and x, 0x22222222), 1)
6913 //   (srl (and x, (0x22222222 << 1)), 1)
6914 // where the shift amount and mask may vary thus:
6915 //   [1]  = 0x22222222 / 0x44444444
6916 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
6917 //   [4]  = 0x00F000F0 / 0x0F000F00
6918 //   [8]  = 0x0000FF00 / 0x00FF0000
6919 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
6920 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
6921   // These are the unshifted masks which we use to match bit-manipulation
6922   // patterns. They may be shifted left in certain circumstances.
6923   static const uint64_t BitmanipMasks[] = {
6924       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
6925       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
6926 
6927   return matchRISCVBitmanipPat(Op, BitmanipMasks);
6928 }
6929 
6930 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
6931 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
6932                                const RISCVSubtarget &Subtarget) {
6933   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6934   EVT VT = Op.getValueType();
6935 
6936   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
6937     return SDValue();
6938 
6939   SDValue Op0 = Op.getOperand(0);
6940   SDValue Op1 = Op.getOperand(1);
6941 
6942   // Or is commutable so canonicalize the second OR to the LHS.
6943   if (Op0.getOpcode() != ISD::OR)
6944     std::swap(Op0, Op1);
6945   if (Op0.getOpcode() != ISD::OR)
6946     return SDValue();
6947 
6948   // We found an inner OR, so our operands are the operands of the inner OR
6949   // and the other operand of the outer OR.
6950   SDValue A = Op0.getOperand(0);
6951   SDValue B = Op0.getOperand(1);
6952   SDValue C = Op1;
6953 
6954   auto Match1 = matchSHFLPat(A);
6955   auto Match2 = matchSHFLPat(B);
6956 
6957   // If neither matched, we failed.
6958   if (!Match1 && !Match2)
6959     return SDValue();
6960 
6961   // We had at least one match. if one failed, try the remaining C operand.
6962   if (!Match1) {
6963     std::swap(A, C);
6964     Match1 = matchSHFLPat(A);
6965     if (!Match1)
6966       return SDValue();
6967   } else if (!Match2) {
6968     std::swap(B, C);
6969     Match2 = matchSHFLPat(B);
6970     if (!Match2)
6971       return SDValue();
6972   }
6973   assert(Match1 && Match2);
6974 
6975   // Make sure our matches pair up.
6976   if (!Match1->formsPairWith(*Match2))
6977     return SDValue();
6978 
6979   // All the remains is to make sure C is an AND with the same input, that masks
6980   // out the bits that are being shuffled.
6981   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
6982       C.getOperand(0) != Match1->Op)
6983     return SDValue();
6984 
6985   uint64_t Mask = C.getConstantOperandVal(1);
6986 
6987   static const uint64_t BitmanipMasks[] = {
6988       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
6989       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
6990   };
6991 
6992   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6993   unsigned MaskIdx = Log2_32(Match1->ShAmt);
6994   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
6995 
6996   if (Mask != ExpMask)
6997     return SDValue();
6998 
6999   SDLoc DL(Op);
7000   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7001                      DAG.getConstant(Match1->ShAmt, DL, VT));
7002 }
7003 
7004 // Optimize (add (shl x, c0), (shl y, c1)) ->
7005 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
7006 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7007                                   const RISCVSubtarget &Subtarget) {
7008   // Perform this optimization only in the zba extension.
7009   if (!Subtarget.hasStdExtZba())
7010     return SDValue();
7011 
7012   // Skip for vector types and larger types.
7013   EVT VT = N->getValueType(0);
7014   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7015     return SDValue();
7016 
7017   // The two operand nodes must be SHL and have no other use.
7018   SDValue N0 = N->getOperand(0);
7019   SDValue N1 = N->getOperand(1);
7020   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7021       !N0->hasOneUse() || !N1->hasOneUse())
7022     return SDValue();
7023 
7024   // Check c0 and c1.
7025   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7026   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7027   if (!N0C || !N1C)
7028     return SDValue();
7029   int64_t C0 = N0C->getSExtValue();
7030   int64_t C1 = N1C->getSExtValue();
7031   if (C0 <= 0 || C1 <= 0)
7032     return SDValue();
7033 
7034   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7035   int64_t Bits = std::min(C0, C1);
7036   int64_t Diff = std::abs(C0 - C1);
7037   if (Diff != 1 && Diff != 2 && Diff != 3)
7038     return SDValue();
7039 
7040   // Build nodes.
7041   SDLoc DL(N);
7042   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7043   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7044   SDValue NA0 =
7045       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7046   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7047   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7048 }
7049 
7050 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
7051 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
7052 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
7053 // not undo itself, but they are redundant.
7054 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7055   SDValue Src = N->getOperand(0);
7056 
7057   if (Src.getOpcode() != N->getOpcode())
7058     return SDValue();
7059 
7060   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7061       !isa<ConstantSDNode>(Src.getOperand(1)))
7062     return SDValue();
7063 
7064   unsigned ShAmt1 = N->getConstantOperandVal(1);
7065   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7066   Src = Src.getOperand(0);
7067 
7068   unsigned CombinedShAmt;
7069   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
7070     CombinedShAmt = ShAmt1 | ShAmt2;
7071   else
7072     CombinedShAmt = ShAmt1 ^ ShAmt2;
7073 
7074   if (CombinedShAmt == 0)
7075     return Src;
7076 
7077   SDLoc DL(N);
7078   return DAG.getNode(
7079       N->getOpcode(), DL, N->getValueType(0), Src,
7080       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7081 }
7082 
7083 // Combine a constant select operand into its use:
7084 //
7085 // (and (select cond, -1, c), x)
7086 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7087 // (or  (select cond, 0, c), x)
7088 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7089 // (xor (select cond, 0, c), x)
7090 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7091 // (add (select cond, 0, c), x)
7092 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7093 // (sub x, (select cond, 0, c))
7094 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7095 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7096                                    SelectionDAG &DAG, bool AllOnes) {
7097   EVT VT = N->getValueType(0);
7098 
7099   // Skip vectors.
7100   if (VT.isVector())
7101     return SDValue();
7102 
7103   if ((Slct.getOpcode() != ISD::SELECT &&
7104        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7105       !Slct.hasOneUse())
7106     return SDValue();
7107 
7108   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7109     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7110   };
7111 
7112   bool SwapSelectOps;
7113   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7114   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7115   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7116   SDValue NonConstantVal;
7117   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7118     SwapSelectOps = false;
7119     NonConstantVal = FalseVal;
7120   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7121     SwapSelectOps = true;
7122     NonConstantVal = TrueVal;
7123   } else
7124     return SDValue();
7125 
7126   // Slct is now know to be the desired identity constant when CC is true.
7127   TrueVal = OtherOp;
7128   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7129   // Unless SwapSelectOps says the condition should be false.
7130   if (SwapSelectOps)
7131     std::swap(TrueVal, FalseVal);
7132 
7133   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7134     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7135                        {Slct.getOperand(0), Slct.getOperand(1),
7136                         Slct.getOperand(2), TrueVal, FalseVal});
7137 
7138   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7139                      {Slct.getOperand(0), TrueVal, FalseVal});
7140 }
7141 
7142 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7143 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7144                                               bool AllOnes) {
7145   SDValue N0 = N->getOperand(0);
7146   SDValue N1 = N->getOperand(1);
7147   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7148     return Result;
7149   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7150     return Result;
7151   return SDValue();
7152 }
7153 
7154 // Transform (add (mul x, c0), c1) ->
7155 //           (add (mul (add x, c1/c0), c0), c1%c0).
7156 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7157 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7158 // to an infinite loop in DAGCombine if transformed.
7159 // Or transform (add (mul x, c0), c1) ->
7160 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7161 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7162 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7163 // lead to an infinite loop in DAGCombine if transformed.
7164 // Or transform (add (mul x, c0), c1) ->
7165 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7166 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7167 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7168 // lead to an infinite loop in DAGCombine if transformed.
7169 // Or transform (add (mul x, c0), c1) ->
7170 //              (mul (add x, c1/c0), c0).
7171 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
7172 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7173                                      const RISCVSubtarget &Subtarget) {
7174   // Skip for vector types and larger types.
7175   EVT VT = N->getValueType(0);
7176   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7177     return SDValue();
7178   // The first operand node must be a MUL and has no other use.
7179   SDValue N0 = N->getOperand(0);
7180   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7181     return SDValue();
7182   // Check if c0 and c1 match above conditions.
7183   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7184   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7185   if (!N0C || !N1C)
7186     return SDValue();
7187   int64_t C0 = N0C->getSExtValue();
7188   int64_t C1 = N1C->getSExtValue();
7189   int64_t CA, CB;
7190   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
7191     return SDValue();
7192   // Search for proper CA (non-zero) and CB that both are simm12.
7193   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
7194       !isInt<12>(C0 * (C1 / C0))) {
7195     CA = C1 / C0;
7196     CB = C1 % C0;
7197   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
7198              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
7199     CA = C1 / C0 + 1;
7200     CB = C1 % C0 - C0;
7201   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
7202              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
7203     CA = C1 / C0 - 1;
7204     CB = C1 % C0 + C0;
7205   } else
7206     return SDValue();
7207   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
7208   SDLoc DL(N);
7209   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
7210                              DAG.getConstant(CA, DL, VT));
7211   SDValue New1 =
7212       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
7213   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
7214 }
7215 
7216 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
7217                                  const RISCVSubtarget &Subtarget) {
7218   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
7219     return V;
7220   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
7221     return V;
7222   // fold (add (select lhs, rhs, cc, 0, y), x) ->
7223   //      (select lhs, rhs, cc, x, (add x, y))
7224   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7225 }
7226 
7227 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
7228   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
7229   //      (select lhs, rhs, cc, x, (sub x, y))
7230   SDValue N0 = N->getOperand(0);
7231   SDValue N1 = N->getOperand(1);
7232   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
7233 }
7234 
7235 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
7236   // fold (and (select lhs, rhs, cc, -1, y), x) ->
7237   //      (select lhs, rhs, cc, x, (and x, y))
7238   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
7239 }
7240 
7241 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
7242                                 const RISCVSubtarget &Subtarget) {
7243   if (Subtarget.hasStdExtZbp()) {
7244     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
7245       return GREV;
7246     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
7247       return GORC;
7248     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
7249       return SHFL;
7250   }
7251 
7252   // fold (or (select cond, 0, y), x) ->
7253   //      (select cond, x, (or x, y))
7254   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7255 }
7256 
7257 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
7258   // fold (xor (select cond, 0, y), x) ->
7259   //      (select cond, x, (xor x, y))
7260   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7261 }
7262 
7263 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
7264 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
7265 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
7266 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
7267 // ADDW/SUBW/MULW.
7268 static SDValue performANY_EXTENDCombine(SDNode *N,
7269                                         TargetLowering::DAGCombinerInfo &DCI,
7270                                         const RISCVSubtarget &Subtarget) {
7271   if (!Subtarget.is64Bit())
7272     return SDValue();
7273 
7274   SelectionDAG &DAG = DCI.DAG;
7275 
7276   SDValue Src = N->getOperand(0);
7277   EVT VT = N->getValueType(0);
7278   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
7279     return SDValue();
7280 
7281   // The opcode must be one that can implicitly sign_extend.
7282   // FIXME: Additional opcodes.
7283   switch (Src.getOpcode()) {
7284   default:
7285     return SDValue();
7286   case ISD::MUL:
7287     if (!Subtarget.hasStdExtM())
7288       return SDValue();
7289     LLVM_FALLTHROUGH;
7290   case ISD::ADD:
7291   case ISD::SUB:
7292     break;
7293   }
7294 
7295   // Only handle cases where the result is used by a CopyToReg. That likely
7296   // means the value is a liveout of the basic block. This helps prevent
7297   // infinite combine loops like PR51206.
7298   if (none_of(N->uses(),
7299               [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
7300     return SDValue();
7301 
7302   SmallVector<SDNode *, 4> SetCCs;
7303   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
7304                             UE = Src.getNode()->use_end();
7305        UI != UE; ++UI) {
7306     SDNode *User = *UI;
7307     if (User == N)
7308       continue;
7309     if (UI.getUse().getResNo() != Src.getResNo())
7310       continue;
7311     // All i32 setccs are legalized by sign extending operands.
7312     if (User->getOpcode() == ISD::SETCC) {
7313       SetCCs.push_back(User);
7314       continue;
7315     }
7316     // We don't know if we can extend this user.
7317     break;
7318   }
7319 
7320   // If we don't have any SetCCs, this isn't worthwhile.
7321   if (SetCCs.empty())
7322     return SDValue();
7323 
7324   SDLoc DL(N);
7325   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
7326   DCI.CombineTo(N, SExt);
7327 
7328   // Promote all the setccs.
7329   for (SDNode *SetCC : SetCCs) {
7330     SmallVector<SDValue, 4> Ops;
7331 
7332     for (unsigned j = 0; j != 2; ++j) {
7333       SDValue SOp = SetCC->getOperand(j);
7334       if (SOp == Src)
7335         Ops.push_back(SExt);
7336       else
7337         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
7338     }
7339 
7340     Ops.push_back(SetCC->getOperand(2));
7341     DCI.CombineTo(SetCC,
7342                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
7343   }
7344   return SDValue(N, 0);
7345 }
7346 
7347 // Try to form VWMUL, VWMULU or VWMULSU.
7348 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
7349 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
7350                                        bool Commute) {
7351   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
7352   SDValue Op0 = N->getOperand(0);
7353   SDValue Op1 = N->getOperand(1);
7354   if (Commute)
7355     std::swap(Op0, Op1);
7356 
7357   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
7358   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
7359   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
7360   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
7361     return SDValue();
7362 
7363   SDValue Mask = N->getOperand(2);
7364   SDValue VL = N->getOperand(3);
7365 
7366   // Make sure the mask and VL match.
7367   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
7368     return SDValue();
7369 
7370   MVT VT = N->getSimpleValueType(0);
7371 
7372   // Determine the narrow size for a widening multiply.
7373   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7374   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7375                                   VT.getVectorElementCount());
7376 
7377   SDLoc DL(N);
7378 
7379   // See if the other operand is the same opcode.
7380   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
7381     if (!Op1.hasOneUse())
7382       return SDValue();
7383 
7384     // Make sure the mask and VL match.
7385     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
7386       return SDValue();
7387 
7388     Op1 = Op1.getOperand(0);
7389   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
7390     // The operand is a splat of a scalar.
7391 
7392     // The VL must be the same.
7393     if (Op1.getOperand(1) != VL)
7394       return SDValue();
7395 
7396     // Get the scalar value.
7397     Op1 = Op1.getOperand(0);
7398 
7399     // See if have enough sign bits or zero bits in the scalar to use a
7400     // widening multiply by splatting to smaller element size.
7401     unsigned EltBits = VT.getScalarSizeInBits();
7402     unsigned ScalarBits = Op1.getValueSizeInBits();
7403     // Make sure we're getting all element bits from the scalar register.
7404     // FIXME: Support implicit sign extension of vmv.v.x?
7405     if (ScalarBits < EltBits)
7406       return SDValue();
7407 
7408     if (IsSignExt) {
7409       if (DAG.ComputeNumSignBits(Op1) <= (ScalarBits - NarrowSize))
7410         return SDValue();
7411     } else {
7412       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7413       if (!DAG.MaskedValueIsZero(Op1, Mask))
7414         return SDValue();
7415     }
7416 
7417     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, Op1, VL);
7418   } else
7419     return SDValue();
7420 
7421   Op0 = Op0.getOperand(0);
7422 
7423   // Re-introduce narrower extends if needed.
7424   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
7425   if (Op0.getValueType() != NarrowVT)
7426     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7427   if (Op1.getValueType() != NarrowVT)
7428     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7429 
7430   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
7431   if (!IsVWMULSU)
7432     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
7433   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
7434 }
7435 
7436 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
7437   switch (Op.getOpcode()) {
7438   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
7439   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
7440   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
7441   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
7442   case ISD::FROUND:     return RISCVFPRndMode::RMM;
7443   }
7444 
7445   return RISCVFPRndMode::Invalid;
7446 }
7447 
7448 // Fold
7449 //   (fp_to_int (froundeven X)) -> fcvt X, rne
7450 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
7451 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
7452 //   (fp_to_int (fceil X))      -> fcvt X, rup
7453 //   (fp_to_int (fround X))     -> fcvt X, rmm
7454 static SDValue performFP_TO_INTCombine(SDNode *N,
7455                                        TargetLowering::DAGCombinerInfo &DCI,
7456                                        const RISCVSubtarget &Subtarget) {
7457   SelectionDAG &DAG = DCI.DAG;
7458   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7459   MVT XLenVT = Subtarget.getXLenVT();
7460 
7461   // Only handle XLen or i32 types. Other types narrower than XLen will
7462   // eventually be legalized to XLenVT.
7463   EVT VT = N->getValueType(0);
7464   if (VT != MVT::i32 && VT != XLenVT)
7465     return SDValue();
7466 
7467   SDValue Src = N->getOperand(0);
7468 
7469   // Ensure the FP type is also legal.
7470   if (!TLI.isTypeLegal(Src.getValueType()))
7471     return SDValue();
7472 
7473   // Don't do this for f16 with Zfhmin and not Zfh.
7474   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
7475     return SDValue();
7476 
7477   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
7478   if (FRM == RISCVFPRndMode::Invalid)
7479     return SDValue();
7480 
7481   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
7482 
7483   unsigned Opc;
7484   if (VT == XLenVT)
7485     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
7486   else
7487     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
7488 
7489   SDLoc DL(N);
7490   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
7491                                 DAG.getTargetConstant(FRM, DL, XLenVT));
7492   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
7493 }
7494 
7495 // Fold
7496 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
7497 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
7498 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
7499 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
7500 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
7501 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
7502                                        TargetLowering::DAGCombinerInfo &DCI,
7503                                        const RISCVSubtarget &Subtarget) {
7504   SelectionDAG &DAG = DCI.DAG;
7505   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7506   MVT XLenVT = Subtarget.getXLenVT();
7507 
7508   // Only handle XLen types. Other types narrower than XLen will eventually be
7509   // legalized to XLenVT.
7510   EVT DstVT = N->getValueType(0);
7511   if (DstVT != XLenVT)
7512     return SDValue();
7513 
7514   SDValue Src = N->getOperand(0);
7515 
7516   // Ensure the FP type is also legal.
7517   if (!TLI.isTypeLegal(Src.getValueType()))
7518     return SDValue();
7519 
7520   // Don't do this for f16 with Zfhmin and not Zfh.
7521   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
7522     return SDValue();
7523 
7524   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
7525 
7526   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
7527   if (FRM == RISCVFPRndMode::Invalid)
7528     return SDValue();
7529 
7530   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
7531 
7532   unsigned Opc;
7533   if (SatVT == DstVT)
7534     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
7535   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
7536     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
7537   else
7538     return SDValue();
7539   // FIXME: Support other SatVTs by clamping before or after the conversion.
7540 
7541   Src = Src.getOperand(0);
7542 
7543   SDLoc DL(N);
7544   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
7545                                 DAG.getTargetConstant(FRM, DL, XLenVT));
7546 
7547   // RISCV FP-to-int conversions saturate to the destination register size, but
7548   // don't produce 0 for nan.
7549   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
7550   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
7551 }
7552 
7553 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
7554                                                DAGCombinerInfo &DCI) const {
7555   SelectionDAG &DAG = DCI.DAG;
7556 
7557   // Helper to call SimplifyDemandedBits on an operand of N where only some low
7558   // bits are demanded. N will be added to the Worklist if it was not deleted.
7559   // Caller should return SDValue(N, 0) if this returns true.
7560   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
7561     SDValue Op = N->getOperand(OpNo);
7562     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
7563     if (!SimplifyDemandedBits(Op, Mask, DCI))
7564       return false;
7565 
7566     if (N->getOpcode() != ISD::DELETED_NODE)
7567       DCI.AddToWorklist(N);
7568     return true;
7569   };
7570 
7571   switch (N->getOpcode()) {
7572   default:
7573     break;
7574   case RISCVISD::SplitF64: {
7575     SDValue Op0 = N->getOperand(0);
7576     // If the input to SplitF64 is just BuildPairF64 then the operation is
7577     // redundant. Instead, use BuildPairF64's operands directly.
7578     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
7579       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
7580 
7581     SDLoc DL(N);
7582 
7583     // It's cheaper to materialise two 32-bit integers than to load a double
7584     // from the constant pool and transfer it to integer registers through the
7585     // stack.
7586     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
7587       APInt V = C->getValueAPF().bitcastToAPInt();
7588       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
7589       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
7590       return DCI.CombineTo(N, Lo, Hi);
7591     }
7592 
7593     // This is a target-specific version of a DAGCombine performed in
7594     // DAGCombiner::visitBITCAST. It performs the equivalent of:
7595     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
7596     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
7597     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
7598         !Op0.getNode()->hasOneUse())
7599       break;
7600     SDValue NewSplitF64 =
7601         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
7602                     Op0.getOperand(0));
7603     SDValue Lo = NewSplitF64.getValue(0);
7604     SDValue Hi = NewSplitF64.getValue(1);
7605     APInt SignBit = APInt::getSignMask(32);
7606     if (Op0.getOpcode() == ISD::FNEG) {
7607       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
7608                                   DAG.getConstant(SignBit, DL, MVT::i32));
7609       return DCI.CombineTo(N, Lo, NewHi);
7610     }
7611     assert(Op0.getOpcode() == ISD::FABS);
7612     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
7613                                 DAG.getConstant(~SignBit, DL, MVT::i32));
7614     return DCI.CombineTo(N, Lo, NewHi);
7615   }
7616   case RISCVISD::SLLW:
7617   case RISCVISD::SRAW:
7618   case RISCVISD::SRLW:
7619   case RISCVISD::ROLW:
7620   case RISCVISD::RORW: {
7621     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
7622     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7623         SimplifyDemandedLowBitsHelper(1, 5))
7624       return SDValue(N, 0);
7625     break;
7626   }
7627   case RISCVISD::CLZW:
7628   case RISCVISD::CTZW: {
7629     // Only the lower 32 bits of the first operand are read
7630     if (SimplifyDemandedLowBitsHelper(0, 32))
7631       return SDValue(N, 0);
7632     break;
7633   }
7634   case RISCVISD::GREV:
7635   case RISCVISD::GORC: {
7636     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
7637     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
7638     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
7639     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
7640       return SDValue(N, 0);
7641 
7642     return combineGREVI_GORCI(N, DAG);
7643   }
7644   case RISCVISD::GREVW:
7645   case RISCVISD::GORCW: {
7646     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
7647     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7648         SimplifyDemandedLowBitsHelper(1, 5))
7649       return SDValue(N, 0);
7650 
7651     return combineGREVI_GORCI(N, DAG);
7652   }
7653   case RISCVISD::SHFL:
7654   case RISCVISD::UNSHFL: {
7655     // Only the lower log2(Bitwidth)-1 bits of the the shift amount are read.
7656     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
7657     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
7658     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
7659       return SDValue(N, 0);
7660 
7661     break;
7662   }
7663   case RISCVISD::SHFLW:
7664   case RISCVISD::UNSHFLW: {
7665     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
7666     SDValue LHS = N->getOperand(0);
7667     SDValue RHS = N->getOperand(1);
7668     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
7669     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
7670     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7671         SimplifyDemandedLowBitsHelper(1, 4))
7672       return SDValue(N, 0);
7673 
7674     break;
7675   }
7676   case RISCVISD::BCOMPRESSW:
7677   case RISCVISD::BDECOMPRESSW: {
7678     // Only the lower 32 bits of LHS and RHS are read.
7679     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7680         SimplifyDemandedLowBitsHelper(1, 32))
7681       return SDValue(N, 0);
7682 
7683     break;
7684   }
7685   case RISCVISD::FMV_X_ANYEXTH:
7686   case RISCVISD::FMV_X_ANYEXTW_RV64: {
7687     SDLoc DL(N);
7688     SDValue Op0 = N->getOperand(0);
7689     MVT VT = N->getSimpleValueType(0);
7690     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
7691     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
7692     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
7693     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
7694          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
7695         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
7696          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
7697       assert(Op0.getOperand(0).getValueType() == VT &&
7698              "Unexpected value type!");
7699       return Op0.getOperand(0);
7700     }
7701 
7702     // This is a target-specific version of a DAGCombine performed in
7703     // DAGCombiner::visitBITCAST. It performs the equivalent of:
7704     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
7705     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
7706     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
7707         !Op0.getNode()->hasOneUse())
7708       break;
7709     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
7710     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
7711     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
7712     if (Op0.getOpcode() == ISD::FNEG)
7713       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
7714                          DAG.getConstant(SignBit, DL, VT));
7715 
7716     assert(Op0.getOpcode() == ISD::FABS);
7717     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
7718                        DAG.getConstant(~SignBit, DL, VT));
7719   }
7720   case ISD::ADD:
7721     return performADDCombine(N, DAG, Subtarget);
7722   case ISD::SUB:
7723     return performSUBCombine(N, DAG);
7724   case ISD::AND:
7725     return performANDCombine(N, DAG);
7726   case ISD::OR:
7727     return performORCombine(N, DAG, Subtarget);
7728   case ISD::XOR:
7729     return performXORCombine(N, DAG);
7730   case ISD::ANY_EXTEND:
7731     return performANY_EXTENDCombine(N, DCI, Subtarget);
7732   case ISD::ZERO_EXTEND:
7733     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
7734     // type legalization. This is safe because fp_to_uint produces poison if
7735     // it overflows.
7736     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
7737       SDValue Src = N->getOperand(0);
7738       if (Src.getOpcode() == ISD::FP_TO_UINT &&
7739           isTypeLegal(Src.getOperand(0).getValueType()))
7740         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
7741                            Src.getOperand(0));
7742       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
7743           isTypeLegal(Src.getOperand(1).getValueType())) {
7744         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
7745         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
7746                                   Src.getOperand(0), Src.getOperand(1));
7747         DCI.CombineTo(N, Res);
7748         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
7749         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
7750         return SDValue(N, 0); // Return N so it doesn't get rechecked.
7751       }
7752     }
7753     return SDValue();
7754   case RISCVISD::SELECT_CC: {
7755     // Transform
7756     SDValue LHS = N->getOperand(0);
7757     SDValue RHS = N->getOperand(1);
7758     SDValue TrueV = N->getOperand(3);
7759     SDValue FalseV = N->getOperand(4);
7760 
7761     // If the True and False values are the same, we don't need a select_cc.
7762     if (TrueV == FalseV)
7763       return TrueV;
7764 
7765     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
7766     if (!ISD::isIntEqualitySetCC(CCVal))
7767       break;
7768 
7769     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
7770     //      (select_cc X, Y, lt, trueV, falseV)
7771     // Sometimes the setcc is introduced after select_cc has been formed.
7772     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
7773         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
7774       // If we're looking for eq 0 instead of ne 0, we need to invert the
7775       // condition.
7776       bool Invert = CCVal == ISD::SETEQ;
7777       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7778       if (Invert)
7779         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7780 
7781       SDLoc DL(N);
7782       RHS = LHS.getOperand(1);
7783       LHS = LHS.getOperand(0);
7784       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
7785 
7786       SDValue TargetCC = DAG.getCondCode(CCVal);
7787       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
7788                          {LHS, RHS, TargetCC, TrueV, FalseV});
7789     }
7790 
7791     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
7792     //      (select_cc X, Y, eq/ne, trueV, falseV)
7793     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
7794       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
7795                          {LHS.getOperand(0), LHS.getOperand(1),
7796                           N->getOperand(2), TrueV, FalseV});
7797     // (select_cc X, 1, setne, trueV, falseV) ->
7798     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
7799     // This can occur when legalizing some floating point comparisons.
7800     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
7801     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
7802       SDLoc DL(N);
7803       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7804       SDValue TargetCC = DAG.getCondCode(CCVal);
7805       RHS = DAG.getConstant(0, DL, LHS.getValueType());
7806       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
7807                          {LHS, RHS, TargetCC, TrueV, FalseV});
7808     }
7809 
7810     break;
7811   }
7812   case RISCVISD::BR_CC: {
7813     SDValue LHS = N->getOperand(1);
7814     SDValue RHS = N->getOperand(2);
7815     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
7816     if (!ISD::isIntEqualitySetCC(CCVal))
7817       break;
7818 
7819     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
7820     //      (br_cc X, Y, lt, dest)
7821     // Sometimes the setcc is introduced after br_cc has been formed.
7822     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
7823         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
7824       // If we're looking for eq 0 instead of ne 0, we need to invert the
7825       // condition.
7826       bool Invert = CCVal == ISD::SETEQ;
7827       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
7828       if (Invert)
7829         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7830 
7831       SDLoc DL(N);
7832       RHS = LHS.getOperand(1);
7833       LHS = LHS.getOperand(0);
7834       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
7835 
7836       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
7837                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
7838                          N->getOperand(4));
7839     }
7840 
7841     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
7842     //      (br_cc X, Y, eq/ne, trueV, falseV)
7843     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
7844       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
7845                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
7846                          N->getOperand(3), N->getOperand(4));
7847 
7848     // (br_cc X, 1, setne, br_cc) ->
7849     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
7850     // This can occur when legalizing some floating point comparisons.
7851     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
7852     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
7853       SDLoc DL(N);
7854       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
7855       SDValue TargetCC = DAG.getCondCode(CCVal);
7856       RHS = DAG.getConstant(0, DL, LHS.getValueType());
7857       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
7858                          N->getOperand(0), LHS, RHS, TargetCC,
7859                          N->getOperand(4));
7860     }
7861     break;
7862   }
7863   case ISD::FP_TO_SINT:
7864   case ISD::FP_TO_UINT:
7865     return performFP_TO_INTCombine(N, DCI, Subtarget);
7866   case ISD::FP_TO_SINT_SAT:
7867   case ISD::FP_TO_UINT_SAT:
7868     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
7869   case ISD::FCOPYSIGN: {
7870     EVT VT = N->getValueType(0);
7871     if (!VT.isVector())
7872       break;
7873     // There is a form of VFSGNJ which injects the negated sign of its second
7874     // operand. Try and bubble any FNEG up after the extend/round to produce
7875     // this optimized pattern. Avoid modifying cases where FP_ROUND and
7876     // TRUNC=1.
7877     SDValue In2 = N->getOperand(1);
7878     // Avoid cases where the extend/round has multiple uses, as duplicating
7879     // those is typically more expensive than removing a fneg.
7880     if (!In2.hasOneUse())
7881       break;
7882     if (In2.getOpcode() != ISD::FP_EXTEND &&
7883         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
7884       break;
7885     In2 = In2.getOperand(0);
7886     if (In2.getOpcode() != ISD::FNEG)
7887       break;
7888     SDLoc DL(N);
7889     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
7890     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
7891                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
7892   }
7893   case ISD::MGATHER:
7894   case ISD::MSCATTER:
7895   case ISD::VP_GATHER:
7896   case ISD::VP_SCATTER: {
7897     if (!DCI.isBeforeLegalize())
7898       break;
7899     SDValue Index, ScaleOp;
7900     bool IsIndexScaled = false;
7901     bool IsIndexSigned = false;
7902     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
7903       Index = VPGSN->getIndex();
7904       ScaleOp = VPGSN->getScale();
7905       IsIndexScaled = VPGSN->isIndexScaled();
7906       IsIndexSigned = VPGSN->isIndexSigned();
7907     } else {
7908       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
7909       Index = MGSN->getIndex();
7910       ScaleOp = MGSN->getScale();
7911       IsIndexScaled = MGSN->isIndexScaled();
7912       IsIndexSigned = MGSN->isIndexSigned();
7913     }
7914     EVT IndexVT = Index.getValueType();
7915     MVT XLenVT = Subtarget.getXLenVT();
7916     // RISCV indexed loads only support the "unsigned unscaled" addressing
7917     // mode, so anything else must be manually legalized.
7918     bool NeedsIdxLegalization =
7919         IsIndexScaled ||
7920         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
7921     if (!NeedsIdxLegalization)
7922       break;
7923 
7924     SDLoc DL(N);
7925 
7926     // Any index legalization should first promote to XLenVT, so we don't lose
7927     // bits when scaling. This may create an illegal index type so we let
7928     // LLVM's legalization take care of the splitting.
7929     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
7930     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
7931       IndexVT = IndexVT.changeVectorElementType(XLenVT);
7932       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
7933                           DL, IndexVT, Index);
7934     }
7935 
7936     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
7937     if (IsIndexScaled && Scale != 1) {
7938       // Manually scale the indices by the element size.
7939       // TODO: Sanitize the scale operand here?
7940       // TODO: For VP nodes, should we use VP_SHL here?
7941       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
7942       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
7943       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
7944     }
7945 
7946     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
7947     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
7948       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
7949                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
7950                               VPGN->getScale(), VPGN->getMask(),
7951                               VPGN->getVectorLength()},
7952                              VPGN->getMemOperand(), NewIndexTy);
7953     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
7954       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
7955                               {VPSN->getChain(), VPSN->getValue(),
7956                                VPSN->getBasePtr(), Index, VPSN->getScale(),
7957                                VPSN->getMask(), VPSN->getVectorLength()},
7958                               VPSN->getMemOperand(), NewIndexTy);
7959     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
7960       return DAG.getMaskedGather(
7961           N->getVTList(), MGN->getMemoryVT(), DL,
7962           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
7963            MGN->getBasePtr(), Index, MGN->getScale()},
7964           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
7965     const auto *MSN = cast<MaskedScatterSDNode>(N);
7966     return DAG.getMaskedScatter(
7967         N->getVTList(), MSN->getMemoryVT(), DL,
7968         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
7969          Index, MSN->getScale()},
7970         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
7971   }
7972   case RISCVISD::SRA_VL:
7973   case RISCVISD::SRL_VL:
7974   case RISCVISD::SHL_VL: {
7975     SDValue ShAmt = N->getOperand(1);
7976     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
7977       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
7978       SDLoc DL(N);
7979       SDValue VL = N->getOperand(3);
7980       EVT VT = N->getValueType(0);
7981       ShAmt =
7982           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
7983       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
7984                          N->getOperand(2), N->getOperand(3));
7985     }
7986     break;
7987   }
7988   case ISD::SRA:
7989   case ISD::SRL:
7990   case ISD::SHL: {
7991     SDValue ShAmt = N->getOperand(1);
7992     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
7993       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
7994       SDLoc DL(N);
7995       EVT VT = N->getValueType(0);
7996       ShAmt =
7997           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
7998       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
7999     }
8000     break;
8001   }
8002   case RISCVISD::MUL_VL:
8003     if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
8004       return V;
8005     // Mul is commutative.
8006     return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
8007   case ISD::STORE: {
8008     auto *Store = cast<StoreSDNode>(N);
8009     SDValue Val = Store->getValue();
8010     // Combine store of vmv.x.s to vse with VL of 1.
8011     // FIXME: Support FP.
8012     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
8013       SDValue Src = Val.getOperand(0);
8014       EVT VecVT = Src.getValueType();
8015       EVT MemVT = Store->getMemoryVT();
8016       // The memory VT and the element type must match.
8017       if (VecVT.getVectorElementType() == MemVT) {
8018         SDLoc DL(N);
8019         MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
8020         return DAG.getStoreVP(
8021             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
8022             DAG.getConstant(1, DL, MaskVT),
8023             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
8024             Store->getMemOperand(), Store->getAddressingMode(),
8025             Store->isTruncatingStore(), /*IsCompress*/ false);
8026       }
8027     }
8028 
8029     break;
8030   }
8031   }
8032 
8033   return SDValue();
8034 }
8035 
8036 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
8037     const SDNode *N, CombineLevel Level) const {
8038   // The following folds are only desirable if `(OP _, c1 << c2)` can be
8039   // materialised in fewer instructions than `(OP _, c1)`:
8040   //
8041   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
8042   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
8043   SDValue N0 = N->getOperand(0);
8044   EVT Ty = N0.getValueType();
8045   if (Ty.isScalarInteger() &&
8046       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
8047     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
8048     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
8049     if (C1 && C2) {
8050       const APInt &C1Int = C1->getAPIntValue();
8051       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
8052 
8053       // We can materialise `c1 << c2` into an add immediate, so it's "free",
8054       // and the combine should happen, to potentially allow further combines
8055       // later.
8056       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
8057           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
8058         return true;
8059 
8060       // We can materialise `c1` in an add immediate, so it's "free", and the
8061       // combine should be prevented.
8062       if (C1Int.getMinSignedBits() <= 64 &&
8063           isLegalAddImmediate(C1Int.getSExtValue()))
8064         return false;
8065 
8066       // Neither constant will fit into an immediate, so find materialisation
8067       // costs.
8068       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
8069                                               Subtarget.getFeatureBits(),
8070                                               /*CompressionCost*/true);
8071       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
8072           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
8073           /*CompressionCost*/true);
8074 
8075       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
8076       // combine should be prevented.
8077       if (C1Cost < ShiftedC1Cost)
8078         return false;
8079     }
8080   }
8081   return true;
8082 }
8083 
8084 bool RISCVTargetLowering::targetShrinkDemandedConstant(
8085     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
8086     TargetLoweringOpt &TLO) const {
8087   // Delay this optimization as late as possible.
8088   if (!TLO.LegalOps)
8089     return false;
8090 
8091   EVT VT = Op.getValueType();
8092   if (VT.isVector())
8093     return false;
8094 
8095   // Only handle AND for now.
8096   if (Op.getOpcode() != ISD::AND)
8097     return false;
8098 
8099   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
8100   if (!C)
8101     return false;
8102 
8103   const APInt &Mask = C->getAPIntValue();
8104 
8105   // Clear all non-demanded bits initially.
8106   APInt ShrunkMask = Mask & DemandedBits;
8107 
8108   // Try to make a smaller immediate by setting undemanded bits.
8109 
8110   APInt ExpandedMask = Mask | ~DemandedBits;
8111 
8112   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
8113     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
8114   };
8115   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
8116     if (NewMask == Mask)
8117       return true;
8118     SDLoc DL(Op);
8119     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
8120     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
8121     return TLO.CombineTo(Op, NewOp);
8122   };
8123 
8124   // If the shrunk mask fits in sign extended 12 bits, let the target
8125   // independent code apply it.
8126   if (ShrunkMask.isSignedIntN(12))
8127     return false;
8128 
8129   // Preserve (and X, 0xffff) when zext.h is supported.
8130   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
8131     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
8132     if (IsLegalMask(NewMask))
8133       return UseMask(NewMask);
8134   }
8135 
8136   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
8137   if (VT == MVT::i64) {
8138     APInt NewMask = APInt(64, 0xffffffff);
8139     if (IsLegalMask(NewMask))
8140       return UseMask(NewMask);
8141   }
8142 
8143   // For the remaining optimizations, we need to be able to make a negative
8144   // number through a combination of mask and undemanded bits.
8145   if (!ExpandedMask.isNegative())
8146     return false;
8147 
8148   // What is the fewest number of bits we need to represent the negative number.
8149   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
8150 
8151   // Try to make a 12 bit negative immediate. If that fails try to make a 32
8152   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
8153   APInt NewMask = ShrunkMask;
8154   if (MinSignedBits <= 12)
8155     NewMask.setBitsFrom(11);
8156   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
8157     NewMask.setBitsFrom(31);
8158   else
8159     return false;
8160 
8161   // Check that our new mask is a subset of the demanded mask.
8162   assert(IsLegalMask(NewMask));
8163   return UseMask(NewMask);
8164 }
8165 
8166 static void computeGREV(APInt &Src, unsigned ShAmt) {
8167   ShAmt &= Src.getBitWidth() - 1;
8168   uint64_t x = Src.getZExtValue();
8169   if (ShAmt & 1)
8170     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
8171   if (ShAmt & 2)
8172     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
8173   if (ShAmt & 4)
8174     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
8175   if (ShAmt & 8)
8176     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
8177   if (ShAmt & 16)
8178     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
8179   if (ShAmt & 32)
8180     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
8181   Src = x;
8182 }
8183 
8184 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
8185                                                         KnownBits &Known,
8186                                                         const APInt &DemandedElts,
8187                                                         const SelectionDAG &DAG,
8188                                                         unsigned Depth) const {
8189   unsigned BitWidth = Known.getBitWidth();
8190   unsigned Opc = Op.getOpcode();
8191   assert((Opc >= ISD::BUILTIN_OP_END ||
8192           Opc == ISD::INTRINSIC_WO_CHAIN ||
8193           Opc == ISD::INTRINSIC_W_CHAIN ||
8194           Opc == ISD::INTRINSIC_VOID) &&
8195          "Should use MaskedValueIsZero if you don't know whether Op"
8196          " is a target node!");
8197 
8198   Known.resetAll();
8199   switch (Opc) {
8200   default: break;
8201   case RISCVISD::SELECT_CC: {
8202     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
8203     // If we don't know any bits, early out.
8204     if (Known.isUnknown())
8205       break;
8206     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
8207 
8208     // Only known if known in both the LHS and RHS.
8209     Known = KnownBits::commonBits(Known, Known2);
8210     break;
8211   }
8212   case RISCVISD::REMUW: {
8213     KnownBits Known2;
8214     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8215     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8216     // We only care about the lower 32 bits.
8217     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
8218     // Restore the original width by sign extending.
8219     Known = Known.sext(BitWidth);
8220     break;
8221   }
8222   case RISCVISD::DIVUW: {
8223     KnownBits Known2;
8224     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8225     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8226     // We only care about the lower 32 bits.
8227     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
8228     // Restore the original width by sign extending.
8229     Known = Known.sext(BitWidth);
8230     break;
8231   }
8232   case RISCVISD::CTZW: {
8233     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8234     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
8235     unsigned LowBits = Log2_32(PossibleTZ) + 1;
8236     Known.Zero.setBitsFrom(LowBits);
8237     break;
8238   }
8239   case RISCVISD::CLZW: {
8240     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8241     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
8242     unsigned LowBits = Log2_32(PossibleLZ) + 1;
8243     Known.Zero.setBitsFrom(LowBits);
8244     break;
8245   }
8246   case RISCVISD::GREV:
8247   case RISCVISD::GREVW: {
8248     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
8249       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8250       if (Opc == RISCVISD::GREVW)
8251         Known = Known.trunc(32);
8252       unsigned ShAmt = C->getZExtValue();
8253       computeGREV(Known.Zero, ShAmt);
8254       computeGREV(Known.One, ShAmt);
8255       if (Opc == RISCVISD::GREVW)
8256         Known = Known.sext(BitWidth);
8257     }
8258     break;
8259   }
8260   case RISCVISD::READ_VLENB:
8261     // We assume VLENB is at least 16 bytes.
8262     Known.Zero.setLowBits(4);
8263     // We assume VLENB is no more than 65536 / 8 bytes.
8264     Known.Zero.setBitsFrom(14);
8265     break;
8266   case ISD::INTRINSIC_W_CHAIN:
8267   case ISD::INTRINSIC_WO_CHAIN: {
8268     unsigned IntNo =
8269         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
8270     switch (IntNo) {
8271     default:
8272       // We can't do anything for most intrinsics.
8273       break;
8274     case Intrinsic::riscv_vsetvli:
8275     case Intrinsic::riscv_vsetvlimax:
8276     case Intrinsic::riscv_vsetvli_opt:
8277     case Intrinsic::riscv_vsetvlimax_opt:
8278       // Assume that VL output is positive and would fit in an int32_t.
8279       // TODO: VLEN might be capped at 16 bits in a future V spec update.
8280       if (BitWidth >= 32)
8281         Known.Zero.setBitsFrom(31);
8282       break;
8283     }
8284     break;
8285   }
8286   }
8287 }
8288 
8289 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
8290     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
8291     unsigned Depth) const {
8292   switch (Op.getOpcode()) {
8293   default:
8294     break;
8295   case RISCVISD::SELECT_CC: {
8296     unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
8297     if (Tmp == 1) return 1;  // Early out.
8298     unsigned Tmp2 = DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
8299     return std::min(Tmp, Tmp2);
8300   }
8301   case RISCVISD::SLLW:
8302   case RISCVISD::SRAW:
8303   case RISCVISD::SRLW:
8304   case RISCVISD::DIVW:
8305   case RISCVISD::DIVUW:
8306   case RISCVISD::REMUW:
8307   case RISCVISD::ROLW:
8308   case RISCVISD::RORW:
8309   case RISCVISD::GREVW:
8310   case RISCVISD::GORCW:
8311   case RISCVISD::FSLW:
8312   case RISCVISD::FSRW:
8313   case RISCVISD::SHFLW:
8314   case RISCVISD::UNSHFLW:
8315   case RISCVISD::BCOMPRESSW:
8316   case RISCVISD::BDECOMPRESSW:
8317   case RISCVISD::BFPW:
8318   case RISCVISD::FCVT_W_RV64:
8319   case RISCVISD::FCVT_WU_RV64:
8320   case RISCVISD::STRICT_FCVT_W_RV64:
8321   case RISCVISD::STRICT_FCVT_WU_RV64:
8322     // TODO: As the result is sign-extended, this is conservatively correct. A
8323     // more precise answer could be calculated for SRAW depending on known
8324     // bits in the shift amount.
8325     return 33;
8326   case RISCVISD::SHFL:
8327   case RISCVISD::UNSHFL: {
8328     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
8329     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
8330     // will stay within the upper 32 bits. If there were more than 32 sign bits
8331     // before there will be at least 33 sign bits after.
8332     if (Op.getValueType() == MVT::i64 &&
8333         isa<ConstantSDNode>(Op.getOperand(1)) &&
8334         (Op.getConstantOperandVal(1) & 0x10) == 0) {
8335       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
8336       if (Tmp > 32)
8337         return 33;
8338     }
8339     break;
8340   }
8341   case RISCVISD::VMV_X_S:
8342     // The number of sign bits of the scalar result is computed by obtaining the
8343     // element type of the input vector operand, subtracting its width from the
8344     // XLEN, and then adding one (sign bit within the element type). If the
8345     // element type is wider than XLen, the least-significant XLEN bits are
8346     // taken.
8347     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
8348       return 1;
8349     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
8350   }
8351 
8352   return 1;
8353 }
8354 
8355 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
8356                                                   MachineBasicBlock *BB) {
8357   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
8358 
8359   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
8360   // Should the count have wrapped while it was being read, we need to try
8361   // again.
8362   // ...
8363   // read:
8364   // rdcycleh x3 # load high word of cycle
8365   // rdcycle  x2 # load low word of cycle
8366   // rdcycleh x4 # load high word of cycle
8367   // bne x3, x4, read # check if high word reads match, otherwise try again
8368   // ...
8369 
8370   MachineFunction &MF = *BB->getParent();
8371   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8372   MachineFunction::iterator It = ++BB->getIterator();
8373 
8374   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
8375   MF.insert(It, LoopMBB);
8376 
8377   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
8378   MF.insert(It, DoneMBB);
8379 
8380   // Transfer the remainder of BB and its successor edges to DoneMBB.
8381   DoneMBB->splice(DoneMBB->begin(), BB,
8382                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
8383   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
8384 
8385   BB->addSuccessor(LoopMBB);
8386 
8387   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8388   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8389   Register LoReg = MI.getOperand(0).getReg();
8390   Register HiReg = MI.getOperand(1).getReg();
8391   DebugLoc DL = MI.getDebugLoc();
8392 
8393   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
8394   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
8395       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
8396       .addReg(RISCV::X0);
8397   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
8398       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
8399       .addReg(RISCV::X0);
8400   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
8401       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
8402       .addReg(RISCV::X0);
8403 
8404   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
8405       .addReg(HiReg)
8406       .addReg(ReadAgainReg)
8407       .addMBB(LoopMBB);
8408 
8409   LoopMBB->addSuccessor(LoopMBB);
8410   LoopMBB->addSuccessor(DoneMBB);
8411 
8412   MI.eraseFromParent();
8413 
8414   return DoneMBB;
8415 }
8416 
8417 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
8418                                              MachineBasicBlock *BB) {
8419   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
8420 
8421   MachineFunction &MF = *BB->getParent();
8422   DebugLoc DL = MI.getDebugLoc();
8423   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
8424   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
8425   Register LoReg = MI.getOperand(0).getReg();
8426   Register HiReg = MI.getOperand(1).getReg();
8427   Register SrcReg = MI.getOperand(2).getReg();
8428   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
8429   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
8430 
8431   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
8432                           RI);
8433   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
8434   MachineMemOperand *MMOLo =
8435       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
8436   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
8437       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
8438   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
8439       .addFrameIndex(FI)
8440       .addImm(0)
8441       .addMemOperand(MMOLo);
8442   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
8443       .addFrameIndex(FI)
8444       .addImm(4)
8445       .addMemOperand(MMOHi);
8446   MI.eraseFromParent(); // The pseudo instruction is gone now.
8447   return BB;
8448 }
8449 
8450 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
8451                                                  MachineBasicBlock *BB) {
8452   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
8453          "Unexpected instruction");
8454 
8455   MachineFunction &MF = *BB->getParent();
8456   DebugLoc DL = MI.getDebugLoc();
8457   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
8458   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
8459   Register DstReg = MI.getOperand(0).getReg();
8460   Register LoReg = MI.getOperand(1).getReg();
8461   Register HiReg = MI.getOperand(2).getReg();
8462   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
8463   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
8464 
8465   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
8466   MachineMemOperand *MMOLo =
8467       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
8468   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
8469       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
8470   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
8471       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
8472       .addFrameIndex(FI)
8473       .addImm(0)
8474       .addMemOperand(MMOLo);
8475   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
8476       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
8477       .addFrameIndex(FI)
8478       .addImm(4)
8479       .addMemOperand(MMOHi);
8480   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
8481   MI.eraseFromParent(); // The pseudo instruction is gone now.
8482   return BB;
8483 }
8484 
8485 static bool isSelectPseudo(MachineInstr &MI) {
8486   switch (MI.getOpcode()) {
8487   default:
8488     return false;
8489   case RISCV::Select_GPR_Using_CC_GPR:
8490   case RISCV::Select_FPR16_Using_CC_GPR:
8491   case RISCV::Select_FPR32_Using_CC_GPR:
8492   case RISCV::Select_FPR64_Using_CC_GPR:
8493     return true;
8494   }
8495 }
8496 
8497 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
8498                                         unsigned RelOpcode, unsigned EqOpcode,
8499                                         const RISCVSubtarget &Subtarget) {
8500   DebugLoc DL = MI.getDebugLoc();
8501   Register DstReg = MI.getOperand(0).getReg();
8502   Register Src1Reg = MI.getOperand(1).getReg();
8503   Register Src2Reg = MI.getOperand(2).getReg();
8504   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
8505   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
8506   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
8507 
8508   // Save the current FFLAGS.
8509   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
8510 
8511   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
8512                  .addReg(Src1Reg)
8513                  .addReg(Src2Reg);
8514   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
8515     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
8516 
8517   // Restore the FFLAGS.
8518   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
8519       .addReg(SavedFFlags, RegState::Kill);
8520 
8521   // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
8522   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
8523                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
8524                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
8525   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
8526     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
8527 
8528   // Erase the pseudoinstruction.
8529   MI.eraseFromParent();
8530   return BB;
8531 }
8532 
8533 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
8534                                            MachineBasicBlock *BB,
8535                                            const RISCVSubtarget &Subtarget) {
8536   // To "insert" Select_* instructions, we actually have to insert the triangle
8537   // control-flow pattern.  The incoming instructions know the destination vreg
8538   // to set, the condition code register to branch on, the true/false values to
8539   // select between, and the condcode to use to select the appropriate branch.
8540   //
8541   // We produce the following control flow:
8542   //     HeadMBB
8543   //     |  \
8544   //     |  IfFalseMBB
8545   //     | /
8546   //    TailMBB
8547   //
8548   // When we find a sequence of selects we attempt to optimize their emission
8549   // by sharing the control flow. Currently we only handle cases where we have
8550   // multiple selects with the exact same condition (same LHS, RHS and CC).
8551   // The selects may be interleaved with other instructions if the other
8552   // instructions meet some requirements we deem safe:
8553   // - They are debug instructions. Otherwise,
8554   // - They do not have side-effects, do not access memory and their inputs do
8555   //   not depend on the results of the select pseudo-instructions.
8556   // The TrueV/FalseV operands of the selects cannot depend on the result of
8557   // previous selects in the sequence.
8558   // These conditions could be further relaxed. See the X86 target for a
8559   // related approach and more information.
8560   Register LHS = MI.getOperand(1).getReg();
8561   Register RHS = MI.getOperand(2).getReg();
8562   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
8563 
8564   SmallVector<MachineInstr *, 4> SelectDebugValues;
8565   SmallSet<Register, 4> SelectDests;
8566   SelectDests.insert(MI.getOperand(0).getReg());
8567 
8568   MachineInstr *LastSelectPseudo = &MI;
8569 
8570   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
8571        SequenceMBBI != E; ++SequenceMBBI) {
8572     if (SequenceMBBI->isDebugInstr())
8573       continue;
8574     else if (isSelectPseudo(*SequenceMBBI)) {
8575       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
8576           SequenceMBBI->getOperand(2).getReg() != RHS ||
8577           SequenceMBBI->getOperand(3).getImm() != CC ||
8578           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
8579           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
8580         break;
8581       LastSelectPseudo = &*SequenceMBBI;
8582       SequenceMBBI->collectDebugValues(SelectDebugValues);
8583       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
8584     } else {
8585       if (SequenceMBBI->hasUnmodeledSideEffects() ||
8586           SequenceMBBI->mayLoadOrStore())
8587         break;
8588       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
8589             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
8590           }))
8591         break;
8592     }
8593   }
8594 
8595   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
8596   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8597   DebugLoc DL = MI.getDebugLoc();
8598   MachineFunction::iterator I = ++BB->getIterator();
8599 
8600   MachineBasicBlock *HeadMBB = BB;
8601   MachineFunction *F = BB->getParent();
8602   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
8603   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
8604 
8605   F->insert(I, IfFalseMBB);
8606   F->insert(I, TailMBB);
8607 
8608   // Transfer debug instructions associated with the selects to TailMBB.
8609   for (MachineInstr *DebugInstr : SelectDebugValues) {
8610     TailMBB->push_back(DebugInstr->removeFromParent());
8611   }
8612 
8613   // Move all instructions after the sequence to TailMBB.
8614   TailMBB->splice(TailMBB->end(), HeadMBB,
8615                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
8616   // Update machine-CFG edges by transferring all successors of the current
8617   // block to the new block which will contain the Phi nodes for the selects.
8618   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
8619   // Set the successors for HeadMBB.
8620   HeadMBB->addSuccessor(IfFalseMBB);
8621   HeadMBB->addSuccessor(TailMBB);
8622 
8623   // Insert appropriate branch.
8624   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
8625     .addReg(LHS)
8626     .addReg(RHS)
8627     .addMBB(TailMBB);
8628 
8629   // IfFalseMBB just falls through to TailMBB.
8630   IfFalseMBB->addSuccessor(TailMBB);
8631 
8632   // Create PHIs for all of the select pseudo-instructions.
8633   auto SelectMBBI = MI.getIterator();
8634   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
8635   auto InsertionPoint = TailMBB->begin();
8636   while (SelectMBBI != SelectEnd) {
8637     auto Next = std::next(SelectMBBI);
8638     if (isSelectPseudo(*SelectMBBI)) {
8639       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
8640       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
8641               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
8642           .addReg(SelectMBBI->getOperand(4).getReg())
8643           .addMBB(HeadMBB)
8644           .addReg(SelectMBBI->getOperand(5).getReg())
8645           .addMBB(IfFalseMBB);
8646       SelectMBBI->eraseFromParent();
8647     }
8648     SelectMBBI = Next;
8649   }
8650 
8651   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
8652   return TailMBB;
8653 }
8654 
8655 MachineBasicBlock *
8656 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
8657                                                  MachineBasicBlock *BB) const {
8658   switch (MI.getOpcode()) {
8659   default:
8660     llvm_unreachable("Unexpected instr type to insert");
8661   case RISCV::ReadCycleWide:
8662     assert(!Subtarget.is64Bit() &&
8663            "ReadCycleWrite is only to be used on riscv32");
8664     return emitReadCycleWidePseudo(MI, BB);
8665   case RISCV::Select_GPR_Using_CC_GPR:
8666   case RISCV::Select_FPR16_Using_CC_GPR:
8667   case RISCV::Select_FPR32_Using_CC_GPR:
8668   case RISCV::Select_FPR64_Using_CC_GPR:
8669     return emitSelectPseudo(MI, BB, Subtarget);
8670   case RISCV::BuildPairF64Pseudo:
8671     return emitBuildPairF64Pseudo(MI, BB);
8672   case RISCV::SplitF64Pseudo:
8673     return emitSplitF64Pseudo(MI, BB);
8674   case RISCV::PseudoQuietFLE_H:
8675     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
8676   case RISCV::PseudoQuietFLT_H:
8677     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
8678   case RISCV::PseudoQuietFLE_S:
8679     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
8680   case RISCV::PseudoQuietFLT_S:
8681     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
8682   case RISCV::PseudoQuietFLE_D:
8683     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
8684   case RISCV::PseudoQuietFLT_D:
8685     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
8686   }
8687 }
8688 
8689 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
8690                                                         SDNode *Node) const {
8691   // Add FRM dependency to any instructions with dynamic rounding mode.
8692   unsigned Opc = MI.getOpcode();
8693   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
8694   if (Idx < 0)
8695     return;
8696   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
8697     return;
8698   // If the instruction already reads FRM, don't add another read.
8699   if (MI.readsRegister(RISCV::FRM))
8700     return;
8701   MI.addOperand(
8702       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
8703 }
8704 
8705 // Calling Convention Implementation.
8706 // The expectations for frontend ABI lowering vary from target to target.
8707 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
8708 // details, but this is a longer term goal. For now, we simply try to keep the
8709 // role of the frontend as simple and well-defined as possible. The rules can
8710 // be summarised as:
8711 // * Never split up large scalar arguments. We handle them here.
8712 // * If a hardfloat calling convention is being used, and the struct may be
8713 // passed in a pair of registers (fp+fp, int+fp), and both registers are
8714 // available, then pass as two separate arguments. If either the GPRs or FPRs
8715 // are exhausted, then pass according to the rule below.
8716 // * If a struct could never be passed in registers or directly in a stack
8717 // slot (as it is larger than 2*XLEN and the floating point rules don't
8718 // apply), then pass it using a pointer with the byval attribute.
8719 // * If a struct is less than 2*XLEN, then coerce to either a two-element
8720 // word-sized array or a 2*XLEN scalar (depending on alignment).
8721 // * The frontend can determine whether a struct is returned by reference or
8722 // not based on its size and fields. If it will be returned by reference, the
8723 // frontend must modify the prototype so a pointer with the sret annotation is
8724 // passed as the first argument. This is not necessary for large scalar
8725 // returns.
8726 // * Struct return values and varargs should be coerced to structs containing
8727 // register-size fields in the same situations they would be for fixed
8728 // arguments.
8729 
8730 static const MCPhysReg ArgGPRs[] = {
8731   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
8732   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
8733 };
8734 static const MCPhysReg ArgFPR16s[] = {
8735   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
8736   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
8737 };
8738 static const MCPhysReg ArgFPR32s[] = {
8739   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
8740   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
8741 };
8742 static const MCPhysReg ArgFPR64s[] = {
8743   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
8744   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
8745 };
8746 // This is an interim calling convention and it may be changed in the future.
8747 static const MCPhysReg ArgVRs[] = {
8748     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
8749     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
8750     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
8751 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
8752                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
8753                                      RISCV::V20M2, RISCV::V22M2};
8754 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
8755                                      RISCV::V20M4};
8756 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
8757 
8758 // Pass a 2*XLEN argument that has been split into two XLEN values through
8759 // registers or the stack as necessary.
8760 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
8761                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
8762                                 MVT ValVT2, MVT LocVT2,
8763                                 ISD::ArgFlagsTy ArgFlags2) {
8764   unsigned XLenInBytes = XLen / 8;
8765   if (Register Reg = State.AllocateReg(ArgGPRs)) {
8766     // At least one half can be passed via register.
8767     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
8768                                      VA1.getLocVT(), CCValAssign::Full));
8769   } else {
8770     // Both halves must be passed on the stack, with proper alignment.
8771     Align StackAlign =
8772         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
8773     State.addLoc(
8774         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
8775                             State.AllocateStack(XLenInBytes, StackAlign),
8776                             VA1.getLocVT(), CCValAssign::Full));
8777     State.addLoc(CCValAssign::getMem(
8778         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
8779         LocVT2, CCValAssign::Full));
8780     return false;
8781   }
8782 
8783   if (Register Reg = State.AllocateReg(ArgGPRs)) {
8784     // The second half can also be passed via register.
8785     State.addLoc(
8786         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
8787   } else {
8788     // The second half is passed via the stack, without additional alignment.
8789     State.addLoc(CCValAssign::getMem(
8790         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
8791         LocVT2, CCValAssign::Full));
8792   }
8793 
8794   return false;
8795 }
8796 
8797 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
8798                                Optional<unsigned> FirstMaskArgument,
8799                                CCState &State, const RISCVTargetLowering &TLI) {
8800   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
8801   if (RC == &RISCV::VRRegClass) {
8802     // Assign the first mask argument to V0.
8803     // This is an interim calling convention and it may be changed in the
8804     // future.
8805     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
8806       return State.AllocateReg(RISCV::V0);
8807     return State.AllocateReg(ArgVRs);
8808   }
8809   if (RC == &RISCV::VRM2RegClass)
8810     return State.AllocateReg(ArgVRM2s);
8811   if (RC == &RISCV::VRM4RegClass)
8812     return State.AllocateReg(ArgVRM4s);
8813   if (RC == &RISCV::VRM8RegClass)
8814     return State.AllocateReg(ArgVRM8s);
8815   llvm_unreachable("Unhandled register class for ValueType");
8816 }
8817 
8818 // Implements the RISC-V calling convention. Returns true upon failure.
8819 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
8820                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
8821                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
8822                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
8823                      Optional<unsigned> FirstMaskArgument) {
8824   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
8825   assert(XLen == 32 || XLen == 64);
8826   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
8827 
8828   // Any return value split in to more than two values can't be returned
8829   // directly. Vectors are returned via the available vector registers.
8830   if (!LocVT.isVector() && IsRet && ValNo > 1)
8831     return true;
8832 
8833   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
8834   // variadic argument, or if no F16/F32 argument registers are available.
8835   bool UseGPRForF16_F32 = true;
8836   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
8837   // variadic argument, or if no F64 argument registers are available.
8838   bool UseGPRForF64 = true;
8839 
8840   switch (ABI) {
8841   default:
8842     llvm_unreachable("Unexpected ABI");
8843   case RISCVABI::ABI_ILP32:
8844   case RISCVABI::ABI_LP64:
8845     break;
8846   case RISCVABI::ABI_ILP32F:
8847   case RISCVABI::ABI_LP64F:
8848     UseGPRForF16_F32 = !IsFixed;
8849     break;
8850   case RISCVABI::ABI_ILP32D:
8851   case RISCVABI::ABI_LP64D:
8852     UseGPRForF16_F32 = !IsFixed;
8853     UseGPRForF64 = !IsFixed;
8854     break;
8855   }
8856 
8857   // FPR16, FPR32, and FPR64 alias each other.
8858   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
8859     UseGPRForF16_F32 = true;
8860     UseGPRForF64 = true;
8861   }
8862 
8863   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
8864   // similar local variables rather than directly checking against the target
8865   // ABI.
8866 
8867   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
8868     LocVT = XLenVT;
8869     LocInfo = CCValAssign::BCvt;
8870   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
8871     LocVT = MVT::i64;
8872     LocInfo = CCValAssign::BCvt;
8873   }
8874 
8875   // If this is a variadic argument, the RISC-V calling convention requires
8876   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
8877   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
8878   // be used regardless of whether the original argument was split during
8879   // legalisation or not. The argument will not be passed by registers if the
8880   // original type is larger than 2*XLEN, so the register alignment rule does
8881   // not apply.
8882   unsigned TwoXLenInBytes = (2 * XLen) / 8;
8883   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
8884       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
8885     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
8886     // Skip 'odd' register if necessary.
8887     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
8888       State.AllocateReg(ArgGPRs);
8889   }
8890 
8891   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
8892   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
8893       State.getPendingArgFlags();
8894 
8895   assert(PendingLocs.size() == PendingArgFlags.size() &&
8896          "PendingLocs and PendingArgFlags out of sync");
8897 
8898   // Handle passing f64 on RV32D with a soft float ABI or when floating point
8899   // registers are exhausted.
8900   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
8901     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
8902            "Can't lower f64 if it is split");
8903     // Depending on available argument GPRS, f64 may be passed in a pair of
8904     // GPRs, split between a GPR and the stack, or passed completely on the
8905     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
8906     // cases.
8907     Register Reg = State.AllocateReg(ArgGPRs);
8908     LocVT = MVT::i32;
8909     if (!Reg) {
8910       unsigned StackOffset = State.AllocateStack(8, Align(8));
8911       State.addLoc(
8912           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
8913       return false;
8914     }
8915     if (!State.AllocateReg(ArgGPRs))
8916       State.AllocateStack(4, Align(4));
8917     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8918     return false;
8919   }
8920 
8921   // Fixed-length vectors are located in the corresponding scalable-vector
8922   // container types.
8923   if (ValVT.isFixedLengthVector())
8924     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
8925 
8926   // Split arguments might be passed indirectly, so keep track of the pending
8927   // values. Split vectors are passed via a mix of registers and indirectly, so
8928   // treat them as we would any other argument.
8929   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
8930     LocVT = XLenVT;
8931     LocInfo = CCValAssign::Indirect;
8932     PendingLocs.push_back(
8933         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
8934     PendingArgFlags.push_back(ArgFlags);
8935     if (!ArgFlags.isSplitEnd()) {
8936       return false;
8937     }
8938   }
8939 
8940   // If the split argument only had two elements, it should be passed directly
8941   // in registers or on the stack.
8942   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
8943       PendingLocs.size() <= 2) {
8944     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
8945     // Apply the normal calling convention rules to the first half of the
8946     // split argument.
8947     CCValAssign VA = PendingLocs[0];
8948     ISD::ArgFlagsTy AF = PendingArgFlags[0];
8949     PendingLocs.clear();
8950     PendingArgFlags.clear();
8951     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
8952                                ArgFlags);
8953   }
8954 
8955   // Allocate to a register if possible, or else a stack slot.
8956   Register Reg;
8957   unsigned StoreSizeBytes = XLen / 8;
8958   Align StackAlign = Align(XLen / 8);
8959 
8960   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
8961     Reg = State.AllocateReg(ArgFPR16s);
8962   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
8963     Reg = State.AllocateReg(ArgFPR32s);
8964   else if (ValVT == MVT::f64 && !UseGPRForF64)
8965     Reg = State.AllocateReg(ArgFPR64s);
8966   else if (ValVT.isVector()) {
8967     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
8968     if (!Reg) {
8969       // For return values, the vector must be passed fully via registers or
8970       // via the stack.
8971       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
8972       // but we're using all of them.
8973       if (IsRet)
8974         return true;
8975       // Try using a GPR to pass the address
8976       if ((Reg = State.AllocateReg(ArgGPRs))) {
8977         LocVT = XLenVT;
8978         LocInfo = CCValAssign::Indirect;
8979       } else if (ValVT.isScalableVector()) {
8980         LocVT = XLenVT;
8981         LocInfo = CCValAssign::Indirect;
8982       } else {
8983         // Pass fixed-length vectors on the stack.
8984         LocVT = ValVT;
8985         StoreSizeBytes = ValVT.getStoreSize();
8986         // Align vectors to their element sizes, being careful for vXi1
8987         // vectors.
8988         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
8989       }
8990     }
8991   } else {
8992     Reg = State.AllocateReg(ArgGPRs);
8993   }
8994 
8995   unsigned StackOffset =
8996       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
8997 
8998   // If we reach this point and PendingLocs is non-empty, we must be at the
8999   // end of a split argument that must be passed indirectly.
9000   if (!PendingLocs.empty()) {
9001     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
9002     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
9003 
9004     for (auto &It : PendingLocs) {
9005       if (Reg)
9006         It.convertToReg(Reg);
9007       else
9008         It.convertToMem(StackOffset);
9009       State.addLoc(It);
9010     }
9011     PendingLocs.clear();
9012     PendingArgFlags.clear();
9013     return false;
9014   }
9015 
9016   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
9017           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
9018          "Expected an XLenVT or vector types at this stage");
9019 
9020   if (Reg) {
9021     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9022     return false;
9023   }
9024 
9025   // When a floating-point value is passed on the stack, no bit-conversion is
9026   // needed.
9027   if (ValVT.isFloatingPoint()) {
9028     LocVT = ValVT;
9029     LocInfo = CCValAssign::Full;
9030   }
9031   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9032   return false;
9033 }
9034 
9035 template <typename ArgTy>
9036 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
9037   for (const auto &ArgIdx : enumerate(Args)) {
9038     MVT ArgVT = ArgIdx.value().VT;
9039     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
9040       return ArgIdx.index();
9041   }
9042   return None;
9043 }
9044 
9045 void RISCVTargetLowering::analyzeInputArgs(
9046     MachineFunction &MF, CCState &CCInfo,
9047     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
9048     RISCVCCAssignFn Fn) const {
9049   unsigned NumArgs = Ins.size();
9050   FunctionType *FType = MF.getFunction().getFunctionType();
9051 
9052   Optional<unsigned> FirstMaskArgument;
9053   if (Subtarget.hasVInstructions())
9054     FirstMaskArgument = preAssignMask(Ins);
9055 
9056   for (unsigned i = 0; i != NumArgs; ++i) {
9057     MVT ArgVT = Ins[i].VT;
9058     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
9059 
9060     Type *ArgTy = nullptr;
9061     if (IsRet)
9062       ArgTy = FType->getReturnType();
9063     else if (Ins[i].isOrigArg())
9064       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
9065 
9066     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9067     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9068            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
9069            FirstMaskArgument)) {
9070       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
9071                         << EVT(ArgVT).getEVTString() << '\n');
9072       llvm_unreachable(nullptr);
9073     }
9074   }
9075 }
9076 
9077 void RISCVTargetLowering::analyzeOutputArgs(
9078     MachineFunction &MF, CCState &CCInfo,
9079     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
9080     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
9081   unsigned NumArgs = Outs.size();
9082 
9083   Optional<unsigned> FirstMaskArgument;
9084   if (Subtarget.hasVInstructions())
9085     FirstMaskArgument = preAssignMask(Outs);
9086 
9087   for (unsigned i = 0; i != NumArgs; i++) {
9088     MVT ArgVT = Outs[i].VT;
9089     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
9090     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
9091 
9092     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9093     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9094            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
9095            FirstMaskArgument)) {
9096       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
9097                         << EVT(ArgVT).getEVTString() << "\n");
9098       llvm_unreachable(nullptr);
9099     }
9100   }
9101 }
9102 
9103 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
9104 // values.
9105 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
9106                                    const CCValAssign &VA, const SDLoc &DL,
9107                                    const RISCVSubtarget &Subtarget) {
9108   switch (VA.getLocInfo()) {
9109   default:
9110     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9111   case CCValAssign::Full:
9112     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
9113       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
9114     break;
9115   case CCValAssign::BCvt:
9116     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9117       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
9118     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9119       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
9120     else
9121       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
9122     break;
9123   }
9124   return Val;
9125 }
9126 
9127 // The caller is responsible for loading the full value if the argument is
9128 // passed with CCValAssign::Indirect.
9129 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
9130                                 const CCValAssign &VA, const SDLoc &DL,
9131                                 const RISCVTargetLowering &TLI) {
9132   MachineFunction &MF = DAG.getMachineFunction();
9133   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9134   EVT LocVT = VA.getLocVT();
9135   SDValue Val;
9136   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
9137   Register VReg = RegInfo.createVirtualRegister(RC);
9138   RegInfo.addLiveIn(VA.getLocReg(), VReg);
9139   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
9140 
9141   if (VA.getLocInfo() == CCValAssign::Indirect)
9142     return Val;
9143 
9144   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
9145 }
9146 
9147 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
9148                                    const CCValAssign &VA, const SDLoc &DL,
9149                                    const RISCVSubtarget &Subtarget) {
9150   EVT LocVT = VA.getLocVT();
9151 
9152   switch (VA.getLocInfo()) {
9153   default:
9154     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9155   case CCValAssign::Full:
9156     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
9157       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
9158     break;
9159   case CCValAssign::BCvt:
9160     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9161       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
9162     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9163       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
9164     else
9165       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
9166     break;
9167   }
9168   return Val;
9169 }
9170 
9171 // The caller is responsible for loading the full value if the argument is
9172 // passed with CCValAssign::Indirect.
9173 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
9174                                 const CCValAssign &VA, const SDLoc &DL) {
9175   MachineFunction &MF = DAG.getMachineFunction();
9176   MachineFrameInfo &MFI = MF.getFrameInfo();
9177   EVT LocVT = VA.getLocVT();
9178   EVT ValVT = VA.getValVT();
9179   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
9180   if (ValVT.isScalableVector()) {
9181     // When the value is a scalable vector, we save the pointer which points to
9182     // the scalable vector value in the stack. The ValVT will be the pointer
9183     // type, instead of the scalable vector type.
9184     ValVT = LocVT;
9185   }
9186   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
9187                                  /*IsImmutable=*/true);
9188   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
9189   SDValue Val;
9190 
9191   ISD::LoadExtType ExtType;
9192   switch (VA.getLocInfo()) {
9193   default:
9194     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9195   case CCValAssign::Full:
9196   case CCValAssign::Indirect:
9197   case CCValAssign::BCvt:
9198     ExtType = ISD::NON_EXTLOAD;
9199     break;
9200   }
9201   Val = DAG.getExtLoad(
9202       ExtType, DL, LocVT, Chain, FIN,
9203       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
9204   return Val;
9205 }
9206 
9207 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
9208                                        const CCValAssign &VA, const SDLoc &DL) {
9209   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
9210          "Unexpected VA");
9211   MachineFunction &MF = DAG.getMachineFunction();
9212   MachineFrameInfo &MFI = MF.getFrameInfo();
9213   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9214 
9215   if (VA.isMemLoc()) {
9216     // f64 is passed on the stack.
9217     int FI =
9218         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
9219     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9220     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
9221                        MachinePointerInfo::getFixedStack(MF, FI));
9222   }
9223 
9224   assert(VA.isRegLoc() && "Expected register VA assignment");
9225 
9226   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9227   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
9228   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
9229   SDValue Hi;
9230   if (VA.getLocReg() == RISCV::X17) {
9231     // Second half of f64 is passed on the stack.
9232     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
9233     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9234     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
9235                      MachinePointerInfo::getFixedStack(MF, FI));
9236   } else {
9237     // Second half of f64 is passed in another GPR.
9238     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9239     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
9240     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
9241   }
9242   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
9243 }
9244 
9245 // FastCC has less than 1% performance improvement for some particular
9246 // benchmark. But theoretically, it may has benenfit for some cases.
9247 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
9248                             unsigned ValNo, MVT ValVT, MVT LocVT,
9249                             CCValAssign::LocInfo LocInfo,
9250                             ISD::ArgFlagsTy ArgFlags, CCState &State,
9251                             bool IsFixed, bool IsRet, Type *OrigTy,
9252                             const RISCVTargetLowering &TLI,
9253                             Optional<unsigned> FirstMaskArgument) {
9254 
9255   // X5 and X6 might be used for save-restore libcall.
9256   static const MCPhysReg GPRList[] = {
9257       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
9258       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
9259       RISCV::X29, RISCV::X30, RISCV::X31};
9260 
9261   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9262     if (unsigned Reg = State.AllocateReg(GPRList)) {
9263       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9264       return false;
9265     }
9266   }
9267 
9268   if (LocVT == MVT::f16) {
9269     static const MCPhysReg FPR16List[] = {
9270         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
9271         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
9272         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
9273         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
9274     if (unsigned Reg = State.AllocateReg(FPR16List)) {
9275       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9276       return false;
9277     }
9278   }
9279 
9280   if (LocVT == MVT::f32) {
9281     static const MCPhysReg FPR32List[] = {
9282         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
9283         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
9284         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
9285         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
9286     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9287       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9288       return false;
9289     }
9290   }
9291 
9292   if (LocVT == MVT::f64) {
9293     static const MCPhysReg FPR64List[] = {
9294         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
9295         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
9296         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
9297         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
9298     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9299       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9300       return false;
9301     }
9302   }
9303 
9304   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
9305     unsigned Offset4 = State.AllocateStack(4, Align(4));
9306     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
9307     return false;
9308   }
9309 
9310   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
9311     unsigned Offset5 = State.AllocateStack(8, Align(8));
9312     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
9313     return false;
9314   }
9315 
9316   if (LocVT.isVector()) {
9317     if (unsigned Reg =
9318             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
9319       // Fixed-length vectors are located in the corresponding scalable-vector
9320       // container types.
9321       if (ValVT.isFixedLengthVector())
9322         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9323       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9324     } else {
9325       // Try and pass the address via a "fast" GPR.
9326       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
9327         LocInfo = CCValAssign::Indirect;
9328         LocVT = TLI.getSubtarget().getXLenVT();
9329         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
9330       } else if (ValVT.isFixedLengthVector()) {
9331         auto StackAlign =
9332             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9333         unsigned StackOffset =
9334             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
9335         State.addLoc(
9336             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9337       } else {
9338         // Can't pass scalable vectors on the stack.
9339         return true;
9340       }
9341     }
9342 
9343     return false;
9344   }
9345 
9346   return true; // CC didn't match.
9347 }
9348 
9349 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
9350                          CCValAssign::LocInfo LocInfo,
9351                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
9352 
9353   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9354     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
9355     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
9356     static const MCPhysReg GPRList[] = {
9357         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
9358         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
9359     if (unsigned Reg = State.AllocateReg(GPRList)) {
9360       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9361       return false;
9362     }
9363   }
9364 
9365   if (LocVT == MVT::f32) {
9366     // Pass in STG registers: F1, ..., F6
9367     //                        fs0 ... fs5
9368     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
9369                                           RISCV::F18_F, RISCV::F19_F,
9370                                           RISCV::F20_F, RISCV::F21_F};
9371     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9372       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9373       return false;
9374     }
9375   }
9376 
9377   if (LocVT == MVT::f64) {
9378     // Pass in STG registers: D1, ..., D6
9379     //                        fs6 ... fs11
9380     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
9381                                           RISCV::F24_D, RISCV::F25_D,
9382                                           RISCV::F26_D, RISCV::F27_D};
9383     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9384       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9385       return false;
9386     }
9387   }
9388 
9389   report_fatal_error("No registers left in GHC calling convention");
9390   return true;
9391 }
9392 
9393 // Transform physical registers into virtual registers.
9394 SDValue RISCVTargetLowering::LowerFormalArguments(
9395     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
9396     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
9397     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
9398 
9399   MachineFunction &MF = DAG.getMachineFunction();
9400 
9401   switch (CallConv) {
9402   default:
9403     report_fatal_error("Unsupported calling convention");
9404   case CallingConv::C:
9405   case CallingConv::Fast:
9406     break;
9407   case CallingConv::GHC:
9408     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
9409         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
9410       report_fatal_error(
9411         "GHC calling convention requires the F and D instruction set extensions");
9412   }
9413 
9414   const Function &Func = MF.getFunction();
9415   if (Func.hasFnAttribute("interrupt")) {
9416     if (!Func.arg_empty())
9417       report_fatal_error(
9418         "Functions with the interrupt attribute cannot have arguments!");
9419 
9420     StringRef Kind =
9421       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
9422 
9423     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
9424       report_fatal_error(
9425         "Function interrupt attribute argument not supported!");
9426   }
9427 
9428   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9429   MVT XLenVT = Subtarget.getXLenVT();
9430   unsigned XLenInBytes = Subtarget.getXLen() / 8;
9431   // Used with vargs to acumulate store chains.
9432   std::vector<SDValue> OutChains;
9433 
9434   // Assign locations to all of the incoming arguments.
9435   SmallVector<CCValAssign, 16> ArgLocs;
9436   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
9437 
9438   if (CallConv == CallingConv::GHC)
9439     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
9440   else
9441     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
9442                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
9443                                                    : CC_RISCV);
9444 
9445   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
9446     CCValAssign &VA = ArgLocs[i];
9447     SDValue ArgValue;
9448     // Passing f64 on RV32D with a soft float ABI must be handled as a special
9449     // case.
9450     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
9451       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
9452     else if (VA.isRegLoc())
9453       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
9454     else
9455       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
9456 
9457     if (VA.getLocInfo() == CCValAssign::Indirect) {
9458       // If the original argument was split and passed by reference (e.g. i128
9459       // on RV32), we need to load all parts of it here (using the same
9460       // address). Vectors may be partly split to registers and partly to the
9461       // stack, in which case the base address is partly offset and subsequent
9462       // stores are relative to that.
9463       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
9464                                    MachinePointerInfo()));
9465       unsigned ArgIndex = Ins[i].OrigArgIndex;
9466       unsigned ArgPartOffset = Ins[i].PartOffset;
9467       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
9468       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
9469         CCValAssign &PartVA = ArgLocs[i + 1];
9470         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
9471         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
9472         if (PartVA.getValVT().isScalableVector())
9473           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
9474         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
9475         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
9476                                      MachinePointerInfo()));
9477         ++i;
9478       }
9479       continue;
9480     }
9481     InVals.push_back(ArgValue);
9482   }
9483 
9484   if (IsVarArg) {
9485     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
9486     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
9487     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
9488     MachineFrameInfo &MFI = MF.getFrameInfo();
9489     MachineRegisterInfo &RegInfo = MF.getRegInfo();
9490     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
9491 
9492     // Offset of the first variable argument from stack pointer, and size of
9493     // the vararg save area. For now, the varargs save area is either zero or
9494     // large enough to hold a0-a7.
9495     int VaArgOffset, VarArgsSaveSize;
9496 
9497     // If all registers are allocated, then all varargs must be passed on the
9498     // stack and we don't need to save any argregs.
9499     if (ArgRegs.size() == Idx) {
9500       VaArgOffset = CCInfo.getNextStackOffset();
9501       VarArgsSaveSize = 0;
9502     } else {
9503       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
9504       VaArgOffset = -VarArgsSaveSize;
9505     }
9506 
9507     // Record the frame index of the first variable argument
9508     // which is a value necessary to VASTART.
9509     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
9510     RVFI->setVarArgsFrameIndex(FI);
9511 
9512     // If saving an odd number of registers then create an extra stack slot to
9513     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
9514     // offsets to even-numbered registered remain 2*XLEN-aligned.
9515     if (Idx % 2) {
9516       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
9517       VarArgsSaveSize += XLenInBytes;
9518     }
9519 
9520     // Copy the integer registers that may have been used for passing varargs
9521     // to the vararg save area.
9522     for (unsigned I = Idx; I < ArgRegs.size();
9523          ++I, VaArgOffset += XLenInBytes) {
9524       const Register Reg = RegInfo.createVirtualRegister(RC);
9525       RegInfo.addLiveIn(ArgRegs[I], Reg);
9526       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
9527       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
9528       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
9529       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
9530                                    MachinePointerInfo::getFixedStack(MF, FI));
9531       cast<StoreSDNode>(Store.getNode())
9532           ->getMemOperand()
9533           ->setValue((Value *)nullptr);
9534       OutChains.push_back(Store);
9535     }
9536     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
9537   }
9538 
9539   // All stores are grouped in one node to allow the matching between
9540   // the size of Ins and InVals. This only happens for vararg functions.
9541   if (!OutChains.empty()) {
9542     OutChains.push_back(Chain);
9543     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
9544   }
9545 
9546   return Chain;
9547 }
9548 
9549 /// isEligibleForTailCallOptimization - Check whether the call is eligible
9550 /// for tail call optimization.
9551 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
9552 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
9553     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
9554     const SmallVector<CCValAssign, 16> &ArgLocs) const {
9555 
9556   auto &Callee = CLI.Callee;
9557   auto CalleeCC = CLI.CallConv;
9558   auto &Outs = CLI.Outs;
9559   auto &Caller = MF.getFunction();
9560   auto CallerCC = Caller.getCallingConv();
9561 
9562   // Exception-handling functions need a special set of instructions to
9563   // indicate a return to the hardware. Tail-calling another function would
9564   // probably break this.
9565   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
9566   // should be expanded as new function attributes are introduced.
9567   if (Caller.hasFnAttribute("interrupt"))
9568     return false;
9569 
9570   // Do not tail call opt if the stack is used to pass parameters.
9571   if (CCInfo.getNextStackOffset() != 0)
9572     return false;
9573 
9574   // Do not tail call opt if any parameters need to be passed indirectly.
9575   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
9576   // passed indirectly. So the address of the value will be passed in a
9577   // register, or if not available, then the address is put on the stack. In
9578   // order to pass indirectly, space on the stack often needs to be allocated
9579   // in order to store the value. In this case the CCInfo.getNextStackOffset()
9580   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
9581   // are passed CCValAssign::Indirect.
9582   for (auto &VA : ArgLocs)
9583     if (VA.getLocInfo() == CCValAssign::Indirect)
9584       return false;
9585 
9586   // Do not tail call opt if either caller or callee uses struct return
9587   // semantics.
9588   auto IsCallerStructRet = Caller.hasStructRetAttr();
9589   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
9590   if (IsCallerStructRet || IsCalleeStructRet)
9591     return false;
9592 
9593   // Externally-defined functions with weak linkage should not be
9594   // tail-called. The behaviour of branch instructions in this situation (as
9595   // used for tail calls) is implementation-defined, so we cannot rely on the
9596   // linker replacing the tail call with a return.
9597   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
9598     const GlobalValue *GV = G->getGlobal();
9599     if (GV->hasExternalWeakLinkage())
9600       return false;
9601   }
9602 
9603   // The callee has to preserve all registers the caller needs to preserve.
9604   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
9605   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
9606   if (CalleeCC != CallerCC) {
9607     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
9608     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
9609       return false;
9610   }
9611 
9612   // Byval parameters hand the function a pointer directly into the stack area
9613   // we want to reuse during a tail call. Working around this *is* possible
9614   // but less efficient and uglier in LowerCall.
9615   for (auto &Arg : Outs)
9616     if (Arg.Flags.isByVal())
9617       return false;
9618 
9619   return true;
9620 }
9621 
9622 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
9623   return DAG.getDataLayout().getPrefTypeAlign(
9624       VT.getTypeForEVT(*DAG.getContext()));
9625 }
9626 
9627 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
9628 // and output parameter nodes.
9629 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
9630                                        SmallVectorImpl<SDValue> &InVals) const {
9631   SelectionDAG &DAG = CLI.DAG;
9632   SDLoc &DL = CLI.DL;
9633   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
9634   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
9635   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
9636   SDValue Chain = CLI.Chain;
9637   SDValue Callee = CLI.Callee;
9638   bool &IsTailCall = CLI.IsTailCall;
9639   CallingConv::ID CallConv = CLI.CallConv;
9640   bool IsVarArg = CLI.IsVarArg;
9641   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9642   MVT XLenVT = Subtarget.getXLenVT();
9643 
9644   MachineFunction &MF = DAG.getMachineFunction();
9645 
9646   // Analyze the operands of the call, assigning locations to each operand.
9647   SmallVector<CCValAssign, 16> ArgLocs;
9648   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
9649 
9650   if (CallConv == CallingConv::GHC)
9651     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
9652   else
9653     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
9654                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
9655                                                     : CC_RISCV);
9656 
9657   // Check if it's really possible to do a tail call.
9658   if (IsTailCall)
9659     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
9660 
9661   if (IsTailCall)
9662     ++NumTailCalls;
9663   else if (CLI.CB && CLI.CB->isMustTailCall())
9664     report_fatal_error("failed to perform tail call elimination on a call "
9665                        "site marked musttail");
9666 
9667   // Get a count of how many bytes are to be pushed on the stack.
9668   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
9669 
9670   // Create local copies for byval args
9671   SmallVector<SDValue, 8> ByValArgs;
9672   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
9673     ISD::ArgFlagsTy Flags = Outs[i].Flags;
9674     if (!Flags.isByVal())
9675       continue;
9676 
9677     SDValue Arg = OutVals[i];
9678     unsigned Size = Flags.getByValSize();
9679     Align Alignment = Flags.getNonZeroByValAlign();
9680 
9681     int FI =
9682         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
9683     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
9684     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
9685 
9686     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
9687                           /*IsVolatile=*/false,
9688                           /*AlwaysInline=*/false, IsTailCall,
9689                           MachinePointerInfo(), MachinePointerInfo());
9690     ByValArgs.push_back(FIPtr);
9691   }
9692 
9693   if (!IsTailCall)
9694     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
9695 
9696   // Copy argument values to their designated locations.
9697   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
9698   SmallVector<SDValue, 8> MemOpChains;
9699   SDValue StackPtr;
9700   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
9701     CCValAssign &VA = ArgLocs[i];
9702     SDValue ArgValue = OutVals[i];
9703     ISD::ArgFlagsTy Flags = Outs[i].Flags;
9704 
9705     // Handle passing f64 on RV32D with a soft float ABI as a special case.
9706     bool IsF64OnRV32DSoftABI =
9707         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
9708     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
9709       SDValue SplitF64 = DAG.getNode(
9710           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
9711       SDValue Lo = SplitF64.getValue(0);
9712       SDValue Hi = SplitF64.getValue(1);
9713 
9714       Register RegLo = VA.getLocReg();
9715       RegsToPass.push_back(std::make_pair(RegLo, Lo));
9716 
9717       if (RegLo == RISCV::X17) {
9718         // Second half of f64 is passed on the stack.
9719         // Work out the address of the stack slot.
9720         if (!StackPtr.getNode())
9721           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
9722         // Emit the store.
9723         MemOpChains.push_back(
9724             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
9725       } else {
9726         // Second half of f64 is passed in another GPR.
9727         assert(RegLo < RISCV::X31 && "Invalid register pair");
9728         Register RegHigh = RegLo + 1;
9729         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
9730       }
9731       continue;
9732     }
9733 
9734     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
9735     // as any other MemLoc.
9736 
9737     // Promote the value if needed.
9738     // For now, only handle fully promoted and indirect arguments.
9739     if (VA.getLocInfo() == CCValAssign::Indirect) {
9740       // Store the argument in a stack slot and pass its address.
9741       Align StackAlign =
9742           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
9743                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
9744       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
9745       // If the original argument was split (e.g. i128), we need
9746       // to store the required parts of it here (and pass just one address).
9747       // Vectors may be partly split to registers and partly to the stack, in
9748       // which case the base address is partly offset and subsequent stores are
9749       // relative to that.
9750       unsigned ArgIndex = Outs[i].OrigArgIndex;
9751       unsigned ArgPartOffset = Outs[i].PartOffset;
9752       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
9753       // Calculate the total size to store. We don't have access to what we're
9754       // actually storing other than performing the loop and collecting the
9755       // info.
9756       SmallVector<std::pair<SDValue, SDValue>> Parts;
9757       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
9758         SDValue PartValue = OutVals[i + 1];
9759         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
9760         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
9761         EVT PartVT = PartValue.getValueType();
9762         if (PartVT.isScalableVector())
9763           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
9764         StoredSize += PartVT.getStoreSize();
9765         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
9766         Parts.push_back(std::make_pair(PartValue, Offset));
9767         ++i;
9768       }
9769       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
9770       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
9771       MemOpChains.push_back(
9772           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
9773                        MachinePointerInfo::getFixedStack(MF, FI)));
9774       for (const auto &Part : Parts) {
9775         SDValue PartValue = Part.first;
9776         SDValue PartOffset = Part.second;
9777         SDValue Address =
9778             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
9779         MemOpChains.push_back(
9780             DAG.getStore(Chain, DL, PartValue, Address,
9781                          MachinePointerInfo::getFixedStack(MF, FI)));
9782       }
9783       ArgValue = SpillSlot;
9784     } else {
9785       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
9786     }
9787 
9788     // Use local copy if it is a byval arg.
9789     if (Flags.isByVal())
9790       ArgValue = ByValArgs[j++];
9791 
9792     if (VA.isRegLoc()) {
9793       // Queue up the argument copies and emit them at the end.
9794       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
9795     } else {
9796       assert(VA.isMemLoc() && "Argument not register or memory");
9797       assert(!IsTailCall && "Tail call not allowed if stack is used "
9798                             "for passing parameters");
9799 
9800       // Work out the address of the stack slot.
9801       if (!StackPtr.getNode())
9802         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
9803       SDValue Address =
9804           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
9805                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
9806 
9807       // Emit the store.
9808       MemOpChains.push_back(
9809           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
9810     }
9811   }
9812 
9813   // Join the stores, which are independent of one another.
9814   if (!MemOpChains.empty())
9815     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
9816 
9817   SDValue Glue;
9818 
9819   // Build a sequence of copy-to-reg nodes, chained and glued together.
9820   for (auto &Reg : RegsToPass) {
9821     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
9822     Glue = Chain.getValue(1);
9823   }
9824 
9825   // Validate that none of the argument registers have been marked as
9826   // reserved, if so report an error. Do the same for the return address if this
9827   // is not a tailcall.
9828   validateCCReservedRegs(RegsToPass, MF);
9829   if (!IsTailCall &&
9830       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
9831     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9832         MF.getFunction(),
9833         "Return address register required, but has been reserved."});
9834 
9835   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
9836   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
9837   // split it and then direct call can be matched by PseudoCALL.
9838   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
9839     const GlobalValue *GV = S->getGlobal();
9840 
9841     unsigned OpFlags = RISCVII::MO_CALL;
9842     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
9843       OpFlags = RISCVII::MO_PLT;
9844 
9845     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
9846   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
9847     unsigned OpFlags = RISCVII::MO_CALL;
9848 
9849     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
9850                                                  nullptr))
9851       OpFlags = RISCVII::MO_PLT;
9852 
9853     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
9854   }
9855 
9856   // The first call operand is the chain and the second is the target address.
9857   SmallVector<SDValue, 8> Ops;
9858   Ops.push_back(Chain);
9859   Ops.push_back(Callee);
9860 
9861   // Add argument registers to the end of the list so that they are
9862   // known live into the call.
9863   for (auto &Reg : RegsToPass)
9864     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
9865 
9866   if (!IsTailCall) {
9867     // Add a register mask operand representing the call-preserved registers.
9868     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
9869     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
9870     assert(Mask && "Missing call preserved mask for calling convention");
9871     Ops.push_back(DAG.getRegisterMask(Mask));
9872   }
9873 
9874   // Glue the call to the argument copies, if any.
9875   if (Glue.getNode())
9876     Ops.push_back(Glue);
9877 
9878   // Emit the call.
9879   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
9880 
9881   if (IsTailCall) {
9882     MF.getFrameInfo().setHasTailCall();
9883     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
9884   }
9885 
9886   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
9887   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
9888   Glue = Chain.getValue(1);
9889 
9890   // Mark the end of the call, which is glued to the call itself.
9891   Chain = DAG.getCALLSEQ_END(Chain,
9892                              DAG.getConstant(NumBytes, DL, PtrVT, true),
9893                              DAG.getConstant(0, DL, PtrVT, true),
9894                              Glue, DL);
9895   Glue = Chain.getValue(1);
9896 
9897   // Assign locations to each value returned by this call.
9898   SmallVector<CCValAssign, 16> RVLocs;
9899   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
9900   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
9901 
9902   // Copy all of the result registers out of their specified physreg.
9903   for (auto &VA : RVLocs) {
9904     // Copy the value out
9905     SDValue RetValue =
9906         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
9907     // Glue the RetValue to the end of the call sequence
9908     Chain = RetValue.getValue(1);
9909     Glue = RetValue.getValue(2);
9910 
9911     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
9912       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
9913       SDValue RetValue2 =
9914           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
9915       Chain = RetValue2.getValue(1);
9916       Glue = RetValue2.getValue(2);
9917       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
9918                              RetValue2);
9919     }
9920 
9921     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
9922 
9923     InVals.push_back(RetValue);
9924   }
9925 
9926   return Chain;
9927 }
9928 
9929 bool RISCVTargetLowering::CanLowerReturn(
9930     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
9931     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
9932   SmallVector<CCValAssign, 16> RVLocs;
9933   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
9934 
9935   Optional<unsigned> FirstMaskArgument;
9936   if (Subtarget.hasVInstructions())
9937     FirstMaskArgument = preAssignMask(Outs);
9938 
9939   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
9940     MVT VT = Outs[i].VT;
9941     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
9942     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9943     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
9944                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
9945                  *this, FirstMaskArgument))
9946       return false;
9947   }
9948   return true;
9949 }
9950 
9951 SDValue
9952 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
9953                                  bool IsVarArg,
9954                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
9955                                  const SmallVectorImpl<SDValue> &OutVals,
9956                                  const SDLoc &DL, SelectionDAG &DAG) const {
9957   const MachineFunction &MF = DAG.getMachineFunction();
9958   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
9959 
9960   // Stores the assignment of the return value to a location.
9961   SmallVector<CCValAssign, 16> RVLocs;
9962 
9963   // Info about the registers and stack slot.
9964   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
9965                  *DAG.getContext());
9966 
9967   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
9968                     nullptr, CC_RISCV);
9969 
9970   if (CallConv == CallingConv::GHC && !RVLocs.empty())
9971     report_fatal_error("GHC functions return void only");
9972 
9973   SDValue Glue;
9974   SmallVector<SDValue, 4> RetOps(1, Chain);
9975 
9976   // Copy the result values into the output registers.
9977   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
9978     SDValue Val = OutVals[i];
9979     CCValAssign &VA = RVLocs[i];
9980     assert(VA.isRegLoc() && "Can only return in registers!");
9981 
9982     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
9983       // Handle returning f64 on RV32D with a soft float ABI.
9984       assert(VA.isRegLoc() && "Expected return via registers");
9985       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
9986                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
9987       SDValue Lo = SplitF64.getValue(0);
9988       SDValue Hi = SplitF64.getValue(1);
9989       Register RegLo = VA.getLocReg();
9990       assert(RegLo < RISCV::X31 && "Invalid register pair");
9991       Register RegHi = RegLo + 1;
9992 
9993       if (STI.isRegisterReservedByUser(RegLo) ||
9994           STI.isRegisterReservedByUser(RegHi))
9995         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9996             MF.getFunction(),
9997             "Return value register required, but has been reserved."});
9998 
9999       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
10000       Glue = Chain.getValue(1);
10001       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
10002       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
10003       Glue = Chain.getValue(1);
10004       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
10005     } else {
10006       // Handle a 'normal' return.
10007       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
10008       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
10009 
10010       if (STI.isRegisterReservedByUser(VA.getLocReg()))
10011         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10012             MF.getFunction(),
10013             "Return value register required, but has been reserved."});
10014 
10015       // Guarantee that all emitted copies are stuck together.
10016       Glue = Chain.getValue(1);
10017       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
10018     }
10019   }
10020 
10021   RetOps[0] = Chain; // Update chain.
10022 
10023   // Add the glue node if we have it.
10024   if (Glue.getNode()) {
10025     RetOps.push_back(Glue);
10026   }
10027 
10028   unsigned RetOpc = RISCVISD::RET_FLAG;
10029   // Interrupt service routines use different return instructions.
10030   const Function &Func = DAG.getMachineFunction().getFunction();
10031   if (Func.hasFnAttribute("interrupt")) {
10032     if (!Func.getReturnType()->isVoidTy())
10033       report_fatal_error(
10034           "Functions with the interrupt attribute must have void return type!");
10035 
10036     MachineFunction &MF = DAG.getMachineFunction();
10037     StringRef Kind =
10038       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10039 
10040     if (Kind == "user")
10041       RetOpc = RISCVISD::URET_FLAG;
10042     else if (Kind == "supervisor")
10043       RetOpc = RISCVISD::SRET_FLAG;
10044     else
10045       RetOpc = RISCVISD::MRET_FLAG;
10046   }
10047 
10048   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
10049 }
10050 
10051 void RISCVTargetLowering::validateCCReservedRegs(
10052     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
10053     MachineFunction &MF) const {
10054   const Function &F = MF.getFunction();
10055   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10056 
10057   if (llvm::any_of(Regs, [&STI](auto Reg) {
10058         return STI.isRegisterReservedByUser(Reg.first);
10059       }))
10060     F.getContext().diagnose(DiagnosticInfoUnsupported{
10061         F, "Argument register required, but has been reserved."});
10062 }
10063 
10064 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
10065   return CI->isTailCall();
10066 }
10067 
10068 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
10069 #define NODE_NAME_CASE(NODE)                                                   \
10070   case RISCVISD::NODE:                                                         \
10071     return "RISCVISD::" #NODE;
10072   // clang-format off
10073   switch ((RISCVISD::NodeType)Opcode) {
10074   case RISCVISD::FIRST_NUMBER:
10075     break;
10076   NODE_NAME_CASE(RET_FLAG)
10077   NODE_NAME_CASE(URET_FLAG)
10078   NODE_NAME_CASE(SRET_FLAG)
10079   NODE_NAME_CASE(MRET_FLAG)
10080   NODE_NAME_CASE(CALL)
10081   NODE_NAME_CASE(SELECT_CC)
10082   NODE_NAME_CASE(BR_CC)
10083   NODE_NAME_CASE(BuildPairF64)
10084   NODE_NAME_CASE(SplitF64)
10085   NODE_NAME_CASE(TAIL)
10086   NODE_NAME_CASE(MULHSU)
10087   NODE_NAME_CASE(SLLW)
10088   NODE_NAME_CASE(SRAW)
10089   NODE_NAME_CASE(SRLW)
10090   NODE_NAME_CASE(DIVW)
10091   NODE_NAME_CASE(DIVUW)
10092   NODE_NAME_CASE(REMUW)
10093   NODE_NAME_CASE(ROLW)
10094   NODE_NAME_CASE(RORW)
10095   NODE_NAME_CASE(CLZW)
10096   NODE_NAME_CASE(CTZW)
10097   NODE_NAME_CASE(FSLW)
10098   NODE_NAME_CASE(FSRW)
10099   NODE_NAME_CASE(FSL)
10100   NODE_NAME_CASE(FSR)
10101   NODE_NAME_CASE(FMV_H_X)
10102   NODE_NAME_CASE(FMV_X_ANYEXTH)
10103   NODE_NAME_CASE(FMV_W_X_RV64)
10104   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
10105   NODE_NAME_CASE(FCVT_X)
10106   NODE_NAME_CASE(FCVT_XU)
10107   NODE_NAME_CASE(FCVT_W_RV64)
10108   NODE_NAME_CASE(FCVT_WU_RV64)
10109   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
10110   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
10111   NODE_NAME_CASE(READ_CYCLE_WIDE)
10112   NODE_NAME_CASE(GREV)
10113   NODE_NAME_CASE(GREVW)
10114   NODE_NAME_CASE(GORC)
10115   NODE_NAME_CASE(GORCW)
10116   NODE_NAME_CASE(SHFL)
10117   NODE_NAME_CASE(SHFLW)
10118   NODE_NAME_CASE(UNSHFL)
10119   NODE_NAME_CASE(UNSHFLW)
10120   NODE_NAME_CASE(BFP)
10121   NODE_NAME_CASE(BFPW)
10122   NODE_NAME_CASE(BCOMPRESS)
10123   NODE_NAME_CASE(BCOMPRESSW)
10124   NODE_NAME_CASE(BDECOMPRESS)
10125   NODE_NAME_CASE(BDECOMPRESSW)
10126   NODE_NAME_CASE(VMV_V_X_VL)
10127   NODE_NAME_CASE(VFMV_V_F_VL)
10128   NODE_NAME_CASE(VMV_X_S)
10129   NODE_NAME_CASE(VMV_S_X_VL)
10130   NODE_NAME_CASE(VFMV_S_F_VL)
10131   NODE_NAME_CASE(SPLAT_VECTOR_I64)
10132   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
10133   NODE_NAME_CASE(READ_VLENB)
10134   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
10135   NODE_NAME_CASE(VSLIDEUP_VL)
10136   NODE_NAME_CASE(VSLIDE1UP_VL)
10137   NODE_NAME_CASE(VSLIDEDOWN_VL)
10138   NODE_NAME_CASE(VSLIDE1DOWN_VL)
10139   NODE_NAME_CASE(VID_VL)
10140   NODE_NAME_CASE(VFNCVT_ROD_VL)
10141   NODE_NAME_CASE(VECREDUCE_ADD_VL)
10142   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
10143   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
10144   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
10145   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
10146   NODE_NAME_CASE(VECREDUCE_AND_VL)
10147   NODE_NAME_CASE(VECREDUCE_OR_VL)
10148   NODE_NAME_CASE(VECREDUCE_XOR_VL)
10149   NODE_NAME_CASE(VECREDUCE_FADD_VL)
10150   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
10151   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
10152   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
10153   NODE_NAME_CASE(ADD_VL)
10154   NODE_NAME_CASE(AND_VL)
10155   NODE_NAME_CASE(MUL_VL)
10156   NODE_NAME_CASE(OR_VL)
10157   NODE_NAME_CASE(SDIV_VL)
10158   NODE_NAME_CASE(SHL_VL)
10159   NODE_NAME_CASE(SREM_VL)
10160   NODE_NAME_CASE(SRA_VL)
10161   NODE_NAME_CASE(SRL_VL)
10162   NODE_NAME_CASE(SUB_VL)
10163   NODE_NAME_CASE(UDIV_VL)
10164   NODE_NAME_CASE(UREM_VL)
10165   NODE_NAME_CASE(XOR_VL)
10166   NODE_NAME_CASE(SADDSAT_VL)
10167   NODE_NAME_CASE(UADDSAT_VL)
10168   NODE_NAME_CASE(SSUBSAT_VL)
10169   NODE_NAME_CASE(USUBSAT_VL)
10170   NODE_NAME_CASE(FADD_VL)
10171   NODE_NAME_CASE(FSUB_VL)
10172   NODE_NAME_CASE(FMUL_VL)
10173   NODE_NAME_CASE(FDIV_VL)
10174   NODE_NAME_CASE(FNEG_VL)
10175   NODE_NAME_CASE(FABS_VL)
10176   NODE_NAME_CASE(FSQRT_VL)
10177   NODE_NAME_CASE(FMA_VL)
10178   NODE_NAME_CASE(FCOPYSIGN_VL)
10179   NODE_NAME_CASE(SMIN_VL)
10180   NODE_NAME_CASE(SMAX_VL)
10181   NODE_NAME_CASE(UMIN_VL)
10182   NODE_NAME_CASE(UMAX_VL)
10183   NODE_NAME_CASE(FMINNUM_VL)
10184   NODE_NAME_CASE(FMAXNUM_VL)
10185   NODE_NAME_CASE(MULHS_VL)
10186   NODE_NAME_CASE(MULHU_VL)
10187   NODE_NAME_CASE(FP_TO_SINT_VL)
10188   NODE_NAME_CASE(FP_TO_UINT_VL)
10189   NODE_NAME_CASE(SINT_TO_FP_VL)
10190   NODE_NAME_CASE(UINT_TO_FP_VL)
10191   NODE_NAME_CASE(FP_EXTEND_VL)
10192   NODE_NAME_CASE(FP_ROUND_VL)
10193   NODE_NAME_CASE(VWMUL_VL)
10194   NODE_NAME_CASE(VWMULU_VL)
10195   NODE_NAME_CASE(VWMULSU_VL)
10196   NODE_NAME_CASE(VWADDU_VL)
10197   NODE_NAME_CASE(SETCC_VL)
10198   NODE_NAME_CASE(VSELECT_VL)
10199   NODE_NAME_CASE(VP_MERGE_VL)
10200   NODE_NAME_CASE(VMAND_VL)
10201   NODE_NAME_CASE(VMOR_VL)
10202   NODE_NAME_CASE(VMXOR_VL)
10203   NODE_NAME_CASE(VMCLR_VL)
10204   NODE_NAME_CASE(VMSET_VL)
10205   NODE_NAME_CASE(VRGATHER_VX_VL)
10206   NODE_NAME_CASE(VRGATHER_VV_VL)
10207   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
10208   NODE_NAME_CASE(VSEXT_VL)
10209   NODE_NAME_CASE(VZEXT_VL)
10210   NODE_NAME_CASE(VCPOP_VL)
10211   NODE_NAME_CASE(VLE_VL)
10212   NODE_NAME_CASE(VSE_VL)
10213   NODE_NAME_CASE(READ_CSR)
10214   NODE_NAME_CASE(WRITE_CSR)
10215   NODE_NAME_CASE(SWAP_CSR)
10216   }
10217   // clang-format on
10218   return nullptr;
10219 #undef NODE_NAME_CASE
10220 }
10221 
10222 /// getConstraintType - Given a constraint letter, return the type of
10223 /// constraint it is for this target.
10224 RISCVTargetLowering::ConstraintType
10225 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
10226   if (Constraint.size() == 1) {
10227     switch (Constraint[0]) {
10228     default:
10229       break;
10230     case 'f':
10231       return C_RegisterClass;
10232     case 'I':
10233     case 'J':
10234     case 'K':
10235       return C_Immediate;
10236     case 'A':
10237       return C_Memory;
10238     case 'S': // A symbolic address
10239       return C_Other;
10240     }
10241   } else {
10242     if (Constraint == "vr" || Constraint == "vm")
10243       return C_RegisterClass;
10244   }
10245   return TargetLowering::getConstraintType(Constraint);
10246 }
10247 
10248 std::pair<unsigned, const TargetRegisterClass *>
10249 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
10250                                                   StringRef Constraint,
10251                                                   MVT VT) const {
10252   // First, see if this is a constraint that directly corresponds to a
10253   // RISCV register class.
10254   if (Constraint.size() == 1) {
10255     switch (Constraint[0]) {
10256     case 'r':
10257       // TODO: Support fixed vectors up to XLen for P extension?
10258       if (VT.isVector())
10259         break;
10260       return std::make_pair(0U, &RISCV::GPRRegClass);
10261     case 'f':
10262       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
10263         return std::make_pair(0U, &RISCV::FPR16RegClass);
10264       if (Subtarget.hasStdExtF() && VT == MVT::f32)
10265         return std::make_pair(0U, &RISCV::FPR32RegClass);
10266       if (Subtarget.hasStdExtD() && VT == MVT::f64)
10267         return std::make_pair(0U, &RISCV::FPR64RegClass);
10268       break;
10269     default:
10270       break;
10271     }
10272   } else if (Constraint == "vr") {
10273     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
10274                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10275       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
10276         return std::make_pair(0U, RC);
10277     }
10278   } else if (Constraint == "vm") {
10279     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
10280       return std::make_pair(0U, &RISCV::VMV0RegClass);
10281   }
10282 
10283   // Clang will correctly decode the usage of register name aliases into their
10284   // official names. However, other frontends like `rustc` do not. This allows
10285   // users of these frontends to use the ABI names for registers in LLVM-style
10286   // register constraints.
10287   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
10288                                .Case("{zero}", RISCV::X0)
10289                                .Case("{ra}", RISCV::X1)
10290                                .Case("{sp}", RISCV::X2)
10291                                .Case("{gp}", RISCV::X3)
10292                                .Case("{tp}", RISCV::X4)
10293                                .Case("{t0}", RISCV::X5)
10294                                .Case("{t1}", RISCV::X6)
10295                                .Case("{t2}", RISCV::X7)
10296                                .Cases("{s0}", "{fp}", RISCV::X8)
10297                                .Case("{s1}", RISCV::X9)
10298                                .Case("{a0}", RISCV::X10)
10299                                .Case("{a1}", RISCV::X11)
10300                                .Case("{a2}", RISCV::X12)
10301                                .Case("{a3}", RISCV::X13)
10302                                .Case("{a4}", RISCV::X14)
10303                                .Case("{a5}", RISCV::X15)
10304                                .Case("{a6}", RISCV::X16)
10305                                .Case("{a7}", RISCV::X17)
10306                                .Case("{s2}", RISCV::X18)
10307                                .Case("{s3}", RISCV::X19)
10308                                .Case("{s4}", RISCV::X20)
10309                                .Case("{s5}", RISCV::X21)
10310                                .Case("{s6}", RISCV::X22)
10311                                .Case("{s7}", RISCV::X23)
10312                                .Case("{s8}", RISCV::X24)
10313                                .Case("{s9}", RISCV::X25)
10314                                .Case("{s10}", RISCV::X26)
10315                                .Case("{s11}", RISCV::X27)
10316                                .Case("{t3}", RISCV::X28)
10317                                .Case("{t4}", RISCV::X29)
10318                                .Case("{t5}", RISCV::X30)
10319                                .Case("{t6}", RISCV::X31)
10320                                .Default(RISCV::NoRegister);
10321   if (XRegFromAlias != RISCV::NoRegister)
10322     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
10323 
10324   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
10325   // TableGen record rather than the AsmName to choose registers for InlineAsm
10326   // constraints, plus we want to match those names to the widest floating point
10327   // register type available, manually select floating point registers here.
10328   //
10329   // The second case is the ABI name of the register, so that frontends can also
10330   // use the ABI names in register constraint lists.
10331   if (Subtarget.hasStdExtF()) {
10332     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
10333                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
10334                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
10335                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
10336                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
10337                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
10338                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
10339                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
10340                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
10341                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
10342                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
10343                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
10344                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
10345                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
10346                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
10347                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
10348                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
10349                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
10350                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
10351                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
10352                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
10353                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
10354                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
10355                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
10356                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
10357                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
10358                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
10359                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
10360                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
10361                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
10362                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
10363                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
10364                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
10365                         .Default(RISCV::NoRegister);
10366     if (FReg != RISCV::NoRegister) {
10367       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
10368       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
10369         unsigned RegNo = FReg - RISCV::F0_F;
10370         unsigned DReg = RISCV::F0_D + RegNo;
10371         return std::make_pair(DReg, &RISCV::FPR64RegClass);
10372       }
10373       if (VT == MVT::f32 || VT == MVT::Other)
10374         return std::make_pair(FReg, &RISCV::FPR32RegClass);
10375       if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
10376         unsigned RegNo = FReg - RISCV::F0_F;
10377         unsigned HReg = RISCV::F0_H + RegNo;
10378         return std::make_pair(HReg, &RISCV::FPR16RegClass);
10379       }
10380     }
10381   }
10382 
10383   if (Subtarget.hasVInstructions()) {
10384     Register VReg = StringSwitch<Register>(Constraint.lower())
10385                         .Case("{v0}", RISCV::V0)
10386                         .Case("{v1}", RISCV::V1)
10387                         .Case("{v2}", RISCV::V2)
10388                         .Case("{v3}", RISCV::V3)
10389                         .Case("{v4}", RISCV::V4)
10390                         .Case("{v5}", RISCV::V5)
10391                         .Case("{v6}", RISCV::V6)
10392                         .Case("{v7}", RISCV::V7)
10393                         .Case("{v8}", RISCV::V8)
10394                         .Case("{v9}", RISCV::V9)
10395                         .Case("{v10}", RISCV::V10)
10396                         .Case("{v11}", RISCV::V11)
10397                         .Case("{v12}", RISCV::V12)
10398                         .Case("{v13}", RISCV::V13)
10399                         .Case("{v14}", RISCV::V14)
10400                         .Case("{v15}", RISCV::V15)
10401                         .Case("{v16}", RISCV::V16)
10402                         .Case("{v17}", RISCV::V17)
10403                         .Case("{v18}", RISCV::V18)
10404                         .Case("{v19}", RISCV::V19)
10405                         .Case("{v20}", RISCV::V20)
10406                         .Case("{v21}", RISCV::V21)
10407                         .Case("{v22}", RISCV::V22)
10408                         .Case("{v23}", RISCV::V23)
10409                         .Case("{v24}", RISCV::V24)
10410                         .Case("{v25}", RISCV::V25)
10411                         .Case("{v26}", RISCV::V26)
10412                         .Case("{v27}", RISCV::V27)
10413                         .Case("{v28}", RISCV::V28)
10414                         .Case("{v29}", RISCV::V29)
10415                         .Case("{v30}", RISCV::V30)
10416                         .Case("{v31}", RISCV::V31)
10417                         .Default(RISCV::NoRegister);
10418     if (VReg != RISCV::NoRegister) {
10419       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
10420         return std::make_pair(VReg, &RISCV::VMRegClass);
10421       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
10422         return std::make_pair(VReg, &RISCV::VRRegClass);
10423       for (const auto *RC :
10424            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10425         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
10426           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
10427           return std::make_pair(VReg, RC);
10428         }
10429       }
10430     }
10431   }
10432 
10433   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10434 }
10435 
10436 unsigned
10437 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
10438   // Currently only support length 1 constraints.
10439   if (ConstraintCode.size() == 1) {
10440     switch (ConstraintCode[0]) {
10441     case 'A':
10442       return InlineAsm::Constraint_A;
10443     default:
10444       break;
10445     }
10446   }
10447 
10448   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
10449 }
10450 
10451 void RISCVTargetLowering::LowerAsmOperandForConstraint(
10452     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
10453     SelectionDAG &DAG) const {
10454   // Currently only support length 1 constraints.
10455   if (Constraint.length() == 1) {
10456     switch (Constraint[0]) {
10457     case 'I':
10458       // Validate & create a 12-bit signed immediate operand.
10459       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
10460         uint64_t CVal = C->getSExtValue();
10461         if (isInt<12>(CVal))
10462           Ops.push_back(
10463               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
10464       }
10465       return;
10466     case 'J':
10467       // Validate & create an integer zero operand.
10468       if (auto *C = dyn_cast<ConstantSDNode>(Op))
10469         if (C->getZExtValue() == 0)
10470           Ops.push_back(
10471               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
10472       return;
10473     case 'K':
10474       // Validate & create a 5-bit unsigned immediate operand.
10475       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
10476         uint64_t CVal = C->getZExtValue();
10477         if (isUInt<5>(CVal))
10478           Ops.push_back(
10479               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
10480       }
10481       return;
10482     case 'S':
10483       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
10484         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
10485                                                  GA->getValueType(0)));
10486       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
10487         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
10488                                                 BA->getValueType(0)));
10489       }
10490       return;
10491     default:
10492       break;
10493     }
10494   }
10495   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
10496 }
10497 
10498 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
10499                                                    Instruction *Inst,
10500                                                    AtomicOrdering Ord) const {
10501   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
10502     return Builder.CreateFence(Ord);
10503   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
10504     return Builder.CreateFence(AtomicOrdering::Release);
10505   return nullptr;
10506 }
10507 
10508 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
10509                                                     Instruction *Inst,
10510                                                     AtomicOrdering Ord) const {
10511   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
10512     return Builder.CreateFence(AtomicOrdering::Acquire);
10513   return nullptr;
10514 }
10515 
10516 TargetLowering::AtomicExpansionKind
10517 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
10518   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
10519   // point operations can't be used in an lr/sc sequence without breaking the
10520   // forward-progress guarantee.
10521   if (AI->isFloatingPointOperation())
10522     return AtomicExpansionKind::CmpXChg;
10523 
10524   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
10525   if (Size == 8 || Size == 16)
10526     return AtomicExpansionKind::MaskedIntrinsic;
10527   return AtomicExpansionKind::None;
10528 }
10529 
10530 static Intrinsic::ID
10531 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
10532   if (XLen == 32) {
10533     switch (BinOp) {
10534     default:
10535       llvm_unreachable("Unexpected AtomicRMW BinOp");
10536     case AtomicRMWInst::Xchg:
10537       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
10538     case AtomicRMWInst::Add:
10539       return Intrinsic::riscv_masked_atomicrmw_add_i32;
10540     case AtomicRMWInst::Sub:
10541       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
10542     case AtomicRMWInst::Nand:
10543       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
10544     case AtomicRMWInst::Max:
10545       return Intrinsic::riscv_masked_atomicrmw_max_i32;
10546     case AtomicRMWInst::Min:
10547       return Intrinsic::riscv_masked_atomicrmw_min_i32;
10548     case AtomicRMWInst::UMax:
10549       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
10550     case AtomicRMWInst::UMin:
10551       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
10552     }
10553   }
10554 
10555   if (XLen == 64) {
10556     switch (BinOp) {
10557     default:
10558       llvm_unreachable("Unexpected AtomicRMW BinOp");
10559     case AtomicRMWInst::Xchg:
10560       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
10561     case AtomicRMWInst::Add:
10562       return Intrinsic::riscv_masked_atomicrmw_add_i64;
10563     case AtomicRMWInst::Sub:
10564       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
10565     case AtomicRMWInst::Nand:
10566       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
10567     case AtomicRMWInst::Max:
10568       return Intrinsic::riscv_masked_atomicrmw_max_i64;
10569     case AtomicRMWInst::Min:
10570       return Intrinsic::riscv_masked_atomicrmw_min_i64;
10571     case AtomicRMWInst::UMax:
10572       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
10573     case AtomicRMWInst::UMin:
10574       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
10575     }
10576   }
10577 
10578   llvm_unreachable("Unexpected XLen\n");
10579 }
10580 
10581 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
10582     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
10583     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
10584   unsigned XLen = Subtarget.getXLen();
10585   Value *Ordering =
10586       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
10587   Type *Tys[] = {AlignedAddr->getType()};
10588   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
10589       AI->getModule(),
10590       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
10591 
10592   if (XLen == 64) {
10593     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
10594     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
10595     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
10596   }
10597 
10598   Value *Result;
10599 
10600   // Must pass the shift amount needed to sign extend the loaded value prior
10601   // to performing a signed comparison for min/max. ShiftAmt is the number of
10602   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
10603   // is the number of bits to left+right shift the value in order to
10604   // sign-extend.
10605   if (AI->getOperation() == AtomicRMWInst::Min ||
10606       AI->getOperation() == AtomicRMWInst::Max) {
10607     const DataLayout &DL = AI->getModule()->getDataLayout();
10608     unsigned ValWidth =
10609         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
10610     Value *SextShamt =
10611         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
10612     Result = Builder.CreateCall(LrwOpScwLoop,
10613                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
10614   } else {
10615     Result =
10616         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
10617   }
10618 
10619   if (XLen == 64)
10620     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
10621   return Result;
10622 }
10623 
10624 TargetLowering::AtomicExpansionKind
10625 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
10626     AtomicCmpXchgInst *CI) const {
10627   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
10628   if (Size == 8 || Size == 16)
10629     return AtomicExpansionKind::MaskedIntrinsic;
10630   return AtomicExpansionKind::None;
10631 }
10632 
10633 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
10634     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
10635     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
10636   unsigned XLen = Subtarget.getXLen();
10637   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
10638   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
10639   if (XLen == 64) {
10640     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
10641     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
10642     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
10643     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
10644   }
10645   Type *Tys[] = {AlignedAddr->getType()};
10646   Function *MaskedCmpXchg =
10647       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
10648   Value *Result = Builder.CreateCall(
10649       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
10650   if (XLen == 64)
10651     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
10652   return Result;
10653 }
10654 
10655 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
10656   return false;
10657 }
10658 
10659 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
10660                                                EVT VT) const {
10661   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
10662     return false;
10663 
10664   switch (FPVT.getSimpleVT().SimpleTy) {
10665   case MVT::f16:
10666     return Subtarget.hasStdExtZfh();
10667   case MVT::f32:
10668     return Subtarget.hasStdExtF();
10669   case MVT::f64:
10670     return Subtarget.hasStdExtD();
10671   default:
10672     return false;
10673   }
10674 }
10675 
10676 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
10677   // If we are using the small code model, we can reduce size of jump table
10678   // entry to 4 bytes.
10679   if (Subtarget.is64Bit() && !isPositionIndependent() &&
10680       getTargetMachine().getCodeModel() == CodeModel::Small) {
10681     return MachineJumpTableInfo::EK_Custom32;
10682   }
10683   return TargetLowering::getJumpTableEncoding();
10684 }
10685 
10686 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
10687     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
10688     unsigned uid, MCContext &Ctx) const {
10689   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
10690          getTargetMachine().getCodeModel() == CodeModel::Small);
10691   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
10692 }
10693 
10694 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
10695                                                      EVT VT) const {
10696   VT = VT.getScalarType();
10697 
10698   if (!VT.isSimple())
10699     return false;
10700 
10701   switch (VT.getSimpleVT().SimpleTy) {
10702   case MVT::f16:
10703     return Subtarget.hasStdExtZfh();
10704   case MVT::f32:
10705     return Subtarget.hasStdExtF();
10706   case MVT::f64:
10707     return Subtarget.hasStdExtD();
10708   default:
10709     break;
10710   }
10711 
10712   return false;
10713 }
10714 
10715 Register RISCVTargetLowering::getExceptionPointerRegister(
10716     const Constant *PersonalityFn) const {
10717   return RISCV::X10;
10718 }
10719 
10720 Register RISCVTargetLowering::getExceptionSelectorRegister(
10721     const Constant *PersonalityFn) const {
10722   return RISCV::X11;
10723 }
10724 
10725 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
10726   // Return false to suppress the unnecessary extensions if the LibCall
10727   // arguments or return value is f32 type for LP64 ABI.
10728   RISCVABI::ABI ABI = Subtarget.getTargetABI();
10729   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
10730     return false;
10731 
10732   return true;
10733 }
10734 
10735 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
10736   if (Subtarget.is64Bit() && Type == MVT::i32)
10737     return true;
10738 
10739   return IsSigned;
10740 }
10741 
10742 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
10743                                                  SDValue C) const {
10744   // Check integral scalar types.
10745   if (VT.isScalarInteger()) {
10746     // Omit the optimization if the sub target has the M extension and the data
10747     // size exceeds XLen.
10748     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
10749       return false;
10750     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
10751       // Break the MUL to a SLLI and an ADD/SUB.
10752       const APInt &Imm = ConstNode->getAPIntValue();
10753       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
10754           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
10755         return true;
10756       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
10757       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
10758           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
10759            (Imm - 8).isPowerOf2()))
10760         return true;
10761       // Omit the following optimization if the sub target has the M extension
10762       // and the data size >= XLen.
10763       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
10764         return false;
10765       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
10766       // a pair of LUI/ADDI.
10767       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
10768         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
10769         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
10770             (1 - ImmS).isPowerOf2())
10771         return true;
10772       }
10773     }
10774   }
10775 
10776   return false;
10777 }
10778 
10779 bool RISCVTargetLowering::isMulAddWithConstProfitable(
10780     const SDValue &AddNode, const SDValue &ConstNode) const {
10781   // Let the DAGCombiner decide for vectors.
10782   EVT VT = AddNode.getValueType();
10783   if (VT.isVector())
10784     return true;
10785 
10786   // Let the DAGCombiner decide for larger types.
10787   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
10788     return true;
10789 
10790   // It is worse if c1 is simm12 while c1*c2 is not.
10791   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
10792   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
10793   const APInt &C1 = C1Node->getAPIntValue();
10794   const APInt &C2 = C2Node->getAPIntValue();
10795   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
10796     return false;
10797 
10798   // Default to true and let the DAGCombiner decide.
10799   return true;
10800 }
10801 
10802 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
10803     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
10804     bool *Fast) const {
10805   if (!VT.isVector())
10806     return false;
10807 
10808   EVT ElemVT = VT.getVectorElementType();
10809   if (Alignment >= ElemVT.getStoreSize()) {
10810     if (Fast)
10811       *Fast = true;
10812     return true;
10813   }
10814 
10815   return false;
10816 }
10817 
10818 bool RISCVTargetLowering::splitValueIntoRegisterParts(
10819     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
10820     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
10821   bool IsABIRegCopy = CC.hasValue();
10822   EVT ValueVT = Val.getValueType();
10823   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
10824     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
10825     // and cast to f32.
10826     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
10827     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
10828     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
10829                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
10830     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
10831     Parts[0] = Val;
10832     return true;
10833   }
10834 
10835   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
10836     LLVMContext &Context = *DAG.getContext();
10837     EVT ValueEltVT = ValueVT.getVectorElementType();
10838     EVT PartEltVT = PartVT.getVectorElementType();
10839     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
10840     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
10841     if (PartVTBitSize % ValueVTBitSize == 0) {
10842       assert(PartVTBitSize >= ValueVTBitSize);
10843       // If the element types are different, bitcast to the same element type of
10844       // PartVT first.
10845       // Give an example here, we want copy a <vscale x 1 x i8> value to
10846       // <vscale x 4 x i16>.
10847       // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
10848       // subvector, then we can bitcast to <vscale x 4 x i16>.
10849       if (ValueEltVT != PartEltVT) {
10850         if (PartVTBitSize > ValueVTBitSize) {
10851           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
10852           assert(Count != 0 && "The number of element should not be zero.");
10853           EVT SameEltTypeVT =
10854               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
10855           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
10856                             DAG.getUNDEF(SameEltTypeVT), Val,
10857                             DAG.getVectorIdxConstant(0, DL));
10858         }
10859         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
10860       } else {
10861         Val =
10862             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
10863                         Val, DAG.getVectorIdxConstant(0, DL));
10864       }
10865       Parts[0] = Val;
10866       return true;
10867     }
10868   }
10869   return false;
10870 }
10871 
10872 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
10873     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
10874     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
10875   bool IsABIRegCopy = CC.hasValue();
10876   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
10877     SDValue Val = Parts[0];
10878 
10879     // Cast the f32 to i32, truncate to i16, and cast back to f16.
10880     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
10881     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
10882     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
10883     return Val;
10884   }
10885 
10886   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
10887     LLVMContext &Context = *DAG.getContext();
10888     SDValue Val = Parts[0];
10889     EVT ValueEltVT = ValueVT.getVectorElementType();
10890     EVT PartEltVT = PartVT.getVectorElementType();
10891     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
10892     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
10893     if (PartVTBitSize % ValueVTBitSize == 0) {
10894       assert(PartVTBitSize >= ValueVTBitSize);
10895       EVT SameEltTypeVT = ValueVT;
10896       // If the element types are different, convert it to the same element type
10897       // of PartVT.
10898       // Give an example here, we want copy a <vscale x 1 x i8> value from
10899       // <vscale x 4 x i16>.
10900       // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
10901       // then we can extract <vscale x 1 x i8>.
10902       if (ValueEltVT != PartEltVT) {
10903         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
10904         assert(Count != 0 && "The number of element should not be zero.");
10905         SameEltTypeVT =
10906             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
10907         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
10908       }
10909       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
10910                         DAG.getVectorIdxConstant(0, DL));
10911       return Val;
10912     }
10913   }
10914   return SDValue();
10915 }
10916 
10917 SDValue
10918 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
10919                                    SelectionDAG &DAG,
10920                                    SmallVectorImpl<SDNode *> &Created) const {
10921   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
10922   if (isIntDivCheap(N->getValueType(0), Attr))
10923     return SDValue(N, 0); // Lower SDIV as SDIV
10924 
10925   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
10926          "Unexpected divisor!");
10927 
10928   // Conditional move is needed, so do the transformation iff Zbt is enabled.
10929   if (!Subtarget.hasStdExtZbt())
10930     return SDValue();
10931 
10932   // When |Divisor| >= 2 ^ 12, it isn't profitable to do such transformation.
10933   // Besides, more critical path instructions will be generated when dividing
10934   // by 2. So we keep using the original DAGs for these cases.
10935   unsigned Lg2 = Divisor.countTrailingZeros();
10936   if (Lg2 == 1 || Lg2 >= 12)
10937     return SDValue();
10938 
10939   // fold (sdiv X, pow2)
10940   EVT VT = N->getValueType(0);
10941   if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
10942     return SDValue();
10943 
10944   SDLoc DL(N);
10945   SDValue N0 = N->getOperand(0);
10946   SDValue Zero = DAG.getConstant(0, DL, VT);
10947   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
10948 
10949   // Add (N0 < 0) ? Pow2 - 1 : 0;
10950   SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
10951   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
10952   SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
10953 
10954   Created.push_back(Cmp.getNode());
10955   Created.push_back(Add.getNode());
10956   Created.push_back(Sel.getNode());
10957 
10958   // Divide by pow2.
10959   SDValue SRA =
10960       DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
10961 
10962   // If we're dividing by a positive value, we're done.  Otherwise, we must
10963   // negate the result.
10964   if (Divisor.isNonNegative())
10965     return SRA;
10966 
10967   Created.push_back(SRA.getNode());
10968   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
10969 }
10970 
10971 #define GET_REGISTER_MATCHER
10972 #include "RISCVGenAsmMatcher.inc"
10973 
10974 Register
10975 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
10976                                        const MachineFunction &MF) const {
10977   Register Reg = MatchRegisterAltName(RegName);
10978   if (Reg == RISCV::NoRegister)
10979     Reg = MatchRegisterName(RegName);
10980   if (Reg == RISCV::NoRegister)
10981     report_fatal_error(
10982         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
10983   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
10984   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
10985     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
10986                              StringRef(RegName) + "\"."));
10987   return Reg;
10988 }
10989 
10990 namespace llvm {
10991 namespace RISCVVIntrinsicsTable {
10992 
10993 #define GET_RISCVVIntrinsicsTable_IMPL
10994 #include "RISCVGenSearchableTables.inc"
10995 
10996 } // namespace RISCVVIntrinsicsTable
10997 
10998 } // namespace llvm
10999