1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineJumpTableInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/CodeGen/ValueTypes.h"
31 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/IR/DiagnosticPrinter.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/IntrinsicsRISCV.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/KnownBits.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/raw_ostream.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "riscv-lower"
45 
46 STATISTIC(NumTailCalls, "Number of tail calls");
47 
48 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
49                                          const RISCVSubtarget &STI)
50     : TargetLowering(TM), Subtarget(STI) {
51 
52   if (Subtarget.isRV32E())
53     report_fatal_error("Codegen not yet implemented for RV32E");
54 
55   RISCVABI::ABI ABI = Subtarget.getTargetABI();
56   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
57 
58   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
59       !Subtarget.hasStdExtF()) {
60     errs() << "Hard-float 'f' ABI can't be used for a target that "
61                 "doesn't support the F instruction set extension (ignoring "
62                           "target-abi)\n";
63     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
64   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
65              !Subtarget.hasStdExtD()) {
66     errs() << "Hard-float 'd' ABI can't be used for a target that "
67               "doesn't support the D instruction set extension (ignoring "
68               "target-abi)\n";
69     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
70   }
71 
72   switch (ABI) {
73   default:
74     report_fatal_error("Don't know how to lower this ABI");
75   case RISCVABI::ABI_ILP32:
76   case RISCVABI::ABI_ILP32F:
77   case RISCVABI::ABI_ILP32D:
78   case RISCVABI::ABI_LP64:
79   case RISCVABI::ABI_LP64F:
80   case RISCVABI::ABI_LP64D:
81     break;
82   }
83 
84   MVT XLenVT = Subtarget.getXLenVT();
85 
86   // Set up the register classes.
87   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
88 
89   if (Subtarget.hasStdExtZfh())
90     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
91   if (Subtarget.hasStdExtF())
92     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
93   if (Subtarget.hasStdExtD())
94     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
95 
96   static const MVT::SimpleValueType BoolVecVTs[] = {
97       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
98       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
99   static const MVT::SimpleValueType IntVecVTs[] = {
100       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
101       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
102       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
103       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
104       MVT::nxv4i64, MVT::nxv8i64};
105   static const MVT::SimpleValueType F16VecVTs[] = {
106       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
107       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
108   static const MVT::SimpleValueType F32VecVTs[] = {
109       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
110   static const MVT::SimpleValueType F64VecVTs[] = {
111       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
112 
113   if (Subtarget.hasVInstructions()) {
114     auto addRegClassForRVV = [this](MVT VT) {
115       unsigned Size = VT.getSizeInBits().getKnownMinValue();
116       assert(Size <= 512 && isPowerOf2_32(Size));
117       const TargetRegisterClass *RC;
118       if (Size <= 64)
119         RC = &RISCV::VRRegClass;
120       else if (Size == 128)
121         RC = &RISCV::VRM2RegClass;
122       else if (Size == 256)
123         RC = &RISCV::VRM4RegClass;
124       else
125         RC = &RISCV::VRM8RegClass;
126 
127       addRegisterClass(VT, RC);
128     };
129 
130     for (MVT VT : BoolVecVTs)
131       addRegClassForRVV(VT);
132     for (MVT VT : IntVecVTs) {
133       if (VT.getVectorElementType() == MVT::i64 &&
134           !Subtarget.hasVInstructionsI64())
135         continue;
136       addRegClassForRVV(VT);
137     }
138 
139     if (Subtarget.hasVInstructionsF16())
140       for (MVT VT : F16VecVTs)
141         addRegClassForRVV(VT);
142 
143     if (Subtarget.hasVInstructionsF32())
144       for (MVT VT : F32VecVTs)
145         addRegClassForRVV(VT);
146 
147     if (Subtarget.hasVInstructionsF64())
148       for (MVT VT : F64VecVTs)
149         addRegClassForRVV(VT);
150 
151     if (Subtarget.useRVVForFixedLengthVectors()) {
152       auto addRegClassForFixedVectors = [this](MVT VT) {
153         MVT ContainerVT = getContainerForFixedLengthVector(VT);
154         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
155         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
156         addRegisterClass(VT, TRI.getRegClass(RCID));
157       };
158       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
159         if (useRVVForFixedLengthVectorVT(VT))
160           addRegClassForFixedVectors(VT);
161 
162       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
163         if (useRVVForFixedLengthVectorVT(VT))
164           addRegClassForFixedVectors(VT);
165     }
166   }
167 
168   // Compute derived properties from the register classes.
169   computeRegisterProperties(STI.getRegisterInfo());
170 
171   setStackPointerRegisterToSaveRestore(RISCV::X2);
172 
173   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
174     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
175 
176   // TODO: add all necessary setOperationAction calls.
177   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
178 
179   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
180   setOperationAction(ISD::BR_CC, XLenVT, Expand);
181   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
182   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
183 
184   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
185   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
186 
187   setOperationAction(ISD::VASTART, MVT::Other, Custom);
188   setOperationAction(ISD::VAARG, MVT::Other, Expand);
189   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
190   setOperationAction(ISD::VAEND, MVT::Other, Expand);
191 
192   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
193   if (!Subtarget.hasStdExtZbb()) {
194     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
195     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
196   }
197 
198   if (Subtarget.is64Bit()) {
199     setOperationAction(ISD::ADD, MVT::i32, Custom);
200     setOperationAction(ISD::SUB, MVT::i32, Custom);
201     setOperationAction(ISD::SHL, MVT::i32, Custom);
202     setOperationAction(ISD::SRA, MVT::i32, Custom);
203     setOperationAction(ISD::SRL, MVT::i32, Custom);
204 
205     setOperationAction(ISD::UADDO, MVT::i32, Custom);
206     setOperationAction(ISD::USUBO, MVT::i32, Custom);
207     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
208     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
209   } else {
210     setLibcallName(RTLIB::SHL_I128, nullptr);
211     setLibcallName(RTLIB::SRL_I128, nullptr);
212     setLibcallName(RTLIB::SRA_I128, nullptr);
213     setLibcallName(RTLIB::MUL_I128, nullptr);
214     setLibcallName(RTLIB::MULO_I64, nullptr);
215   }
216 
217   if (!Subtarget.hasStdExtM()) {
218     setOperationAction(ISD::MUL, XLenVT, Expand);
219     setOperationAction(ISD::MULHS, XLenVT, Expand);
220     setOperationAction(ISD::MULHU, XLenVT, Expand);
221     setOperationAction(ISD::SDIV, XLenVT, Expand);
222     setOperationAction(ISD::UDIV, XLenVT, Expand);
223     setOperationAction(ISD::SREM, XLenVT, Expand);
224     setOperationAction(ISD::UREM, XLenVT, Expand);
225   } else {
226     if (Subtarget.is64Bit()) {
227       setOperationAction(ISD::MUL, MVT::i32, Custom);
228       setOperationAction(ISD::MUL, MVT::i128, Custom);
229 
230       setOperationAction(ISD::SDIV, MVT::i8, Custom);
231       setOperationAction(ISD::UDIV, MVT::i8, Custom);
232       setOperationAction(ISD::UREM, MVT::i8, Custom);
233       setOperationAction(ISD::SDIV, MVT::i16, Custom);
234       setOperationAction(ISD::UDIV, MVT::i16, Custom);
235       setOperationAction(ISD::UREM, MVT::i16, Custom);
236       setOperationAction(ISD::SDIV, MVT::i32, Custom);
237       setOperationAction(ISD::UDIV, MVT::i32, Custom);
238       setOperationAction(ISD::UREM, MVT::i32, Custom);
239     } else {
240       setOperationAction(ISD::MUL, MVT::i64, Custom);
241     }
242   }
243 
244   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
245   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
246   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
247   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
248 
249   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
250   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
251   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
252 
253   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
254       Subtarget.hasStdExtZbkb()) {
255     if (Subtarget.is64Bit()) {
256       setOperationAction(ISD::ROTL, MVT::i32, Custom);
257       setOperationAction(ISD::ROTR, MVT::i32, Custom);
258     }
259   } else {
260     setOperationAction(ISD::ROTL, XLenVT, Expand);
261     setOperationAction(ISD::ROTR, XLenVT, Expand);
262   }
263 
264   if (Subtarget.hasStdExtZbp()) {
265     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
266     // more combining.
267     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
268     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
269     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
270     // BSWAP i8 doesn't exist.
271     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
272     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
273 
274     if (Subtarget.is64Bit()) {
275       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
276       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
277     }
278   } else {
279     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
280     // pattern match it directly in isel.
281     setOperationAction(ISD::BSWAP, XLenVT,
282                        (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
283                            ? Legal
284                            : Expand);
285     // Zbkb can use rev8+brev8 to implement bitreverse.
286     setOperationAction(ISD::BITREVERSE, XLenVT,
287                        Subtarget.hasStdExtZbkb() ? Custom : Expand);
288   }
289 
290   if (Subtarget.hasStdExtZbb()) {
291     setOperationAction(ISD::SMIN, XLenVT, Legal);
292     setOperationAction(ISD::SMAX, XLenVT, Legal);
293     setOperationAction(ISD::UMIN, XLenVT, Legal);
294     setOperationAction(ISD::UMAX, XLenVT, Legal);
295 
296     if (Subtarget.is64Bit()) {
297       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
298       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
299       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
300       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
301     }
302   } else {
303     setOperationAction(ISD::CTTZ, XLenVT, Expand);
304     setOperationAction(ISD::CTLZ, XLenVT, Expand);
305     setOperationAction(ISD::CTPOP, XLenVT, Expand);
306   }
307 
308   if (Subtarget.hasStdExtZbt()) {
309     setOperationAction(ISD::FSHL, XLenVT, Custom);
310     setOperationAction(ISD::FSHR, XLenVT, Custom);
311     setOperationAction(ISD::SELECT, XLenVT, Legal);
312 
313     if (Subtarget.is64Bit()) {
314       setOperationAction(ISD::FSHL, MVT::i32, Custom);
315       setOperationAction(ISD::FSHR, MVT::i32, Custom);
316     }
317   } else {
318     setOperationAction(ISD::SELECT, XLenVT, Custom);
319   }
320 
321   static const ISD::CondCode FPCCToExpand[] = {
322       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
323       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
324       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
325 
326   static const ISD::NodeType FPOpToExpand[] = {
327       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
328       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
329 
330   if (Subtarget.hasStdExtZfh())
331     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
332 
333   if (Subtarget.hasStdExtZfh()) {
334     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
335     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
336     setOperationAction(ISD::LRINT, MVT::f16, Legal);
337     setOperationAction(ISD::LLRINT, MVT::f16, Legal);
338     setOperationAction(ISD::LROUND, MVT::f16, Legal);
339     setOperationAction(ISD::LLROUND, MVT::f16, Legal);
340     setOperationAction(ISD::STRICT_LRINT, MVT::f16, Legal);
341     setOperationAction(ISD::STRICT_LLRINT, MVT::f16, Legal);
342     setOperationAction(ISD::STRICT_LROUND, MVT::f16, Legal);
343     setOperationAction(ISD::STRICT_LLROUND, MVT::f16, Legal);
344     setOperationAction(ISD::STRICT_FADD, MVT::f16, Legal);
345     setOperationAction(ISD::STRICT_FMA, MVT::f16, Legal);
346     setOperationAction(ISD::STRICT_FSUB, MVT::f16, Legal);
347     setOperationAction(ISD::STRICT_FMUL, MVT::f16, Legal);
348     setOperationAction(ISD::STRICT_FDIV, MVT::f16, Legal);
349     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
350     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
351     setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Legal);
352     setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Legal);
353     setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Legal);
354     for (auto CC : FPCCToExpand)
355       setCondCodeAction(CC, MVT::f16, Expand);
356     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
357     setOperationAction(ISD::SELECT, MVT::f16, Custom);
358     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
359 
360     setOperationAction(ISD::FREM,       MVT::f16, Promote);
361     setOperationAction(ISD::FCEIL,      MVT::f16, Promote);
362     setOperationAction(ISD::FFLOOR,     MVT::f16, Promote);
363     setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
364     setOperationAction(ISD::FRINT,      MVT::f16, Promote);
365     setOperationAction(ISD::FROUND,     MVT::f16, Promote);
366     setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote);
367     setOperationAction(ISD::FTRUNC,     MVT::f16, Promote);
368     setOperationAction(ISD::FPOW,       MVT::f16, Promote);
369     setOperationAction(ISD::FPOWI,      MVT::f16, Promote);
370     setOperationAction(ISD::FCOS,       MVT::f16, Promote);
371     setOperationAction(ISD::FSIN,       MVT::f16, Promote);
372     setOperationAction(ISD::FSINCOS,    MVT::f16, Promote);
373     setOperationAction(ISD::FEXP,       MVT::f16, Promote);
374     setOperationAction(ISD::FEXP2,      MVT::f16, Promote);
375     setOperationAction(ISD::FLOG,       MVT::f16, Promote);
376     setOperationAction(ISD::FLOG2,      MVT::f16, Promote);
377     setOperationAction(ISD::FLOG10,     MVT::f16, Promote);
378 
379     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
380     // complete support for all operations in LegalizeDAG.
381 
382     // We need to custom promote this.
383     if (Subtarget.is64Bit())
384       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
385   }
386 
387   if (Subtarget.hasStdExtF()) {
388     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
389     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
390     setOperationAction(ISD::LRINT, MVT::f32, Legal);
391     setOperationAction(ISD::LLRINT, MVT::f32, Legal);
392     setOperationAction(ISD::LROUND, MVT::f32, Legal);
393     setOperationAction(ISD::LLROUND, MVT::f32, Legal);
394     setOperationAction(ISD::STRICT_LRINT, MVT::f32, Legal);
395     setOperationAction(ISD::STRICT_LLRINT, MVT::f32, Legal);
396     setOperationAction(ISD::STRICT_LROUND, MVT::f32, Legal);
397     setOperationAction(ISD::STRICT_LLROUND, MVT::f32, Legal);
398     setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
399     setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
400     setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
401     setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
402     setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
403     setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
404     setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
405     setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
406     for (auto CC : FPCCToExpand)
407       setCondCodeAction(CC, MVT::f32, Expand);
408     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
409     setOperationAction(ISD::SELECT, MVT::f32, Custom);
410     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
411     for (auto Op : FPOpToExpand)
412       setOperationAction(Op, MVT::f32, Expand);
413     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
414     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
415   }
416 
417   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
418     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
419 
420   if (Subtarget.hasStdExtD()) {
421     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
422     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
423     setOperationAction(ISD::LRINT, MVT::f64, Legal);
424     setOperationAction(ISD::LLRINT, MVT::f64, Legal);
425     setOperationAction(ISD::LROUND, MVT::f64, Legal);
426     setOperationAction(ISD::LLROUND, MVT::f64, Legal);
427     setOperationAction(ISD::STRICT_LRINT, MVT::f64, Legal);
428     setOperationAction(ISD::STRICT_LLRINT, MVT::f64, Legal);
429     setOperationAction(ISD::STRICT_LROUND, MVT::f64, Legal);
430     setOperationAction(ISD::STRICT_LLROUND, MVT::f64, Legal);
431     setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
432     setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
433     setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
434     setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
435     setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
436     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
437     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
438     setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
439     setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
440     setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
441     for (auto CC : FPCCToExpand)
442       setCondCodeAction(CC, MVT::f64, Expand);
443     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
444     setOperationAction(ISD::SELECT, MVT::f64, Custom);
445     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
446     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
447     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
448     for (auto Op : FPOpToExpand)
449       setOperationAction(Op, MVT::f64, Expand);
450     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
451     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
452   }
453 
454   if (Subtarget.is64Bit()) {
455     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
456     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
457     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
458     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
459   }
460 
461   if (Subtarget.hasStdExtF()) {
462     setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
463     setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);
464 
465     setOperationAction(ISD::STRICT_FP_TO_UINT, XLenVT, Legal);
466     setOperationAction(ISD::STRICT_FP_TO_SINT, XLenVT, Legal);
467     setOperationAction(ISD::STRICT_UINT_TO_FP, XLenVT, Legal);
468     setOperationAction(ISD::STRICT_SINT_TO_FP, XLenVT, Legal);
469 
470     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
471     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
472   }
473 
474   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
475   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
476   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
477   setOperationAction(ISD::JumpTable, XLenVT, Custom);
478 
479   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
480 
481   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
482   // Unfortunately this can't be determined just from the ISA naming string.
483   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
484                      Subtarget.is64Bit() ? Legal : Custom);
485 
486   setOperationAction(ISD::TRAP, MVT::Other, Legal);
487   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
488   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
489   if (Subtarget.is64Bit())
490     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
491 
492   if (Subtarget.hasStdExtA()) {
493     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
494     setMinCmpXchgSizeInBits(32);
495   } else {
496     setMaxAtomicSizeInBitsSupported(0);
497   }
498 
499   setBooleanContents(ZeroOrOneBooleanContent);
500 
501   if (Subtarget.hasVInstructions()) {
502     setBooleanVectorContents(ZeroOrOneBooleanContent);
503 
504     setOperationAction(ISD::VSCALE, XLenVT, Custom);
505 
506     // RVV intrinsics may have illegal operands.
507     // We also need to custom legalize vmv.x.s.
508     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
509     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
510     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
511     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
512     if (Subtarget.is64Bit()) {
513       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
514     } else {
515       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
516       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
517     }
518 
519     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
520     setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
521 
522     static const unsigned IntegerVPOps[] = {
523         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
524         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
525         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
526         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
527         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
528         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
529         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
530         ISD::VP_MERGE,       ISD::VP_SELECT};
531 
532     static const unsigned FloatingPointVPOps[] = {
533         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
534         ISD::VP_FDIV,        ISD::VP_FNEG,        ISD::VP_FMA,
535         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD, ISD::VP_REDUCE_FMIN,
536         ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,       ISD::VP_SELECT};
537 
538     if (!Subtarget.is64Bit()) {
539       // We must custom-lower certain vXi64 operations on RV32 due to the vector
540       // element type being illegal.
541       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
542       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
543 
544       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
545       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
546       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
547       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
548       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
549       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
550       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
551       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
552 
553       setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom);
554       setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom);
555       setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom);
556       setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom);
557       setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom);
558       setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom);
559       setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom);
560       setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom);
561     }
562 
563     for (MVT VT : BoolVecVTs) {
564       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
565 
566       // Mask VTs are custom-expanded into a series of standard nodes
567       setOperationAction(ISD::TRUNCATE, VT, Custom);
568       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
569       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
570       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
571 
572       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
573       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
574 
575       setOperationAction(ISD::SELECT, VT, Custom);
576       setOperationAction(ISD::SELECT_CC, VT, Expand);
577       setOperationAction(ISD::VSELECT, VT, Expand);
578       setOperationAction(ISD::VP_MERGE, VT, Expand);
579       setOperationAction(ISD::VP_SELECT, VT, Expand);
580 
581       setOperationAction(ISD::VP_AND, VT, Custom);
582       setOperationAction(ISD::VP_OR, VT, Custom);
583       setOperationAction(ISD::VP_XOR, VT, Custom);
584 
585       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
586       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
587       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
588 
589       setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
590       setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
591       setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
592 
593       // RVV has native int->float & float->int conversions where the
594       // element type sizes are within one power-of-two of each other. Any
595       // wider distances between type sizes have to be lowered as sequences
596       // which progressively narrow the gap in stages.
597       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
598       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
599       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
600       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
601 
602       // Expand all extending loads to types larger than this, and truncating
603       // stores from types larger than this.
604       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
605         setTruncStoreAction(OtherVT, VT, Expand);
606         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
607         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
608         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
609       }
610     }
611 
612     for (MVT VT : IntVecVTs) {
613       if (VT.getVectorElementType() == MVT::i64 &&
614           !Subtarget.hasVInstructionsI64())
615         continue;
616 
617       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
618       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
619 
620       // Vectors implement MULHS/MULHU.
621       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
622       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
623 
624       // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
625       if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV()) {
626         setOperationAction(ISD::MULHU, VT, Expand);
627         setOperationAction(ISD::MULHS, VT, Expand);
628       }
629 
630       setOperationAction(ISD::SMIN, VT, Legal);
631       setOperationAction(ISD::SMAX, VT, Legal);
632       setOperationAction(ISD::UMIN, VT, Legal);
633       setOperationAction(ISD::UMAX, VT, Legal);
634 
635       setOperationAction(ISD::ROTL, VT, Expand);
636       setOperationAction(ISD::ROTR, VT, Expand);
637 
638       setOperationAction(ISD::CTTZ, VT, Expand);
639       setOperationAction(ISD::CTLZ, VT, Expand);
640       setOperationAction(ISD::CTPOP, VT, Expand);
641 
642       setOperationAction(ISD::BSWAP, VT, Expand);
643 
644       // Custom-lower extensions and truncations from/to mask types.
645       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
646       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
647       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
648 
649       // RVV has native int->float & float->int conversions where the
650       // element type sizes are within one power-of-two of each other. Any
651       // wider distances between type sizes have to be lowered as sequences
652       // which progressively narrow the gap in stages.
653       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
654       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
655       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
656       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
657 
658       setOperationAction(ISD::SADDSAT, VT, Legal);
659       setOperationAction(ISD::UADDSAT, VT, Legal);
660       setOperationAction(ISD::SSUBSAT, VT, Legal);
661       setOperationAction(ISD::USUBSAT, VT, Legal);
662 
663       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
664       // nodes which truncate by one power of two at a time.
665       setOperationAction(ISD::TRUNCATE, VT, Custom);
666 
667       // Custom-lower insert/extract operations to simplify patterns.
668       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
669       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
670 
671       // Custom-lower reduction operations to set up the corresponding custom
672       // nodes' operands.
673       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
674       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
675       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
676       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
677       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
678       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
679       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
680       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
681 
682       for (unsigned VPOpc : IntegerVPOps)
683         setOperationAction(VPOpc, VT, Custom);
684 
685       setOperationAction(ISD::LOAD, VT, Custom);
686       setOperationAction(ISD::STORE, VT, Custom);
687 
688       setOperationAction(ISD::MLOAD, VT, Custom);
689       setOperationAction(ISD::MSTORE, VT, Custom);
690       setOperationAction(ISD::MGATHER, VT, Custom);
691       setOperationAction(ISD::MSCATTER, VT, Custom);
692 
693       setOperationAction(ISD::VP_LOAD, VT, Custom);
694       setOperationAction(ISD::VP_STORE, VT, Custom);
695       setOperationAction(ISD::VP_GATHER, VT, Custom);
696       setOperationAction(ISD::VP_SCATTER, VT, Custom);
697 
698       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
699       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
700       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
701 
702       setOperationAction(ISD::SELECT, VT, Custom);
703       setOperationAction(ISD::SELECT_CC, VT, Expand);
704 
705       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
706       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
707 
708       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
709         setTruncStoreAction(VT, OtherVT, Expand);
710         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
711         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
712         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
713       }
714 
715       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
716       // type that can represent the value exactly.
717       if (VT.getVectorElementType() != MVT::i64) {
718         MVT FloatEltVT =
719             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
720         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
721         if (isTypeLegal(FloatVT)) {
722           setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
723           setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
724         }
725       }
726     }
727 
728     // Expand various CCs to best match the RVV ISA, which natively supports UNE
729     // but no other unordered comparisons, and supports all ordered comparisons
730     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
731     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
732     // and we pattern-match those back to the "original", swapping operands once
733     // more. This way we catch both operations and both "vf" and "fv" forms with
734     // fewer patterns.
735     static const ISD::CondCode VFPCCToExpand[] = {
736         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
737         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
738         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
739     };
740 
741     // Sets common operation actions on RVV floating-point vector types.
742     const auto SetCommonVFPActions = [&](MVT VT) {
743       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
744       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
745       // sizes are within one power-of-two of each other. Therefore conversions
746       // between vXf16 and vXf64 must be lowered as sequences which convert via
747       // vXf32.
748       setOperationAction(ISD::FP_ROUND, VT, Custom);
749       setOperationAction(ISD::FP_EXTEND, VT, Custom);
750       // Custom-lower insert/extract operations to simplify patterns.
751       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
752       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
753       // Expand various condition codes (explained above).
754       for (auto CC : VFPCCToExpand)
755         setCondCodeAction(CC, VT, Expand);
756 
757       setOperationAction(ISD::FMINNUM, VT, Legal);
758       setOperationAction(ISD::FMAXNUM, VT, Legal);
759 
760       setOperationAction(ISD::FTRUNC, VT, Custom);
761       setOperationAction(ISD::FCEIL, VT, Custom);
762       setOperationAction(ISD::FFLOOR, VT, Custom);
763       setOperationAction(ISD::FROUND, VT, Custom);
764 
765       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
766       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
767       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
768       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
769 
770       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
771 
772       setOperationAction(ISD::LOAD, VT, Custom);
773       setOperationAction(ISD::STORE, VT, Custom);
774 
775       setOperationAction(ISD::MLOAD, VT, Custom);
776       setOperationAction(ISD::MSTORE, VT, Custom);
777       setOperationAction(ISD::MGATHER, VT, Custom);
778       setOperationAction(ISD::MSCATTER, VT, Custom);
779 
780       setOperationAction(ISD::VP_LOAD, VT, Custom);
781       setOperationAction(ISD::VP_STORE, VT, Custom);
782       setOperationAction(ISD::VP_GATHER, VT, Custom);
783       setOperationAction(ISD::VP_SCATTER, VT, Custom);
784 
785       setOperationAction(ISD::SELECT, VT, Custom);
786       setOperationAction(ISD::SELECT_CC, VT, Expand);
787 
788       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
789       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
790       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
791 
792       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
793 
794       for (unsigned VPOpc : FloatingPointVPOps)
795         setOperationAction(VPOpc, VT, Custom);
796     };
797 
798     // Sets common extload/truncstore actions on RVV floating-point vector
799     // types.
800     const auto SetCommonVFPExtLoadTruncStoreActions =
801         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
802           for (auto SmallVT : SmallerVTs) {
803             setTruncStoreAction(VT, SmallVT, Expand);
804             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
805           }
806         };
807 
808     if (Subtarget.hasVInstructionsF16())
809       for (MVT VT : F16VecVTs)
810         SetCommonVFPActions(VT);
811 
812     for (MVT VT : F32VecVTs) {
813       if (Subtarget.hasVInstructionsF32())
814         SetCommonVFPActions(VT);
815       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
816     }
817 
818     for (MVT VT : F64VecVTs) {
819       if (Subtarget.hasVInstructionsF64())
820         SetCommonVFPActions(VT);
821       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
822       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
823     }
824 
825     if (Subtarget.useRVVForFixedLengthVectors()) {
826       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
827         if (!useRVVForFixedLengthVectorVT(VT))
828           continue;
829 
830         // By default everything must be expanded.
831         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
832           setOperationAction(Op, VT, Expand);
833         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
834           setTruncStoreAction(VT, OtherVT, Expand);
835           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
836           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
837           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
838         }
839 
840         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
841         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
842         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
843 
844         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
845         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
846 
847         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
848         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
849 
850         setOperationAction(ISD::LOAD, VT, Custom);
851         setOperationAction(ISD::STORE, VT, Custom);
852 
853         setOperationAction(ISD::SETCC, VT, Custom);
854 
855         setOperationAction(ISD::SELECT, VT, Custom);
856 
857         setOperationAction(ISD::TRUNCATE, VT, Custom);
858 
859         setOperationAction(ISD::BITCAST, VT, Custom);
860 
861         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
862         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
863         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
864 
865         setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
866         setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
867         setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
868 
869         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
870         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
871         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
872         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
873 
874         // Operations below are different for between masks and other vectors.
875         if (VT.getVectorElementType() == MVT::i1) {
876           setOperationAction(ISD::VP_AND, VT, Custom);
877           setOperationAction(ISD::VP_OR, VT, Custom);
878           setOperationAction(ISD::VP_XOR, VT, Custom);
879           setOperationAction(ISD::AND, VT, Custom);
880           setOperationAction(ISD::OR, VT, Custom);
881           setOperationAction(ISD::XOR, VT, Custom);
882           continue;
883         }
884 
885         // Use SPLAT_VECTOR to prevent type legalization from destroying the
886         // splats when type legalizing i64 scalar on RV32.
887         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
888         // improvements first.
889         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
890           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
891           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
892         }
893 
894         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
895         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
896 
897         setOperationAction(ISD::MLOAD, VT, Custom);
898         setOperationAction(ISD::MSTORE, VT, Custom);
899         setOperationAction(ISD::MGATHER, VT, Custom);
900         setOperationAction(ISD::MSCATTER, VT, Custom);
901 
902         setOperationAction(ISD::VP_LOAD, VT, Custom);
903         setOperationAction(ISD::VP_STORE, VT, Custom);
904         setOperationAction(ISD::VP_GATHER, VT, Custom);
905         setOperationAction(ISD::VP_SCATTER, VT, Custom);
906 
907         setOperationAction(ISD::ADD, VT, Custom);
908         setOperationAction(ISD::MUL, VT, Custom);
909         setOperationAction(ISD::SUB, VT, Custom);
910         setOperationAction(ISD::AND, VT, Custom);
911         setOperationAction(ISD::OR, VT, Custom);
912         setOperationAction(ISD::XOR, VT, Custom);
913         setOperationAction(ISD::SDIV, VT, Custom);
914         setOperationAction(ISD::SREM, VT, Custom);
915         setOperationAction(ISD::UDIV, VT, Custom);
916         setOperationAction(ISD::UREM, VT, Custom);
917         setOperationAction(ISD::SHL, VT, Custom);
918         setOperationAction(ISD::SRA, VT, Custom);
919         setOperationAction(ISD::SRL, VT, Custom);
920 
921         setOperationAction(ISD::SMIN, VT, Custom);
922         setOperationAction(ISD::SMAX, VT, Custom);
923         setOperationAction(ISD::UMIN, VT, Custom);
924         setOperationAction(ISD::UMAX, VT, Custom);
925         setOperationAction(ISD::ABS,  VT, Custom);
926 
927         // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
928         if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV()) {
929           setOperationAction(ISD::MULHS, VT, Custom);
930           setOperationAction(ISD::MULHU, VT, Custom);
931         }
932 
933         setOperationAction(ISD::SADDSAT, VT, Custom);
934         setOperationAction(ISD::UADDSAT, VT, Custom);
935         setOperationAction(ISD::SSUBSAT, VT, Custom);
936         setOperationAction(ISD::USUBSAT, VT, Custom);
937 
938         setOperationAction(ISD::VSELECT, VT, Custom);
939         setOperationAction(ISD::SELECT_CC, VT, Expand);
940 
941         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
942         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
943         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
944 
945         // Custom-lower reduction operations to set up the corresponding custom
946         // nodes' operands.
947         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
948         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
949         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
950         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
951         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
952 
953         for (unsigned VPOpc : IntegerVPOps)
954           setOperationAction(VPOpc, VT, Custom);
955 
956         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
957         // type that can represent the value exactly.
958         if (VT.getVectorElementType() != MVT::i64) {
959           MVT FloatEltVT =
960               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
961           EVT FloatVT =
962               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
963           if (isTypeLegal(FloatVT)) {
964             setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
965             setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
966           }
967         }
968       }
969 
970       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
971         if (!useRVVForFixedLengthVectorVT(VT))
972           continue;
973 
974         // By default everything must be expanded.
975         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
976           setOperationAction(Op, VT, Expand);
977         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
978           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
979           setTruncStoreAction(VT, OtherVT, Expand);
980         }
981 
982         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
983         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
984         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
985 
986         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
987         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
988         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
989         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
990         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
991 
992         setOperationAction(ISD::LOAD, VT, Custom);
993         setOperationAction(ISD::STORE, VT, Custom);
994         setOperationAction(ISD::MLOAD, VT, Custom);
995         setOperationAction(ISD::MSTORE, VT, Custom);
996         setOperationAction(ISD::MGATHER, VT, Custom);
997         setOperationAction(ISD::MSCATTER, VT, Custom);
998 
999         setOperationAction(ISD::VP_LOAD, VT, Custom);
1000         setOperationAction(ISD::VP_STORE, VT, Custom);
1001         setOperationAction(ISD::VP_GATHER, VT, Custom);
1002         setOperationAction(ISD::VP_SCATTER, VT, Custom);
1003 
1004         setOperationAction(ISD::FADD, VT, Custom);
1005         setOperationAction(ISD::FSUB, VT, Custom);
1006         setOperationAction(ISD::FMUL, VT, Custom);
1007         setOperationAction(ISD::FDIV, VT, Custom);
1008         setOperationAction(ISD::FNEG, VT, Custom);
1009         setOperationAction(ISD::FABS, VT, Custom);
1010         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1011         setOperationAction(ISD::FSQRT, VT, Custom);
1012         setOperationAction(ISD::FMA, VT, Custom);
1013         setOperationAction(ISD::FMINNUM, VT, Custom);
1014         setOperationAction(ISD::FMAXNUM, VT, Custom);
1015 
1016         setOperationAction(ISD::FP_ROUND, VT, Custom);
1017         setOperationAction(ISD::FP_EXTEND, VT, Custom);
1018 
1019         setOperationAction(ISD::FTRUNC, VT, Custom);
1020         setOperationAction(ISD::FCEIL, VT, Custom);
1021         setOperationAction(ISD::FFLOOR, VT, Custom);
1022         setOperationAction(ISD::FROUND, VT, Custom);
1023 
1024         for (auto CC : VFPCCToExpand)
1025           setCondCodeAction(CC, VT, Expand);
1026 
1027         setOperationAction(ISD::VSELECT, VT, Custom);
1028         setOperationAction(ISD::SELECT, VT, Custom);
1029         setOperationAction(ISD::SELECT_CC, VT, Expand);
1030 
1031         setOperationAction(ISD::BITCAST, VT, Custom);
1032 
1033         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1034         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1035         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1036         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1037 
1038         for (unsigned VPOpc : FloatingPointVPOps)
1039           setOperationAction(VPOpc, VT, Custom);
1040       }
1041 
1042       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
1043       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
1044       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
1045       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
1046       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1047       if (Subtarget.hasStdExtZfh())
1048         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
1049       if (Subtarget.hasStdExtF())
1050         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
1051       if (Subtarget.hasStdExtD())
1052         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
1053     }
1054   }
1055 
1056   // Function alignments.
1057   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
1058   setMinFunctionAlignment(FunctionAlignment);
1059   setPrefFunctionAlignment(FunctionAlignment);
1060 
1061   setMinimumJumpTableEntries(5);
1062 
1063   // Jumps are expensive, compared to logic
1064   setJumpIsExpensive();
1065 
1066   setTargetDAGCombine(ISD::ADD);
1067   setTargetDAGCombine(ISD::SUB);
1068   setTargetDAGCombine(ISD::AND);
1069   setTargetDAGCombine(ISD::OR);
1070   setTargetDAGCombine(ISD::XOR);
1071   setTargetDAGCombine(ISD::ROTL);
1072   setTargetDAGCombine(ISD::ROTR);
1073   setTargetDAGCombine(ISD::ANY_EXTEND);
1074   if (Subtarget.hasStdExtF()) {
1075     setTargetDAGCombine(ISD::ZERO_EXTEND);
1076     setTargetDAGCombine(ISD::FP_TO_SINT);
1077     setTargetDAGCombine(ISD::FP_TO_UINT);
1078     setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
1079     setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
1080   }
1081   if (Subtarget.hasVInstructions()) {
1082     setTargetDAGCombine(ISD::FCOPYSIGN);
1083     setTargetDAGCombine(ISD::MGATHER);
1084     setTargetDAGCombine(ISD::MSCATTER);
1085     setTargetDAGCombine(ISD::VP_GATHER);
1086     setTargetDAGCombine(ISD::VP_SCATTER);
1087     setTargetDAGCombine(ISD::SRA);
1088     setTargetDAGCombine(ISD::SRL);
1089     setTargetDAGCombine(ISD::SHL);
1090     setTargetDAGCombine(ISD::STORE);
1091     setTargetDAGCombine(ISD::SPLAT_VECTOR);
1092   }
1093 
1094   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
1095   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
1096 }
1097 
1098 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
1099                                             LLVMContext &Context,
1100                                             EVT VT) const {
1101   if (!VT.isVector())
1102     return getPointerTy(DL);
1103   if (Subtarget.hasVInstructions() &&
1104       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
1105     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
1106   return VT.changeVectorElementTypeToInteger();
1107 }
1108 
1109 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
1110   return Subtarget.getXLenVT();
1111 }
1112 
1113 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1114                                              const CallInst &I,
1115                                              MachineFunction &MF,
1116                                              unsigned Intrinsic) const {
1117   auto &DL = I.getModule()->getDataLayout();
1118   switch (Intrinsic) {
1119   default:
1120     return false;
1121   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
1122   case Intrinsic::riscv_masked_atomicrmw_add_i32:
1123   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
1124   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
1125   case Intrinsic::riscv_masked_atomicrmw_max_i32:
1126   case Intrinsic::riscv_masked_atomicrmw_min_i32:
1127   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
1128   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
1129   case Intrinsic::riscv_masked_cmpxchg_i32:
1130     Info.opc = ISD::INTRINSIC_W_CHAIN;
1131     Info.memVT = MVT::i32;
1132     Info.ptrVal = I.getArgOperand(0);
1133     Info.offset = 0;
1134     Info.align = Align(4);
1135     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
1136                  MachineMemOperand::MOVolatile;
1137     return true;
1138   case Intrinsic::riscv_masked_strided_load:
1139     Info.opc = ISD::INTRINSIC_W_CHAIN;
1140     Info.ptrVal = I.getArgOperand(1);
1141     Info.memVT = getValueType(DL, I.getType()->getScalarType());
1142     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
1143     Info.size = MemoryLocation::UnknownSize;
1144     Info.flags |= MachineMemOperand::MOLoad;
1145     return true;
1146   case Intrinsic::riscv_masked_strided_store:
1147     Info.opc = ISD::INTRINSIC_VOID;
1148     Info.ptrVal = I.getArgOperand(1);
1149     Info.memVT =
1150         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
1151     Info.align = Align(
1152         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
1153         8);
1154     Info.size = MemoryLocation::UnknownSize;
1155     Info.flags |= MachineMemOperand::MOStore;
1156     return true;
1157   }
1158 }
1159 
1160 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1161                                                 const AddrMode &AM, Type *Ty,
1162                                                 unsigned AS,
1163                                                 Instruction *I) const {
1164   // No global is ever allowed as a base.
1165   if (AM.BaseGV)
1166     return false;
1167 
1168   // Require a 12-bit signed offset.
1169   if (!isInt<12>(AM.BaseOffs))
1170     return false;
1171 
1172   switch (AM.Scale) {
1173   case 0: // "r+i" or just "i", depending on HasBaseReg.
1174     break;
1175   case 1:
1176     if (!AM.HasBaseReg) // allow "r+i".
1177       break;
1178     return false; // disallow "r+r" or "r+r+i".
1179   default:
1180     return false;
1181   }
1182 
1183   return true;
1184 }
1185 
1186 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1187   return isInt<12>(Imm);
1188 }
1189 
1190 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1191   return isInt<12>(Imm);
1192 }
1193 
1194 // On RV32, 64-bit integers are split into their high and low parts and held
1195 // in two different registers, so the trunc is free since the low register can
1196 // just be used.
1197 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1198   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1199     return false;
1200   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1201   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1202   return (SrcBits == 64 && DestBits == 32);
1203 }
1204 
1205 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1206   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1207       !SrcVT.isInteger() || !DstVT.isInteger())
1208     return false;
1209   unsigned SrcBits = SrcVT.getSizeInBits();
1210   unsigned DestBits = DstVT.getSizeInBits();
1211   return (SrcBits == 64 && DestBits == 32);
1212 }
1213 
1214 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1215   // Zexts are free if they can be combined with a load.
1216   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1217   // poorly with type legalization of compares preferring sext.
1218   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1219     EVT MemVT = LD->getMemoryVT();
1220     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1221         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1222          LD->getExtensionType() == ISD::ZEXTLOAD))
1223       return true;
1224   }
1225 
1226   return TargetLowering::isZExtFree(Val, VT2);
1227 }
1228 
1229 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1230   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1231 }
1232 
1233 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1234   return Subtarget.hasStdExtZbb();
1235 }
1236 
1237 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1238   return Subtarget.hasStdExtZbb();
1239 }
1240 
1241 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1242   EVT VT = Y.getValueType();
1243 
1244   // FIXME: Support vectors once we have tests.
1245   if (VT.isVector())
1246     return false;
1247 
1248   return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
1249           Subtarget.hasStdExtZbkb()) &&
1250          !isa<ConstantSDNode>(Y);
1251 }
1252 
1253 /// Check if sinking \p I's operands to I's basic block is profitable, because
1254 /// the operands can be folded into a target instruction, e.g.
1255 /// splats of scalars can fold into vector instructions.
1256 bool RISCVTargetLowering::shouldSinkOperands(
1257     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1258   using namespace llvm::PatternMatch;
1259 
1260   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1261     return false;
1262 
1263   auto IsSinker = [&](Instruction *I, int Operand) {
1264     switch (I->getOpcode()) {
1265     case Instruction::Add:
1266     case Instruction::Sub:
1267     case Instruction::Mul:
1268     case Instruction::And:
1269     case Instruction::Or:
1270     case Instruction::Xor:
1271     case Instruction::FAdd:
1272     case Instruction::FSub:
1273     case Instruction::FMul:
1274     case Instruction::FDiv:
1275     case Instruction::ICmp:
1276     case Instruction::FCmp:
1277       return true;
1278     case Instruction::Shl:
1279     case Instruction::LShr:
1280     case Instruction::AShr:
1281     case Instruction::UDiv:
1282     case Instruction::SDiv:
1283     case Instruction::URem:
1284     case Instruction::SRem:
1285       return Operand == 1;
1286     case Instruction::Call:
1287       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1288         switch (II->getIntrinsicID()) {
1289         case Intrinsic::fma:
1290         case Intrinsic::vp_fma:
1291           return Operand == 0 || Operand == 1;
1292         // FIXME: Our patterns can only match vx/vf instructions when the splat
1293         // it on the RHS, because TableGen doesn't recognize our VP operations
1294         // as commutative.
1295         case Intrinsic::vp_add:
1296         case Intrinsic::vp_mul:
1297         case Intrinsic::vp_and:
1298         case Intrinsic::vp_or:
1299         case Intrinsic::vp_xor:
1300         case Intrinsic::vp_fadd:
1301         case Intrinsic::vp_fmul:
1302         case Intrinsic::vp_shl:
1303         case Intrinsic::vp_lshr:
1304         case Intrinsic::vp_ashr:
1305         case Intrinsic::vp_udiv:
1306         case Intrinsic::vp_sdiv:
1307         case Intrinsic::vp_urem:
1308         case Intrinsic::vp_srem:
1309           return Operand == 1;
1310         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1311         // explicit patterns for both LHS and RHS (as 'vr' versions).
1312         case Intrinsic::vp_sub:
1313         case Intrinsic::vp_fsub:
1314         case Intrinsic::vp_fdiv:
1315           return Operand == 0 || Operand == 1;
1316         default:
1317           return false;
1318         }
1319       }
1320       return false;
1321     default:
1322       return false;
1323     }
1324   };
1325 
1326   for (auto OpIdx : enumerate(I->operands())) {
1327     if (!IsSinker(I, OpIdx.index()))
1328       continue;
1329 
1330     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1331     // Make sure we are not already sinking this operand
1332     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1333       continue;
1334 
1335     // We are looking for a splat that can be sunk.
1336     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1337                              m_Undef(), m_ZeroMask())))
1338       continue;
1339 
1340     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1341     // and vector registers
1342     for (Use &U : Op->uses()) {
1343       Instruction *Insn = cast<Instruction>(U.getUser());
1344       if (!IsSinker(Insn, U.getOperandNo()))
1345         return false;
1346     }
1347 
1348     Ops.push_back(&Op->getOperandUse(0));
1349     Ops.push_back(&OpIdx.value());
1350   }
1351   return true;
1352 }
1353 
1354 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1355                                        bool ForCodeSize) const {
1356   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1357   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1358     return false;
1359   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1360     return false;
1361   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1362     return false;
1363   return Imm.isZero();
1364 }
1365 
1366 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1367   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1368          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1369          (VT == MVT::f64 && Subtarget.hasStdExtD());
1370 }
1371 
1372 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1373                                                       CallingConv::ID CC,
1374                                                       EVT VT) const {
1375   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1376   // We might still end up using a GPR but that will be decided based on ABI.
1377   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1378   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1379     return MVT::f32;
1380 
1381   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1382 }
1383 
1384 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1385                                                            CallingConv::ID CC,
1386                                                            EVT VT) const {
1387   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1388   // We might still end up using a GPR but that will be decided based on ABI.
1389   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1390   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1391     return 1;
1392 
1393   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1394 }
1395 
1396 // Changes the condition code and swaps operands if necessary, so the SetCC
1397 // operation matches one of the comparisons supported directly by branches
1398 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1399 // with 1/-1.
1400 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1401                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1402   // Convert X > -1 to X >= 0.
1403   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1404     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1405     CC = ISD::SETGE;
1406     return;
1407   }
1408   // Convert X < 1 to 0 >= X.
1409   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1410     RHS = LHS;
1411     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1412     CC = ISD::SETGE;
1413     return;
1414   }
1415 
1416   switch (CC) {
1417   default:
1418     break;
1419   case ISD::SETGT:
1420   case ISD::SETLE:
1421   case ISD::SETUGT:
1422   case ISD::SETULE:
1423     CC = ISD::getSetCCSwappedOperands(CC);
1424     std::swap(LHS, RHS);
1425     break;
1426   }
1427 }
1428 
1429 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1430   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1431   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1432   if (VT.getVectorElementType() == MVT::i1)
1433     KnownSize *= 8;
1434 
1435   switch (KnownSize) {
1436   default:
1437     llvm_unreachable("Invalid LMUL.");
1438   case 8:
1439     return RISCVII::VLMUL::LMUL_F8;
1440   case 16:
1441     return RISCVII::VLMUL::LMUL_F4;
1442   case 32:
1443     return RISCVII::VLMUL::LMUL_F2;
1444   case 64:
1445     return RISCVII::VLMUL::LMUL_1;
1446   case 128:
1447     return RISCVII::VLMUL::LMUL_2;
1448   case 256:
1449     return RISCVII::VLMUL::LMUL_4;
1450   case 512:
1451     return RISCVII::VLMUL::LMUL_8;
1452   }
1453 }
1454 
1455 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1456   switch (LMul) {
1457   default:
1458     llvm_unreachable("Invalid LMUL.");
1459   case RISCVII::VLMUL::LMUL_F8:
1460   case RISCVII::VLMUL::LMUL_F4:
1461   case RISCVII::VLMUL::LMUL_F2:
1462   case RISCVII::VLMUL::LMUL_1:
1463     return RISCV::VRRegClassID;
1464   case RISCVII::VLMUL::LMUL_2:
1465     return RISCV::VRM2RegClassID;
1466   case RISCVII::VLMUL::LMUL_4:
1467     return RISCV::VRM4RegClassID;
1468   case RISCVII::VLMUL::LMUL_8:
1469     return RISCV::VRM8RegClassID;
1470   }
1471 }
1472 
1473 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1474   RISCVII::VLMUL LMUL = getLMUL(VT);
1475   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1476       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1477       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1478       LMUL == RISCVII::VLMUL::LMUL_1) {
1479     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1480                   "Unexpected subreg numbering");
1481     return RISCV::sub_vrm1_0 + Index;
1482   }
1483   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1484     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1485                   "Unexpected subreg numbering");
1486     return RISCV::sub_vrm2_0 + Index;
1487   }
1488   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1489     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1490                   "Unexpected subreg numbering");
1491     return RISCV::sub_vrm4_0 + Index;
1492   }
1493   llvm_unreachable("Invalid vector type.");
1494 }
1495 
1496 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1497   if (VT.getVectorElementType() == MVT::i1)
1498     return RISCV::VRRegClassID;
1499   return getRegClassIDForLMUL(getLMUL(VT));
1500 }
1501 
1502 // Attempt to decompose a subvector insert/extract between VecVT and
1503 // SubVecVT via subregister indices. Returns the subregister index that
1504 // can perform the subvector insert/extract with the given element index, as
1505 // well as the index corresponding to any leftover subvectors that must be
1506 // further inserted/extracted within the register class for SubVecVT.
1507 std::pair<unsigned, unsigned>
1508 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1509     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1510     const RISCVRegisterInfo *TRI) {
1511   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1512                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1513                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1514                 "Register classes not ordered");
1515   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1516   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1517   // Try to compose a subregister index that takes us from the incoming
1518   // LMUL>1 register class down to the outgoing one. At each step we half
1519   // the LMUL:
1520   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1521   // Note that this is not guaranteed to find a subregister index, such as
1522   // when we are extracting from one VR type to another.
1523   unsigned SubRegIdx = RISCV::NoSubRegister;
1524   for (const unsigned RCID :
1525        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1526     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1527       VecVT = VecVT.getHalfNumVectorElementsVT();
1528       bool IsHi =
1529           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1530       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1531                                             getSubregIndexByMVT(VecVT, IsHi));
1532       if (IsHi)
1533         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1534     }
1535   return {SubRegIdx, InsertExtractIdx};
1536 }
1537 
1538 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1539 // stores for those types.
1540 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1541   return !Subtarget.useRVVForFixedLengthVectors() ||
1542          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1543 }
1544 
1545 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1546   if (ScalarTy->isPointerTy())
1547     return true;
1548 
1549   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1550       ScalarTy->isIntegerTy(32))
1551     return true;
1552 
1553   if (ScalarTy->isIntegerTy(64))
1554     return Subtarget.hasVInstructionsI64();
1555 
1556   if (ScalarTy->isHalfTy())
1557     return Subtarget.hasVInstructionsF16();
1558   if (ScalarTy->isFloatTy())
1559     return Subtarget.hasVInstructionsF32();
1560   if (ScalarTy->isDoubleTy())
1561     return Subtarget.hasVInstructionsF64();
1562 
1563   return false;
1564 }
1565 
1566 static SDValue getVLOperand(SDValue Op) {
1567   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1568           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
1569          "Unexpected opcode");
1570   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
1571   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
1572   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1573       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
1574   if (!II)
1575     return SDValue();
1576   return Op.getOperand(II->VLOperand + 1 + HasChain);
1577 }
1578 
1579 static bool useRVVForFixedLengthVectorVT(MVT VT,
1580                                          const RISCVSubtarget &Subtarget) {
1581   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1582   if (!Subtarget.useRVVForFixedLengthVectors())
1583     return false;
1584 
1585   // We only support a set of vector types with a consistent maximum fixed size
1586   // across all supported vector element types to avoid legalization issues.
1587   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1588   // fixed-length vector type we support is 1024 bytes.
1589   if (VT.getFixedSizeInBits() > 1024 * 8)
1590     return false;
1591 
1592   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1593 
1594   MVT EltVT = VT.getVectorElementType();
1595 
1596   // Don't use RVV for vectors we cannot scalarize if required.
1597   switch (EltVT.SimpleTy) {
1598   // i1 is supported but has different rules.
1599   default:
1600     return false;
1601   case MVT::i1:
1602     // Masks can only use a single register.
1603     if (VT.getVectorNumElements() > MinVLen)
1604       return false;
1605     MinVLen /= 8;
1606     break;
1607   case MVT::i8:
1608   case MVT::i16:
1609   case MVT::i32:
1610     break;
1611   case MVT::i64:
1612     if (!Subtarget.hasVInstructionsI64())
1613       return false;
1614     break;
1615   case MVT::f16:
1616     if (!Subtarget.hasVInstructionsF16())
1617       return false;
1618     break;
1619   case MVT::f32:
1620     if (!Subtarget.hasVInstructionsF32())
1621       return false;
1622     break;
1623   case MVT::f64:
1624     if (!Subtarget.hasVInstructionsF64())
1625       return false;
1626     break;
1627   }
1628 
1629   // Reject elements larger than ELEN.
1630   if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
1631     return false;
1632 
1633   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1634   // Don't use RVV for types that don't fit.
1635   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1636     return false;
1637 
1638   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1639   // the base fixed length RVV support in place.
1640   if (!VT.isPow2VectorType())
1641     return false;
1642 
1643   return true;
1644 }
1645 
1646 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1647   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1648 }
1649 
1650 // Return the largest legal scalable vector type that matches VT's element type.
1651 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1652                                             const RISCVSubtarget &Subtarget) {
1653   // This may be called before legal types are setup.
1654   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1655           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1656          "Expected legal fixed length vector!");
1657 
1658   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1659   unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();
1660 
1661   MVT EltVT = VT.getVectorElementType();
1662   switch (EltVT.SimpleTy) {
1663   default:
1664     llvm_unreachable("unexpected element type for RVV container");
1665   case MVT::i1:
1666   case MVT::i8:
1667   case MVT::i16:
1668   case MVT::i32:
1669   case MVT::i64:
1670   case MVT::f16:
1671   case MVT::f32:
1672   case MVT::f64: {
1673     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1674     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1675     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1676     unsigned NumElts =
1677         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1678     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1679     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1680     return MVT::getScalableVectorVT(EltVT, NumElts);
1681   }
1682   }
1683 }
1684 
1685 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1686                                             const RISCVSubtarget &Subtarget) {
1687   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1688                                           Subtarget);
1689 }
1690 
1691 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1692   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1693 }
1694 
1695 // Grow V to consume an entire RVV register.
1696 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1697                                        const RISCVSubtarget &Subtarget) {
1698   assert(VT.isScalableVector() &&
1699          "Expected to convert into a scalable vector!");
1700   assert(V.getValueType().isFixedLengthVector() &&
1701          "Expected a fixed length vector operand!");
1702   SDLoc DL(V);
1703   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1704   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1705 }
1706 
1707 // Shrink V so it's just big enough to maintain a VT's worth of data.
1708 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1709                                          const RISCVSubtarget &Subtarget) {
1710   assert(VT.isFixedLengthVector() &&
1711          "Expected to convert into a fixed length vector!");
1712   assert(V.getValueType().isScalableVector() &&
1713          "Expected a scalable vector operand!");
1714   SDLoc DL(V);
1715   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1716   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1717 }
1718 
1719 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1720 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1721 // the vector type that it is contained in.
1722 static std::pair<SDValue, SDValue>
1723 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1724                 const RISCVSubtarget &Subtarget) {
1725   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1726   MVT XLenVT = Subtarget.getXLenVT();
1727   SDValue VL = VecVT.isFixedLengthVector()
1728                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1729                    : DAG.getRegister(RISCV::X0, XLenVT);
1730   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1731   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1732   return {Mask, VL};
1733 }
1734 
1735 // As above but assuming the given type is a scalable vector type.
1736 static std::pair<SDValue, SDValue>
1737 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1738                         const RISCVSubtarget &Subtarget) {
1739   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1740   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1741 }
1742 
1743 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1744 // of either is (currently) supported. This can get us into an infinite loop
1745 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1746 // as a ..., etc.
1747 // Until either (or both) of these can reliably lower any node, reporting that
1748 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1749 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1750 // which is not desirable.
1751 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1752     EVT VT, unsigned DefinedValues) const {
1753   return false;
1754 }
1755 
1756 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1757                                   const RISCVSubtarget &Subtarget) {
1758   // RISCV FP-to-int conversions saturate to the destination register size, but
1759   // don't produce 0 for nan. We can use a conversion instruction and fix the
1760   // nan case with a compare and a select.
1761   SDValue Src = Op.getOperand(0);
1762 
1763   EVT DstVT = Op.getValueType();
1764   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1765 
1766   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1767   unsigned Opc;
1768   if (SatVT == DstVT)
1769     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1770   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1771     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1772   else
1773     return SDValue();
1774   // FIXME: Support other SatVTs by clamping before or after the conversion.
1775 
1776   SDLoc DL(Op);
1777   SDValue FpToInt = DAG.getNode(
1778       Opc, DL, DstVT, Src,
1779       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1780 
1781   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1782   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1783 }
1784 
1785 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1786 // and back. Taking care to avoid converting values that are nan or already
1787 // correct.
1788 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1789 // have FRM dependencies modeled yet.
1790 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1791   MVT VT = Op.getSimpleValueType();
1792   assert(VT.isVector() && "Unexpected type");
1793 
1794   SDLoc DL(Op);
1795 
1796   // Freeze the source since we are increasing the number of uses.
1797   SDValue Src = DAG.getNode(ISD::FREEZE, DL, VT, Op.getOperand(0));
1798 
1799   // Truncate to integer and convert back to FP.
1800   MVT IntVT = VT.changeVectorElementTypeToInteger();
1801   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1802   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1803 
1804   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1805 
1806   if (Op.getOpcode() == ISD::FCEIL) {
1807     // If the truncated value is the greater than or equal to the original
1808     // value, we've computed the ceil. Otherwise, we went the wrong way and
1809     // need to increase by 1.
1810     // FIXME: This should use a masked operation. Handle here or in isel?
1811     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1812                                  DAG.getConstantFP(1.0, DL, VT));
1813     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1814     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1815   } else if (Op.getOpcode() == ISD::FFLOOR) {
1816     // If the truncated value is the less than or equal to the original value,
1817     // we've computed the floor. Otherwise, we went the wrong way and need to
1818     // decrease by 1.
1819     // FIXME: This should use a masked operation. Handle here or in isel?
1820     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1821                                  DAG.getConstantFP(1.0, DL, VT));
1822     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1823     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1824   }
1825 
1826   // Restore the original sign so that -0.0 is preserved.
1827   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1828 
1829   // Determine the largest integer that can be represented exactly. This and
1830   // values larger than it don't have any fractional bits so don't need to
1831   // be converted.
1832   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1833   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1834   APFloat MaxVal = APFloat(FltSem);
1835   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1836                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1837   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1838 
1839   // If abs(Src) was larger than MaxVal or nan, keep it.
1840   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1841   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1842   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1843 }
1844 
1845 // ISD::FROUND is defined to round to nearest with ties rounding away from 0.
1846 // This mode isn't supported in vector hardware on RISCV. But as long as we
1847 // aren't compiling with trapping math, we can emulate this with
1848 // floor(X + copysign(nextafter(0.5, 0.0), X)).
1849 // FIXME: Could be shorter by changing rounding mode, but we don't have FRM
1850 // dependencies modeled yet.
1851 // FIXME: Use masked operations to avoid final merge.
1852 static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
1853   MVT VT = Op.getSimpleValueType();
1854   assert(VT.isVector() && "Unexpected type");
1855 
1856   SDLoc DL(Op);
1857 
1858   // Freeze the source since we are increasing the number of uses.
1859   SDValue Src = DAG.getNode(ISD::FREEZE, DL, VT, Op.getOperand(0));
1860 
1861   // We do the conversion on the absolute value and fix the sign at the end.
1862   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1863 
1864   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1865   bool Ignored;
1866   APFloat Point5Pred = APFloat(0.5f);
1867   Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
1868   Point5Pred.next(/*nextDown*/ true);
1869 
1870   // Add the adjustment.
1871   SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
1872                                DAG.getConstantFP(Point5Pred, DL, VT));
1873 
1874   // Truncate to integer and convert back to fp.
1875   MVT IntVT = VT.changeVectorElementTypeToInteger();
1876   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
1877   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1878 
1879   // Restore the original sign.
1880   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1881 
1882   // Determine the largest integer that can be represented exactly. This and
1883   // values larger than it don't have any fractional bits so don't need to
1884   // be converted.
1885   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1886   APFloat MaxVal = APFloat(FltSem);
1887   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1888                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1889   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1890 
1891   // If abs(Src) was larger than MaxVal or nan, keep it.
1892   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1893   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1894   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1895 }
1896 
1897 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1898                                  const RISCVSubtarget &Subtarget) {
1899   MVT VT = Op.getSimpleValueType();
1900   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1901 
1902   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1903 
1904   SDLoc DL(Op);
1905   SDValue Mask, VL;
1906   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1907 
1908   unsigned Opc =
1909       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1910   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
1911                               Op.getOperand(0), VL);
1912   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1913 }
1914 
1915 struct VIDSequence {
1916   int64_t StepNumerator;
1917   unsigned StepDenominator;
1918   int64_t Addend;
1919 };
1920 
1921 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1922 // to the (non-zero) step S and start value X. This can be then lowered as the
1923 // RVV sequence (VID * S) + X, for example.
1924 // The step S is represented as an integer numerator divided by a positive
1925 // denominator. Note that the implementation currently only identifies
1926 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1927 // cannot detect 2/3, for example.
1928 // Note that this method will also match potentially unappealing index
1929 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1930 // determine whether this is worth generating code for.
1931 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1932   unsigned NumElts = Op.getNumOperands();
1933   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1934   if (!Op.getValueType().isInteger())
1935     return None;
1936 
1937   Optional<unsigned> SeqStepDenom;
1938   Optional<int64_t> SeqStepNum, SeqAddend;
1939   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1940   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1941   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1942     // Assume undef elements match the sequence; we just have to be careful
1943     // when interpolating across them.
1944     if (Op.getOperand(Idx).isUndef())
1945       continue;
1946     // The BUILD_VECTOR must be all constants.
1947     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1948       return None;
1949 
1950     uint64_t Val = Op.getConstantOperandVal(Idx) &
1951                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1952 
1953     if (PrevElt) {
1954       // Calculate the step since the last non-undef element, and ensure
1955       // it's consistent across the entire sequence.
1956       unsigned IdxDiff = Idx - PrevElt->second;
1957       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1958 
1959       // A zero-value value difference means that we're somewhere in the middle
1960       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1961       // step change before evaluating the sequence.
1962       if (ValDiff != 0) {
1963         int64_t Remainder = ValDiff % IdxDiff;
1964         // Normalize the step if it's greater than 1.
1965         if (Remainder != ValDiff) {
1966           // The difference must cleanly divide the element span.
1967           if (Remainder != 0)
1968             return None;
1969           ValDiff /= IdxDiff;
1970           IdxDiff = 1;
1971         }
1972 
1973         if (!SeqStepNum)
1974           SeqStepNum = ValDiff;
1975         else if (ValDiff != SeqStepNum)
1976           return None;
1977 
1978         if (!SeqStepDenom)
1979           SeqStepDenom = IdxDiff;
1980         else if (IdxDiff != *SeqStepDenom)
1981           return None;
1982       }
1983     }
1984 
1985     // Record and/or check any addend.
1986     if (SeqStepNum && SeqStepDenom) {
1987       uint64_t ExpectedVal =
1988           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1989       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1990       if (!SeqAddend)
1991         SeqAddend = Addend;
1992       else if (SeqAddend != Addend)
1993         return None;
1994     }
1995 
1996     // Record this non-undef element for later.
1997     if (!PrevElt || PrevElt->first != Val)
1998       PrevElt = std::make_pair(Val, Idx);
1999   }
2000   // We need to have logged both a step and an addend for this to count as
2001   // a legal index sequence.
2002   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
2003     return None;
2004 
2005   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
2006 }
2007 
2008 // Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
2009 // and lower it as a VRGATHER_VX_VL from the source vector.
2010 static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
2011                                   SelectionDAG &DAG,
2012                                   const RISCVSubtarget &Subtarget) {
2013   if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2014     return SDValue();
2015   SDValue Vec = SplatVal.getOperand(0);
2016   // Only perform this optimization on vectors of the same size for simplicity.
2017   if (Vec.getValueType() != VT)
2018     return SDValue();
2019   SDValue Idx = SplatVal.getOperand(1);
2020   // The index must be a legal type.
2021   if (Idx.getValueType() != Subtarget.getXLenVT())
2022     return SDValue();
2023 
2024   MVT ContainerVT = VT;
2025   if (VT.isFixedLengthVector()) {
2026     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2027     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2028   }
2029 
2030   SDValue Mask, VL;
2031   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2032 
2033   SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
2034                                Idx, Mask, VL);
2035 
2036   if (!VT.isFixedLengthVector())
2037     return Gather;
2038 
2039   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2040 }
2041 
2042 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
2043                                  const RISCVSubtarget &Subtarget) {
2044   MVT VT = Op.getSimpleValueType();
2045   assert(VT.isFixedLengthVector() && "Unexpected vector!");
2046 
2047   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2048 
2049   SDLoc DL(Op);
2050   SDValue Mask, VL;
2051   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2052 
2053   MVT XLenVT = Subtarget.getXLenVT();
2054   unsigned NumElts = Op.getNumOperands();
2055 
2056   if (VT.getVectorElementType() == MVT::i1) {
2057     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
2058       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
2059       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
2060     }
2061 
2062     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
2063       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
2064       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
2065     }
2066 
2067     // Lower constant mask BUILD_VECTORs via an integer vector type, in
2068     // scalar integer chunks whose bit-width depends on the number of mask
2069     // bits and XLEN.
2070     // First, determine the most appropriate scalar integer type to use. This
2071     // is at most XLenVT, but may be shrunk to a smaller vector element type
2072     // according to the size of the final vector - use i8 chunks rather than
2073     // XLenVT if we're producing a v8i1. This results in more consistent
2074     // codegen across RV32 and RV64.
2075     unsigned NumViaIntegerBits =
2076         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
2077     NumViaIntegerBits = std::min(NumViaIntegerBits,
2078                                  Subtarget.getMaxELENForFixedLengthVectors());
2079     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
2080       // If we have to use more than one INSERT_VECTOR_ELT then this
2081       // optimization is likely to increase code size; avoid peforming it in
2082       // such a case. We can use a load from a constant pool in this case.
2083       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
2084         return SDValue();
2085       // Now we can create our integer vector type. Note that it may be larger
2086       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
2087       MVT IntegerViaVecVT =
2088           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
2089                            divideCeil(NumElts, NumViaIntegerBits));
2090 
2091       uint64_t Bits = 0;
2092       unsigned BitPos = 0, IntegerEltIdx = 0;
2093       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
2094 
2095       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
2096         // Once we accumulate enough bits to fill our scalar type, insert into
2097         // our vector and clear our accumulated data.
2098         if (I != 0 && I % NumViaIntegerBits == 0) {
2099           if (NumViaIntegerBits <= 32)
2100             Bits = SignExtend64(Bits, 32);
2101           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2102           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
2103                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2104           Bits = 0;
2105           BitPos = 0;
2106           IntegerEltIdx++;
2107         }
2108         SDValue V = Op.getOperand(I);
2109         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
2110         Bits |= ((uint64_t)BitValue << BitPos);
2111       }
2112 
2113       // Insert the (remaining) scalar value into position in our integer
2114       // vector type.
2115       if (NumViaIntegerBits <= 32)
2116         Bits = SignExtend64(Bits, 32);
2117       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2118       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
2119                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2120 
2121       if (NumElts < NumViaIntegerBits) {
2122         // If we're producing a smaller vector than our minimum legal integer
2123         // type, bitcast to the equivalent (known-legal) mask type, and extract
2124         // our final mask.
2125         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
2126         Vec = DAG.getBitcast(MVT::v8i1, Vec);
2127         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
2128                           DAG.getConstant(0, DL, XLenVT));
2129       } else {
2130         // Else we must have produced an integer type with the same size as the
2131         // mask type; bitcast for the final result.
2132         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
2133         Vec = DAG.getBitcast(VT, Vec);
2134       }
2135 
2136       return Vec;
2137     }
2138 
2139     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
2140     // vector type, we have a legal equivalently-sized i8 type, so we can use
2141     // that.
2142     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
2143     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
2144 
2145     SDValue WideVec;
2146     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2147       // For a splat, perform a scalar truncate before creating the wider
2148       // vector.
2149       assert(Splat.getValueType() == XLenVT &&
2150              "Unexpected type for i1 splat value");
2151       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2152                           DAG.getConstant(1, DL, XLenVT));
2153       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2154     } else {
2155       SmallVector<SDValue, 8> Ops(Op->op_values());
2156       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2157       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2158       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2159     }
2160 
2161     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2162   }
2163 
2164   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2165     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
2166       return Gather;
2167     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2168                                         : RISCVISD::VMV_V_X_VL;
2169     Splat =
2170         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
2171     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2172   }
2173 
2174   // Try and match index sequences, which we can lower to the vid instruction
2175   // with optional modifications. An all-undef vector is matched by
2176   // getSplatValue, above.
2177   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2178     int64_t StepNumerator = SimpleVID->StepNumerator;
2179     unsigned StepDenominator = SimpleVID->StepDenominator;
2180     int64_t Addend = SimpleVID->Addend;
2181 
2182     assert(StepNumerator != 0 && "Invalid step");
2183     bool Negate = false;
2184     int64_t SplatStepVal = StepNumerator;
2185     unsigned StepOpcode = ISD::MUL;
2186     if (StepNumerator != 1) {
2187       if (isPowerOf2_64(std::abs(StepNumerator))) {
2188         Negate = StepNumerator < 0;
2189         StepOpcode = ISD::SHL;
2190         SplatStepVal = Log2_64(std::abs(StepNumerator));
2191       }
2192     }
2193 
2194     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
2195     // threshold since it's the immediate value many RVV instructions accept.
2196     // There is no vmul.vi instruction so ensure multiply constant can fit in
2197     // a single addi instruction.
2198     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2199          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2200         isPowerOf2_32(StepDenominator) && isInt<5>(Addend)) {
2201       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2202       // Convert right out of the scalable type so we can use standard ISD
2203       // nodes for the rest of the computation. If we used scalable types with
2204       // these, we'd lose the fixed-length vector info and generate worse
2205       // vsetvli code.
2206       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2207       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2208           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2209         SDValue SplatStep = DAG.getSplatVector(
2210             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2211         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2212       }
2213       if (StepDenominator != 1) {
2214         SDValue SplatStep = DAG.getSplatVector(
2215             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2216         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2217       }
2218       if (Addend != 0 || Negate) {
2219         SDValue SplatAddend =
2220             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
2221         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
2222       }
2223       return VID;
2224     }
2225   }
2226 
2227   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2228   // when re-interpreted as a vector with a larger element type. For example,
2229   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2230   // could be instead splat as
2231   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2232   // TODO: This optimization could also work on non-constant splats, but it
2233   // would require bit-manipulation instructions to construct the splat value.
2234   SmallVector<SDValue> Sequence;
2235   unsigned EltBitSize = VT.getScalarSizeInBits();
2236   const auto *BV = cast<BuildVectorSDNode>(Op);
2237   if (VT.isInteger() && EltBitSize < 64 &&
2238       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2239       BV->getRepeatedSequence(Sequence) &&
2240       (Sequence.size() * EltBitSize) <= 64) {
2241     unsigned SeqLen = Sequence.size();
2242     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2243     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2244     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2245             ViaIntVT == MVT::i64) &&
2246            "Unexpected sequence type");
2247 
2248     unsigned EltIdx = 0;
2249     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2250     uint64_t SplatValue = 0;
2251     // Construct the amalgamated value which can be splatted as this larger
2252     // vector type.
2253     for (const auto &SeqV : Sequence) {
2254       if (!SeqV.isUndef())
2255         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2256                        << (EltIdx * EltBitSize));
2257       EltIdx++;
2258     }
2259 
2260     // On RV64, sign-extend from 32 to 64 bits where possible in order to
2261     // achieve better constant materializion.
2262     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2263       SplatValue = SignExtend64(SplatValue, 32);
2264 
2265     // Since we can't introduce illegal i64 types at this stage, we can only
2266     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2267     // way we can use RVV instructions to splat.
2268     assert((ViaIntVT.bitsLE(XLenVT) ||
2269             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2270            "Unexpected bitcast sequence");
2271     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2272       SDValue ViaVL =
2273           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2274       MVT ViaContainerVT =
2275           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2276       SDValue Splat =
2277           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2278                       DAG.getUNDEF(ViaContainerVT),
2279                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2280       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2281       return DAG.getBitcast(VT, Splat);
2282     }
2283   }
2284 
2285   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2286   // which constitute a large proportion of the elements. In such cases we can
2287   // splat a vector with the dominant element and make up the shortfall with
2288   // INSERT_VECTOR_ELTs.
2289   // Note that this includes vectors of 2 elements by association. The
2290   // upper-most element is the "dominant" one, allowing us to use a splat to
2291   // "insert" the upper element, and an insert of the lower element at position
2292   // 0, which improves codegen.
2293   SDValue DominantValue;
2294   unsigned MostCommonCount = 0;
2295   DenseMap<SDValue, unsigned> ValueCounts;
2296   unsigned NumUndefElts =
2297       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2298 
2299   // Track the number of scalar loads we know we'd be inserting, estimated as
2300   // any non-zero floating-point constant. Other kinds of element are either
2301   // already in registers or are materialized on demand. The threshold at which
2302   // a vector load is more desirable than several scalar materializion and
2303   // vector-insertion instructions is not known.
2304   unsigned NumScalarLoads = 0;
2305 
2306   for (SDValue V : Op->op_values()) {
2307     if (V.isUndef())
2308       continue;
2309 
2310     ValueCounts.insert(std::make_pair(V, 0));
2311     unsigned &Count = ValueCounts[V];
2312 
2313     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2314       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2315 
2316     // Is this value dominant? In case of a tie, prefer the highest element as
2317     // it's cheaper to insert near the beginning of a vector than it is at the
2318     // end.
2319     if (++Count >= MostCommonCount) {
2320       DominantValue = V;
2321       MostCommonCount = Count;
2322     }
2323   }
2324 
2325   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2326   unsigned NumDefElts = NumElts - NumUndefElts;
2327   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2328 
2329   // Don't perform this optimization when optimizing for size, since
2330   // materializing elements and inserting them tends to cause code bloat.
2331   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2332       ((MostCommonCount > DominantValueCountThreshold) ||
2333        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2334     // Start by splatting the most common element.
2335     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2336 
2337     DenseSet<SDValue> Processed{DominantValue};
2338     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2339     for (const auto &OpIdx : enumerate(Op->ops())) {
2340       const SDValue &V = OpIdx.value();
2341       if (V.isUndef() || !Processed.insert(V).second)
2342         continue;
2343       if (ValueCounts[V] == 1) {
2344         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2345                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2346       } else {
2347         // Blend in all instances of this value using a VSELECT, using a
2348         // mask where each bit signals whether that element is the one
2349         // we're after.
2350         SmallVector<SDValue> Ops;
2351         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2352           return DAG.getConstant(V == V1, DL, XLenVT);
2353         });
2354         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2355                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2356                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2357       }
2358     }
2359 
2360     return Vec;
2361   }
2362 
2363   return SDValue();
2364 }
2365 
2366 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2367                                    SDValue Lo, SDValue Hi, SDValue VL,
2368                                    SelectionDAG &DAG) {
2369   bool HasPassthru = Passthru && !Passthru.isUndef();
2370   if (!HasPassthru && !Passthru)
2371     Passthru = DAG.getUNDEF(VT);
2372   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2373     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2374     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2375     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2376     // node in order to try and match RVV vector/scalar instructions.
2377     if ((LoC >> 31) == HiC)
2378       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
2379 
2380     // If vl is equal to XLEN_MAX and Hi constant is equal to Lo, we could use
2381     // vmv.v.x whose EEW = 32 to lower it.
2382     auto *Const = dyn_cast<ConstantSDNode>(VL);
2383     if (LoC == HiC && Const && Const->isAllOnesValue()) {
2384       MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
2385       // TODO: if vl <= min(VLMAX), we can also do this. But we could not
2386       // access the subtarget here now.
2387       auto InterVec = DAG.getNode(
2388           RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT), Lo,
2389                                   DAG.getRegister(RISCV::X0, MVT::i32));
2390       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2391     }
2392   }
2393 
2394   // Fall back to a stack store and stride x0 vector load.
2395   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
2396                      Hi, VL);
2397 }
2398 
2399 // Called by type legalization to handle splat of i64 on RV32.
2400 // FIXME: We can optimize this when the type has sign or zero bits in one
2401 // of the halves.
2402 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2403                                    SDValue Scalar, SDValue VL,
2404                                    SelectionDAG &DAG) {
2405   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2406   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2407                            DAG.getConstant(0, DL, MVT::i32));
2408   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2409                            DAG.getConstant(1, DL, MVT::i32));
2410   return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
2411 }
2412 
2413 // This function lowers a splat of a scalar operand Splat with the vector
2414 // length VL. It ensures the final sequence is type legal, which is useful when
2415 // lowering a splat after type legalization.
2416 static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
2417                                 MVT VT, SDLoc DL, SelectionDAG &DAG,
2418                                 const RISCVSubtarget &Subtarget) {
2419   bool HasPassthru = Passthru && !Passthru.isUndef();
2420   if (!HasPassthru && !Passthru)
2421     Passthru = DAG.getUNDEF(VT);
2422   if (VT.isFloatingPoint()) {
2423     // If VL is 1, we could use vfmv.s.f.
2424     if (isOneConstant(VL))
2425       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
2426     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
2427   }
2428 
2429   MVT XLenVT = Subtarget.getXLenVT();
2430 
2431   // Simplest case is that the operand needs to be promoted to XLenVT.
2432   if (Scalar.getValueType().bitsLE(XLenVT)) {
2433     // If the operand is a constant, sign extend to increase our chances
2434     // of being able to use a .vi instruction. ANY_EXTEND would become a
2435     // a zero extend and the simm5 check in isel would fail.
2436     // FIXME: Should we ignore the upper bits in isel instead?
2437     unsigned ExtOpc =
2438         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2439     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2440     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2441     // If VL is 1 and the scalar value won't benefit from immediate, we could
2442     // use vmv.s.x.
2443     if (isOneConstant(VL) &&
2444         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2445       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
2446     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
2447   }
2448 
2449   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2450          "Unexpected scalar for splat lowering!");
2451 
2452   if (isOneConstant(VL) && isNullConstant(Scalar))
2453     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
2454                        DAG.getConstant(0, DL, XLenVT), VL);
2455 
2456   // Otherwise use the more complicated splatting algorithm.
2457   return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
2458 }
2459 
2460 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
2461                                 const RISCVSubtarget &Subtarget) {
2462   // We need to be able to widen elements to the next larger integer type.
2463   if (VT.getScalarSizeInBits() >= Subtarget.getMaxELENForFixedLengthVectors())
2464     return false;
2465 
2466   int Size = Mask.size();
2467   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
2468 
2469   int Srcs[] = {-1, -1};
2470   for (int i = 0; i != Size; ++i) {
2471     // Ignore undef elements.
2472     if (Mask[i] < 0)
2473       continue;
2474 
2475     // Is this an even or odd element.
2476     int Pol = i % 2;
2477 
2478     // Ensure we consistently use the same source for this element polarity.
2479     int Src = Mask[i] / Size;
2480     if (Srcs[Pol] < 0)
2481       Srcs[Pol] = Src;
2482     if (Srcs[Pol] != Src)
2483       return false;
2484 
2485     // Make sure the element within the source is appropriate for this element
2486     // in the destination.
2487     int Elt = Mask[i] % Size;
2488     if (Elt != i / 2)
2489       return false;
2490   }
2491 
2492   // We need to find a source for each polarity and they can't be the same.
2493   if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
2494     return false;
2495 
2496   // Swap the sources if the second source was in the even polarity.
2497   SwapSources = Srcs[0] > Srcs[1];
2498 
2499   return true;
2500 }
2501 
2502 /// Match shuffles that concatenate two vectors, rotate the concatenation,
2503 /// and then extract the original number of elements from the rotated result.
2504 /// This is equivalent to vector.splice or X86's PALIGNR instruction. The
2505 /// returned rotation amount is for a rotate right, where elements move from
2506 /// higher elements to lower elements. \p LoSrc indicates the first source
2507 /// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
2508 /// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
2509 /// 0 or 1 if a rotation is found.
2510 ///
2511 /// NOTE: We talk about rotate to the right which matches how bit shift and
2512 /// rotate instructions are described where LSBs are on the right, but LLVM IR
2513 /// and the table below write vectors with the lowest elements on the left.
2514 static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
2515   int Size = Mask.size();
2516 
2517   // We need to detect various ways of spelling a rotation:
2518   //   [11, 12, 13, 14, 15,  0,  1,  2]
2519   //   [-1, 12, 13, 14, -1, -1,  1, -1]
2520   //   [-1, -1, -1, -1, -1, -1,  1,  2]
2521   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
2522   //   [-1,  4,  5,  6, -1, -1,  9, -1]
2523   //   [-1,  4,  5,  6, -1, -1, -1, -1]
2524   int Rotation = 0;
2525   LoSrc = -1;
2526   HiSrc = -1;
2527   for (int i = 0; i != Size; ++i) {
2528     int M = Mask[i];
2529     if (M < 0)
2530       continue;
2531 
2532     // Determine where a rotate vector would have started.
2533     int StartIdx = i - (M % Size);
2534     // The identity rotation isn't interesting, stop.
2535     if (StartIdx == 0)
2536       return -1;
2537 
2538     // If we found the tail of a vector the rotation must be the missing
2539     // front. If we found the head of a vector, it must be how much of the
2540     // head.
2541     int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
2542 
2543     if (Rotation == 0)
2544       Rotation = CandidateRotation;
2545     else if (Rotation != CandidateRotation)
2546       // The rotations don't match, so we can't match this mask.
2547       return -1;
2548 
2549     // Compute which value this mask is pointing at.
2550     int MaskSrc = M < Size ? 0 : 1;
2551 
2552     // Compute which of the two target values this index should be assigned to.
2553     // This reflects whether the high elements are remaining or the low elemnts
2554     // are remaining.
2555     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
2556 
2557     // Either set up this value if we've not encountered it before, or check
2558     // that it remains consistent.
2559     if (TargetSrc < 0)
2560       TargetSrc = MaskSrc;
2561     else if (TargetSrc != MaskSrc)
2562       // This may be a rotation, but it pulls from the inputs in some
2563       // unsupported interleaving.
2564       return -1;
2565   }
2566 
2567   // Check that we successfully analyzed the mask, and normalize the results.
2568   assert(Rotation != 0 && "Failed to locate a viable rotation!");
2569   assert((LoSrc >= 0 || HiSrc >= 0) &&
2570          "Failed to find a rotated input vector!");
2571 
2572   return Rotation;
2573 }
2574 
2575 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2576                                    const RISCVSubtarget &Subtarget) {
2577   SDValue V1 = Op.getOperand(0);
2578   SDValue V2 = Op.getOperand(1);
2579   SDLoc DL(Op);
2580   MVT XLenVT = Subtarget.getXLenVT();
2581   MVT VT = Op.getSimpleValueType();
2582   unsigned NumElts = VT.getVectorNumElements();
2583   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2584 
2585   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2586 
2587   SDValue TrueMask, VL;
2588   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2589 
2590   if (SVN->isSplat()) {
2591     const int Lane = SVN->getSplatIndex();
2592     if (Lane >= 0) {
2593       MVT SVT = VT.getVectorElementType();
2594 
2595       // Turn splatted vector load into a strided load with an X0 stride.
2596       SDValue V = V1;
2597       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2598       // with undef.
2599       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2600       int Offset = Lane;
2601       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2602         int OpElements =
2603             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2604         V = V.getOperand(Offset / OpElements);
2605         Offset %= OpElements;
2606       }
2607 
2608       // We need to ensure the load isn't atomic or volatile.
2609       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2610         auto *Ld = cast<LoadSDNode>(V);
2611         Offset *= SVT.getStoreSize();
2612         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2613                                                    TypeSize::Fixed(Offset), DL);
2614 
2615         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2616         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2617           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2618           SDValue IntID =
2619               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2620           SDValue Ops[] = {Ld->getChain(),
2621                            IntID,
2622                            DAG.getUNDEF(ContainerVT),
2623                            NewAddr,
2624                            DAG.getRegister(RISCV::X0, XLenVT),
2625                            VL};
2626           SDValue NewLoad = DAG.getMemIntrinsicNode(
2627               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2628               DAG.getMachineFunction().getMachineMemOperand(
2629                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2630           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2631           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2632         }
2633 
2634         // Otherwise use a scalar load and splat. This will give the best
2635         // opportunity to fold a splat into the operation. ISel can turn it into
2636         // the x0 strided load if we aren't able to fold away the select.
2637         if (SVT.isFloatingPoint())
2638           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2639                           Ld->getPointerInfo().getWithOffset(Offset),
2640                           Ld->getOriginalAlign(),
2641                           Ld->getMemOperand()->getFlags());
2642         else
2643           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2644                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2645                              Ld->getOriginalAlign(),
2646                              Ld->getMemOperand()->getFlags());
2647         DAG.makeEquivalentMemoryOrdering(Ld, V);
2648 
2649         unsigned Opc =
2650             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2651         SDValue Splat =
2652             DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
2653         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2654       }
2655 
2656       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2657       assert(Lane < (int)NumElts && "Unexpected lane!");
2658       SDValue Gather =
2659           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2660                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2661       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2662     }
2663   }
2664 
2665   ArrayRef<int> Mask = SVN->getMask();
2666 
2667   // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors may
2668   // be undef which can be handled with a single SLIDEDOWN/UP.
2669   int LoSrc, HiSrc;
2670   int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
2671   if (Rotation > 0) {
2672     SDValue LoV, HiV;
2673     if (LoSrc >= 0) {
2674       LoV = LoSrc == 0 ? V1 : V2;
2675       LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
2676     }
2677     if (HiSrc >= 0) {
2678       HiV = HiSrc == 0 ? V1 : V2;
2679       HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
2680     }
2681 
2682     // We found a rotation. We need to slide HiV down by Rotation. Then we need
2683     // to slide LoV up by (NumElts - Rotation).
2684     unsigned InvRotate = NumElts - Rotation;
2685 
2686     SDValue Res = DAG.getUNDEF(ContainerVT);
2687     if (HiV) {
2688       // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
2689       // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
2690       // causes multiple vsetvlis in some test cases such as lowering
2691       // reduce.mul
2692       SDValue DownVL = VL;
2693       if (LoV)
2694         DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
2695       Res =
2696           DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, Res, HiV,
2697                       DAG.getConstant(Rotation, DL, XLenVT), TrueMask, DownVL);
2698     }
2699     if (LoV)
2700       Res = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Res, LoV,
2701                         DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL);
2702 
2703     return convertFromScalableVector(VT, Res, DAG, Subtarget);
2704   }
2705 
2706   // Detect an interleave shuffle and lower to
2707   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
2708   bool SwapSources;
2709   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2710     // Swap sources if needed.
2711     if (SwapSources)
2712       std::swap(V1, V2);
2713 
2714     // Extract the lower half of the vectors.
2715     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2716     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2717                      DAG.getConstant(0, DL, XLenVT));
2718     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2719                      DAG.getConstant(0, DL, XLenVT));
2720 
2721     // Double the element width and halve the number of elements in an int type.
2722     unsigned EltBits = VT.getScalarSizeInBits();
2723     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2724     MVT WideIntVT =
2725         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2726     // Convert this to a scalable vector. We need to base this on the
2727     // destination size to ensure there's always a type with a smaller LMUL.
2728     MVT WideIntContainerVT =
2729         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2730 
2731     // Convert sources to scalable vectors with the same element count as the
2732     // larger type.
2733     MVT HalfContainerVT = MVT::getVectorVT(
2734         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2735     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2736     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2737 
2738     // Cast sources to integer.
2739     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2740     MVT IntHalfVT =
2741         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2742     V1 = DAG.getBitcast(IntHalfVT, V1);
2743     V2 = DAG.getBitcast(IntHalfVT, V2);
2744 
2745     // Freeze V2 since we use it twice and we need to be sure that the add and
2746     // multiply see the same value.
2747     V2 = DAG.getNode(ISD::FREEZE, DL, IntHalfVT, V2);
2748 
2749     // Recreate TrueMask using the widened type's element count.
2750     MVT MaskVT =
2751         MVT::getVectorVT(MVT::i1, HalfContainerVT.getVectorElementCount());
2752     TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2753 
2754     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2755     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2756                               V2, TrueMask, VL);
2757     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2758     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2759                                      DAG.getUNDEF(IntHalfVT),
2760                                      DAG.getAllOnesConstant(DL, XLenVT));
2761     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2762                                    V2, Multiplier, TrueMask, VL);
2763     // Add the new copies to our previous addition giving us 2^eltbits copies of
2764     // V2. This is equivalent to shifting V2 left by eltbits. This should
2765     // combine with the vwmulu.vv above to form vwmaccu.vv.
2766     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2767                       TrueMask, VL);
2768     // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
2769     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2770     // vector VT.
2771     ContainerVT =
2772         MVT::getVectorVT(VT.getVectorElementType(),
2773                          WideIntContainerVT.getVectorElementCount() * 2);
2774     Add = DAG.getBitcast(ContainerVT, Add);
2775     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2776   }
2777 
2778   // Detect shuffles which can be re-expressed as vector selects; these are
2779   // shuffles in which each element in the destination is taken from an element
2780   // at the corresponding index in either source vectors.
2781   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2782     int MaskIndex = MaskIdx.value();
2783     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2784   });
2785 
2786   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2787 
2788   SmallVector<SDValue> MaskVals;
2789   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2790   // merged with a second vrgather.
2791   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2792 
2793   // By default we preserve the original operand order, and use a mask to
2794   // select LHS as true and RHS as false. However, since RVV vector selects may
2795   // feature splats but only on the LHS, we may choose to invert our mask and
2796   // instead select between RHS and LHS.
2797   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2798   bool InvertMask = IsSelect == SwapOps;
2799 
2800   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2801   // half.
2802   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2803 
2804   // Now construct the mask that will be used by the vselect or blended
2805   // vrgather operation. For vrgathers, construct the appropriate indices into
2806   // each vector.
2807   for (int MaskIndex : Mask) {
2808     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2809     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2810     if (!IsSelect) {
2811       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2812       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2813                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2814                                      : DAG.getUNDEF(XLenVT));
2815       GatherIndicesRHS.push_back(
2816           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2817                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2818       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2819         ++LHSIndexCounts[MaskIndex];
2820       if (!IsLHSOrUndefIndex)
2821         ++RHSIndexCounts[MaskIndex - NumElts];
2822     }
2823   }
2824 
2825   if (SwapOps) {
2826     std::swap(V1, V2);
2827     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2828   }
2829 
2830   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2831   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2832   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2833 
2834   if (IsSelect)
2835     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2836 
2837   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2838     // On such a large vector we're unable to use i8 as the index type.
2839     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2840     // may involve vector splitting if we're already at LMUL=8, or our
2841     // user-supplied maximum fixed-length LMUL.
2842     return SDValue();
2843   }
2844 
2845   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2846   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2847   MVT IndexVT = VT.changeTypeToInteger();
2848   // Since we can't introduce illegal index types at this stage, use i16 and
2849   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2850   // than XLenVT.
2851   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2852     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2853     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2854   }
2855 
2856   MVT IndexContainerVT =
2857       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2858 
2859   SDValue Gather;
2860   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2861   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2862   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2863     Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
2864                               Subtarget);
2865   } else {
2866     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2867     // If only one index is used, we can use a "splat" vrgather.
2868     // TODO: We can splat the most-common index and fix-up any stragglers, if
2869     // that's beneficial.
2870     if (LHSIndexCounts.size() == 1) {
2871       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2872       Gather =
2873           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2874                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2875     } else {
2876       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2877       LHSIndices =
2878           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2879 
2880       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2881                            TrueMask, VL);
2882     }
2883   }
2884 
2885   // If a second vector operand is used by this shuffle, blend it in with an
2886   // additional vrgather.
2887   if (!V2.isUndef()) {
2888     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2889     // If only one index is used, we can use a "splat" vrgather.
2890     // TODO: We can splat the most-common index and fix-up any stragglers, if
2891     // that's beneficial.
2892     if (RHSIndexCounts.size() == 1) {
2893       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2894       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2895                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2896     } else {
2897       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2898       RHSIndices =
2899           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2900       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2901                        VL);
2902     }
2903 
2904     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2905     SelectMask =
2906         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2907 
2908     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2909                          Gather, VL);
2910   }
2911 
2912   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2913 }
2914 
2915 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
2916   // Support splats for any type. These should type legalize well.
2917   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
2918     return true;
2919 
2920   // Only support legal VTs for other shuffles for now.
2921   if (!isTypeLegal(VT))
2922     return false;
2923 
2924   MVT SVT = VT.getSimpleVT();
2925 
2926   bool SwapSources;
2927   int LoSrc, HiSrc;
2928   return (isElementRotate(LoSrc, HiSrc, M) > 0) ||
2929          isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
2930 }
2931 
2932 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2933                                      SDLoc DL, SelectionDAG &DAG,
2934                                      const RISCVSubtarget &Subtarget) {
2935   if (VT.isScalableVector())
2936     return DAG.getFPExtendOrRound(Op, DL, VT);
2937   assert(VT.isFixedLengthVector() &&
2938          "Unexpected value type for RVV FP extend/round lowering");
2939   SDValue Mask, VL;
2940   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2941   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2942                         ? RISCVISD::FP_EXTEND_VL
2943                         : RISCVISD::FP_ROUND_VL;
2944   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2945 }
2946 
2947 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2948 // the exponent.
2949 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2950   MVT VT = Op.getSimpleValueType();
2951   unsigned EltSize = VT.getScalarSizeInBits();
2952   SDValue Src = Op.getOperand(0);
2953   SDLoc DL(Op);
2954 
2955   // We need a FP type that can represent the value.
2956   // TODO: Use f16 for i8 when possible?
2957   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2958   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2959 
2960   // Legal types should have been checked in the RISCVTargetLowering
2961   // constructor.
2962   // TODO: Splitting may make sense in some cases.
2963   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2964          "Expected legal float type!");
2965 
2966   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2967   // The trailing zero count is equal to log2 of this single bit value.
2968   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2969     SDValue Neg =
2970         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2971     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2972   }
2973 
2974   // We have a legal FP type, convert to it.
2975   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2976   // Bitcast to integer and shift the exponent to the LSB.
2977   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2978   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2979   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2980   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2981                               DAG.getConstant(ShiftAmt, DL, IntVT));
2982   // Truncate back to original type to allow vnsrl.
2983   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2984   // The exponent contains log2 of the value in biased form.
2985   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2986 
2987   // For trailing zeros, we just need to subtract the bias.
2988   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2989     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2990                        DAG.getConstant(ExponentBias, DL, VT));
2991 
2992   // For leading zeros, we need to remove the bias and convert from log2 to
2993   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2994   unsigned Adjust = ExponentBias + (EltSize - 1);
2995   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2996 }
2997 
2998 // While RVV has alignment restrictions, we should always be able to load as a
2999 // legal equivalently-sized byte-typed vector instead. This method is
3000 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
3001 // the load is already correctly-aligned, it returns SDValue().
3002 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
3003                                                     SelectionDAG &DAG) const {
3004   auto *Load = cast<LoadSDNode>(Op);
3005   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
3006 
3007   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
3008                                      Load->getMemoryVT(),
3009                                      *Load->getMemOperand()))
3010     return SDValue();
3011 
3012   SDLoc DL(Op);
3013   MVT VT = Op.getSimpleValueType();
3014   unsigned EltSizeBits = VT.getScalarSizeInBits();
3015   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
3016          "Unexpected unaligned RVV load type");
3017   MVT NewVT =
3018       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
3019   assert(NewVT.isValid() &&
3020          "Expecting equally-sized RVV vector types to be legal");
3021   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
3022                           Load->getPointerInfo(), Load->getOriginalAlign(),
3023                           Load->getMemOperand()->getFlags());
3024   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
3025 }
3026 
3027 // While RVV has alignment restrictions, we should always be able to store as a
3028 // legal equivalently-sized byte-typed vector instead. This method is
3029 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
3030 // returns SDValue() if the store is already correctly aligned.
3031 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
3032                                                      SelectionDAG &DAG) const {
3033   auto *Store = cast<StoreSDNode>(Op);
3034   assert(Store && Store->getValue().getValueType().isVector() &&
3035          "Expected vector store");
3036 
3037   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
3038                                      Store->getMemoryVT(),
3039                                      *Store->getMemOperand()))
3040     return SDValue();
3041 
3042   SDLoc DL(Op);
3043   SDValue StoredVal = Store->getValue();
3044   MVT VT = StoredVal.getSimpleValueType();
3045   unsigned EltSizeBits = VT.getScalarSizeInBits();
3046   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
3047          "Unexpected unaligned RVV store type");
3048   MVT NewVT =
3049       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
3050   assert(NewVT.isValid() &&
3051          "Expecting equally-sized RVV vector types to be legal");
3052   StoredVal = DAG.getBitcast(NewVT, StoredVal);
3053   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
3054                       Store->getPointerInfo(), Store->getOriginalAlign(),
3055                       Store->getMemOperand()->getFlags());
3056 }
3057 
3058 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
3059                                             SelectionDAG &DAG) const {
3060   switch (Op.getOpcode()) {
3061   default:
3062     report_fatal_error("unimplemented operand");
3063   case ISD::GlobalAddress:
3064     return lowerGlobalAddress(Op, DAG);
3065   case ISD::BlockAddress:
3066     return lowerBlockAddress(Op, DAG);
3067   case ISD::ConstantPool:
3068     return lowerConstantPool(Op, DAG);
3069   case ISD::JumpTable:
3070     return lowerJumpTable(Op, DAG);
3071   case ISD::GlobalTLSAddress:
3072     return lowerGlobalTLSAddress(Op, DAG);
3073   case ISD::SELECT:
3074     return lowerSELECT(Op, DAG);
3075   case ISD::BRCOND:
3076     return lowerBRCOND(Op, DAG);
3077   case ISD::VASTART:
3078     return lowerVASTART(Op, DAG);
3079   case ISD::FRAMEADDR:
3080     return lowerFRAMEADDR(Op, DAG);
3081   case ISD::RETURNADDR:
3082     return lowerRETURNADDR(Op, DAG);
3083   case ISD::SHL_PARTS:
3084     return lowerShiftLeftParts(Op, DAG);
3085   case ISD::SRA_PARTS:
3086     return lowerShiftRightParts(Op, DAG, true);
3087   case ISD::SRL_PARTS:
3088     return lowerShiftRightParts(Op, DAG, false);
3089   case ISD::BITCAST: {
3090     SDLoc DL(Op);
3091     EVT VT = Op.getValueType();
3092     SDValue Op0 = Op.getOperand(0);
3093     EVT Op0VT = Op0.getValueType();
3094     MVT XLenVT = Subtarget.getXLenVT();
3095     if (VT.isFixedLengthVector()) {
3096       // We can handle fixed length vector bitcasts with a simple replacement
3097       // in isel.
3098       if (Op0VT.isFixedLengthVector())
3099         return Op;
3100       // When bitcasting from scalar to fixed-length vector, insert the scalar
3101       // into a one-element vector of the result type, and perform a vector
3102       // bitcast.
3103       if (!Op0VT.isVector()) {
3104         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
3105         if (!isTypeLegal(BVT))
3106           return SDValue();
3107         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
3108                                               DAG.getUNDEF(BVT), Op0,
3109                                               DAG.getConstant(0, DL, XLenVT)));
3110       }
3111       return SDValue();
3112     }
3113     // Custom-legalize bitcasts from fixed-length vector types to scalar types
3114     // thus: bitcast the vector to a one-element vector type whose element type
3115     // is the same as the result type, and extract the first element.
3116     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
3117       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
3118       if (!isTypeLegal(BVT))
3119         return SDValue();
3120       SDValue BVec = DAG.getBitcast(BVT, Op0);
3121       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
3122                          DAG.getConstant(0, DL, XLenVT));
3123     }
3124     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
3125       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
3126       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
3127       return FPConv;
3128     }
3129     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
3130         Subtarget.hasStdExtF()) {
3131       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
3132       SDValue FPConv =
3133           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
3134       return FPConv;
3135     }
3136     return SDValue();
3137   }
3138   case ISD::INTRINSIC_WO_CHAIN:
3139     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3140   case ISD::INTRINSIC_W_CHAIN:
3141     return LowerINTRINSIC_W_CHAIN(Op, DAG);
3142   case ISD::INTRINSIC_VOID:
3143     return LowerINTRINSIC_VOID(Op, DAG);
3144   case ISD::BSWAP:
3145   case ISD::BITREVERSE: {
3146     MVT VT = Op.getSimpleValueType();
3147     SDLoc DL(Op);
3148     if (Subtarget.hasStdExtZbp()) {
3149       // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
3150       // Start with the maximum immediate value which is the bitwidth - 1.
3151       unsigned Imm = VT.getSizeInBits() - 1;
3152       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
3153       if (Op.getOpcode() == ISD::BSWAP)
3154         Imm &= ~0x7U;
3155       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
3156                          DAG.getConstant(Imm, DL, VT));
3157     }
3158     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
3159     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
3160     // Expand bitreverse to a bswap(rev8) followed by brev8.
3161     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
3162     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
3163     // as brev8 by an isel pattern.
3164     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
3165                        DAG.getConstant(7, DL, VT));
3166   }
3167   case ISD::FSHL:
3168   case ISD::FSHR: {
3169     MVT VT = Op.getSimpleValueType();
3170     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
3171     SDLoc DL(Op);
3172     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
3173     // use log(XLen) bits. Mask the shift amount accordingly to prevent
3174     // accidentally setting the extra bit.
3175     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
3176     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
3177                                 DAG.getConstant(ShAmtWidth, DL, VT));
3178     // fshl and fshr concatenate their operands in the same order. fsr and fsl
3179     // instruction use different orders. fshl will return its first operand for
3180     // shift of zero, fshr will return its second operand. fsl and fsr both
3181     // return rs1 so the ISD nodes need to have different operand orders.
3182     // Shift amount is in rs2.
3183     SDValue Op0 = Op.getOperand(0);
3184     SDValue Op1 = Op.getOperand(1);
3185     unsigned Opc = RISCVISD::FSL;
3186     if (Op.getOpcode() == ISD::FSHR) {
3187       std::swap(Op0, Op1);
3188       Opc = RISCVISD::FSR;
3189     }
3190     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3191   }
3192   case ISD::TRUNCATE: {
3193     SDLoc DL(Op);
3194     MVT VT = Op.getSimpleValueType();
3195     // Only custom-lower vector truncates
3196     if (!VT.isVector())
3197       return Op;
3198 
3199     // Truncates to mask types are handled differently
3200     if (VT.getVectorElementType() == MVT::i1)
3201       return lowerVectorMaskTrunc(Op, DAG);
3202 
3203     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
3204     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
3205     // truncate by one power of two at a time.
3206     MVT DstEltVT = VT.getVectorElementType();
3207 
3208     SDValue Src = Op.getOperand(0);
3209     MVT SrcVT = Src.getSimpleValueType();
3210     MVT SrcEltVT = SrcVT.getVectorElementType();
3211 
3212     assert(DstEltVT.bitsLT(SrcEltVT) &&
3213            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
3214            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
3215            "Unexpected vector truncate lowering");
3216 
3217     MVT ContainerVT = SrcVT;
3218     if (SrcVT.isFixedLengthVector()) {
3219       ContainerVT = getContainerForFixedLengthVector(SrcVT);
3220       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3221     }
3222 
3223     SDValue Result = Src;
3224     SDValue Mask, VL;
3225     std::tie(Mask, VL) =
3226         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
3227     LLVMContext &Context = *DAG.getContext();
3228     const ElementCount Count = ContainerVT.getVectorElementCount();
3229     do {
3230       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
3231       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
3232       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
3233                            Mask, VL);
3234     } while (SrcEltVT != DstEltVT);
3235 
3236     if (SrcVT.isFixedLengthVector())
3237       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
3238 
3239     return Result;
3240   }
3241   case ISD::ANY_EXTEND:
3242   case ISD::ZERO_EXTEND:
3243     if (Op.getOperand(0).getValueType().isVector() &&
3244         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3245       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3246     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3247   case ISD::SIGN_EXTEND:
3248     if (Op.getOperand(0).getValueType().isVector() &&
3249         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3250       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3251     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3252   case ISD::SPLAT_VECTOR_PARTS:
3253     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3254   case ISD::INSERT_VECTOR_ELT:
3255     return lowerINSERT_VECTOR_ELT(Op, DAG);
3256   case ISD::EXTRACT_VECTOR_ELT:
3257     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3258   case ISD::VSCALE: {
3259     MVT VT = Op.getSimpleValueType();
3260     SDLoc DL(Op);
3261     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
3262     // We define our scalable vector types for lmul=1 to use a 64 bit known
3263     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
3264     // vscale as VLENB / 8.
3265     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3266     if (Subtarget.getMinVLen() < RISCV::RVVBitsPerBlock)
3267       report_fatal_error("Support for VLEN==32 is incomplete.");
3268     if (isa<ConstantSDNode>(Op.getOperand(0))) {
3269       // We assume VLENB is a multiple of 8. We manually choose the best shift
3270       // here because SimplifyDemandedBits isn't always able to simplify it.
3271       uint64_t Val = Op.getConstantOperandVal(0);
3272       if (isPowerOf2_64(Val)) {
3273         uint64_t Log2 = Log2_64(Val);
3274         if (Log2 < 3)
3275           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3276                              DAG.getConstant(3 - Log2, DL, VT));
3277         if (Log2 > 3)
3278           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3279                              DAG.getConstant(Log2 - 3, DL, VT));
3280         return VLENB;
3281       }
3282       // If the multiplier is a multiple of 8, scale it down to avoid needing
3283       // to shift the VLENB value.
3284       if ((Val % 8) == 0)
3285         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3286                            DAG.getConstant(Val / 8, DL, VT));
3287     }
3288 
3289     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3290                                  DAG.getConstant(3, DL, VT));
3291     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3292   }
3293   case ISD::FPOWI: {
3294     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
3295     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
3296     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3297         Op.getOperand(1).getValueType() == MVT::i32) {
3298       SDLoc DL(Op);
3299       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3300       SDValue Powi =
3301           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3302       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3303                          DAG.getIntPtrConstant(0, DL));
3304     }
3305     return SDValue();
3306   }
3307   case ISD::FP_EXTEND: {
3308     // RVV can only do fp_extend to types double the size as the source. We
3309     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
3310     // via f32.
3311     SDLoc DL(Op);
3312     MVT VT = Op.getSimpleValueType();
3313     SDValue Src = Op.getOperand(0);
3314     MVT SrcVT = Src.getSimpleValueType();
3315 
3316     // Prepare any fixed-length vector operands.
3317     MVT ContainerVT = VT;
3318     if (SrcVT.isFixedLengthVector()) {
3319       ContainerVT = getContainerForFixedLengthVector(VT);
3320       MVT SrcContainerVT =
3321           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
3322       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3323     }
3324 
3325     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
3326         SrcVT.getVectorElementType() != MVT::f16) {
3327       // For scalable vectors, we only need to close the gap between
3328       // vXf16->vXf64.
3329       if (!VT.isFixedLengthVector())
3330         return Op;
3331       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
3332       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3333       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3334     }
3335 
3336     MVT InterVT = VT.changeVectorElementType(MVT::f32);
3337     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
3338     SDValue IntermediateExtend = getRVVFPExtendOrRound(
3339         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
3340 
3341     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
3342                                            DL, DAG, Subtarget);
3343     if (VT.isFixedLengthVector())
3344       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
3345     return Extend;
3346   }
3347   case ISD::FP_ROUND: {
3348     // RVV can only do fp_round to types half the size as the source. We
3349     // custom-lower f64->f16 rounds via RVV's round-to-odd float
3350     // conversion instruction.
3351     SDLoc DL(Op);
3352     MVT VT = Op.getSimpleValueType();
3353     SDValue Src = Op.getOperand(0);
3354     MVT SrcVT = Src.getSimpleValueType();
3355 
3356     // Prepare any fixed-length vector operands.
3357     MVT ContainerVT = VT;
3358     if (VT.isFixedLengthVector()) {
3359       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3360       ContainerVT =
3361           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3362       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3363     }
3364 
3365     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
3366         SrcVT.getVectorElementType() != MVT::f64) {
3367       // For scalable vectors, we only need to close the gap between
3368       // vXf64<->vXf16.
3369       if (!VT.isFixedLengthVector())
3370         return Op;
3371       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
3372       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3373       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3374     }
3375 
3376     SDValue Mask, VL;
3377     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3378 
3379     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
3380     SDValue IntermediateRound =
3381         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
3382     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
3383                                           DL, DAG, Subtarget);
3384 
3385     if (VT.isFixedLengthVector())
3386       return convertFromScalableVector(VT, Round, DAG, Subtarget);
3387     return Round;
3388   }
3389   case ISD::FP_TO_SINT:
3390   case ISD::FP_TO_UINT:
3391   case ISD::SINT_TO_FP:
3392   case ISD::UINT_TO_FP: {
3393     // RVV can only do fp<->int conversions to types half/double the size as
3394     // the source. We custom-lower any conversions that do two hops into
3395     // sequences.
3396     MVT VT = Op.getSimpleValueType();
3397     if (!VT.isVector())
3398       return Op;
3399     SDLoc DL(Op);
3400     SDValue Src = Op.getOperand(0);
3401     MVT EltVT = VT.getVectorElementType();
3402     MVT SrcVT = Src.getSimpleValueType();
3403     MVT SrcEltVT = SrcVT.getVectorElementType();
3404     unsigned EltSize = EltVT.getSizeInBits();
3405     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3406     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3407            "Unexpected vector element types");
3408 
3409     bool IsInt2FP = SrcEltVT.isInteger();
3410     // Widening conversions
3411     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
3412       if (IsInt2FP) {
3413         // Do a regular integer sign/zero extension then convert to float.
3414         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
3415                                       VT.getVectorElementCount());
3416         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3417                                  ? ISD::ZERO_EXTEND
3418                                  : ISD::SIGN_EXTEND;
3419         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3420         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3421       }
3422       // FP2Int
3423       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3424       // Do one doubling fp_extend then complete the operation by converting
3425       // to int.
3426       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3427       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3428       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3429     }
3430 
3431     // Narrowing conversions
3432     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
3433       if (IsInt2FP) {
3434         // One narrowing int_to_fp, then an fp_round.
3435         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3436         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3437         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3438         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3439       }
3440       // FP2Int
3441       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3442       // representable by the integer, the result is poison.
3443       MVT IVecVT =
3444           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
3445                            VT.getVectorElementCount());
3446       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3447       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3448     }
3449 
3450     // Scalable vectors can exit here. Patterns will handle equally-sized
3451     // conversions halving/doubling ones.
3452     if (!VT.isFixedLengthVector())
3453       return Op;
3454 
3455     // For fixed-length vectors we lower to a custom "VL" node.
3456     unsigned RVVOpc = 0;
3457     switch (Op.getOpcode()) {
3458     default:
3459       llvm_unreachable("Impossible opcode");
3460     case ISD::FP_TO_SINT:
3461       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3462       break;
3463     case ISD::FP_TO_UINT:
3464       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3465       break;
3466     case ISD::SINT_TO_FP:
3467       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3468       break;
3469     case ISD::UINT_TO_FP:
3470       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3471       break;
3472     }
3473 
3474     MVT ContainerVT, SrcContainerVT;
3475     // Derive the reference container type from the larger vector type.
3476     if (SrcEltSize > EltSize) {
3477       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3478       ContainerVT =
3479           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3480     } else {
3481       ContainerVT = getContainerForFixedLengthVector(VT);
3482       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3483     }
3484 
3485     SDValue Mask, VL;
3486     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3487 
3488     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3489     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3490     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3491   }
3492   case ISD::FP_TO_SINT_SAT:
3493   case ISD::FP_TO_UINT_SAT:
3494     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3495   case ISD::FTRUNC:
3496   case ISD::FCEIL:
3497   case ISD::FFLOOR:
3498     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3499   case ISD::FROUND:
3500     return lowerFROUND(Op, DAG);
3501   case ISD::VECREDUCE_ADD:
3502   case ISD::VECREDUCE_UMAX:
3503   case ISD::VECREDUCE_SMAX:
3504   case ISD::VECREDUCE_UMIN:
3505   case ISD::VECREDUCE_SMIN:
3506     return lowerVECREDUCE(Op, DAG);
3507   case ISD::VECREDUCE_AND:
3508   case ISD::VECREDUCE_OR:
3509   case ISD::VECREDUCE_XOR:
3510     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3511       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3512     return lowerVECREDUCE(Op, DAG);
3513   case ISD::VECREDUCE_FADD:
3514   case ISD::VECREDUCE_SEQ_FADD:
3515   case ISD::VECREDUCE_FMIN:
3516   case ISD::VECREDUCE_FMAX:
3517     return lowerFPVECREDUCE(Op, DAG);
3518   case ISD::VP_REDUCE_ADD:
3519   case ISD::VP_REDUCE_UMAX:
3520   case ISD::VP_REDUCE_SMAX:
3521   case ISD::VP_REDUCE_UMIN:
3522   case ISD::VP_REDUCE_SMIN:
3523   case ISD::VP_REDUCE_FADD:
3524   case ISD::VP_REDUCE_SEQ_FADD:
3525   case ISD::VP_REDUCE_FMIN:
3526   case ISD::VP_REDUCE_FMAX:
3527     return lowerVPREDUCE(Op, DAG);
3528   case ISD::VP_REDUCE_AND:
3529   case ISD::VP_REDUCE_OR:
3530   case ISD::VP_REDUCE_XOR:
3531     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3532       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3533     return lowerVPREDUCE(Op, DAG);
3534   case ISD::INSERT_SUBVECTOR:
3535     return lowerINSERT_SUBVECTOR(Op, DAG);
3536   case ISD::EXTRACT_SUBVECTOR:
3537     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3538   case ISD::STEP_VECTOR:
3539     return lowerSTEP_VECTOR(Op, DAG);
3540   case ISD::VECTOR_REVERSE:
3541     return lowerVECTOR_REVERSE(Op, DAG);
3542   case ISD::BUILD_VECTOR:
3543     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3544   case ISD::SPLAT_VECTOR:
3545     if (Op.getValueType().getVectorElementType() == MVT::i1)
3546       return lowerVectorMaskSplat(Op, DAG);
3547     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
3548   case ISD::VECTOR_SHUFFLE:
3549     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3550   case ISD::CONCAT_VECTORS: {
3551     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3552     // better than going through the stack, as the default expansion does.
3553     SDLoc DL(Op);
3554     MVT VT = Op.getSimpleValueType();
3555     unsigned NumOpElts =
3556         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3557     SDValue Vec = DAG.getUNDEF(VT);
3558     for (const auto &OpIdx : enumerate(Op->ops())) {
3559       SDValue SubVec = OpIdx.value();
3560       // Don't insert undef subvectors.
3561       if (SubVec.isUndef())
3562         continue;
3563       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3564                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3565     }
3566     return Vec;
3567   }
3568   case ISD::LOAD:
3569     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3570       return V;
3571     if (Op.getValueType().isFixedLengthVector())
3572       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3573     return Op;
3574   case ISD::STORE:
3575     if (auto V = expandUnalignedRVVStore(Op, DAG))
3576       return V;
3577     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3578       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3579     return Op;
3580   case ISD::MLOAD:
3581   case ISD::VP_LOAD:
3582     return lowerMaskedLoad(Op, DAG);
3583   case ISD::MSTORE:
3584   case ISD::VP_STORE:
3585     return lowerMaskedStore(Op, DAG);
3586   case ISD::SETCC:
3587     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3588   case ISD::ADD:
3589     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3590   case ISD::SUB:
3591     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3592   case ISD::MUL:
3593     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3594   case ISD::MULHS:
3595     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3596   case ISD::MULHU:
3597     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3598   case ISD::AND:
3599     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3600                                               RISCVISD::AND_VL);
3601   case ISD::OR:
3602     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3603                                               RISCVISD::OR_VL);
3604   case ISD::XOR:
3605     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3606                                               RISCVISD::XOR_VL);
3607   case ISD::SDIV:
3608     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3609   case ISD::SREM:
3610     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3611   case ISD::UDIV:
3612     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3613   case ISD::UREM:
3614     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3615   case ISD::SHL:
3616   case ISD::SRA:
3617   case ISD::SRL:
3618     if (Op.getSimpleValueType().isFixedLengthVector())
3619       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3620     // This can be called for an i32 shift amount that needs to be promoted.
3621     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3622            "Unexpected custom legalisation");
3623     return SDValue();
3624   case ISD::SADDSAT:
3625     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3626   case ISD::UADDSAT:
3627     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3628   case ISD::SSUBSAT:
3629     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3630   case ISD::USUBSAT:
3631     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3632   case ISD::FADD:
3633     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3634   case ISD::FSUB:
3635     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3636   case ISD::FMUL:
3637     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3638   case ISD::FDIV:
3639     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3640   case ISD::FNEG:
3641     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3642   case ISD::FABS:
3643     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3644   case ISD::FSQRT:
3645     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3646   case ISD::FMA:
3647     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3648   case ISD::SMIN:
3649     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3650   case ISD::SMAX:
3651     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3652   case ISD::UMIN:
3653     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3654   case ISD::UMAX:
3655     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3656   case ISD::FMINNUM:
3657     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3658   case ISD::FMAXNUM:
3659     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3660   case ISD::ABS:
3661     return lowerABS(Op, DAG);
3662   case ISD::CTLZ_ZERO_UNDEF:
3663   case ISD::CTTZ_ZERO_UNDEF:
3664     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3665   case ISD::VSELECT:
3666     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3667   case ISD::FCOPYSIGN:
3668     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3669   case ISD::MGATHER:
3670   case ISD::VP_GATHER:
3671     return lowerMaskedGather(Op, DAG);
3672   case ISD::MSCATTER:
3673   case ISD::VP_SCATTER:
3674     return lowerMaskedScatter(Op, DAG);
3675   case ISD::FLT_ROUNDS_:
3676     return lowerGET_ROUNDING(Op, DAG);
3677   case ISD::SET_ROUNDING:
3678     return lowerSET_ROUNDING(Op, DAG);
3679   case ISD::VP_SELECT:
3680     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3681   case ISD::VP_MERGE:
3682     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3683   case ISD::VP_ADD:
3684     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3685   case ISD::VP_SUB:
3686     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3687   case ISD::VP_MUL:
3688     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3689   case ISD::VP_SDIV:
3690     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3691   case ISD::VP_UDIV:
3692     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3693   case ISD::VP_SREM:
3694     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3695   case ISD::VP_UREM:
3696     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3697   case ISD::VP_AND:
3698     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3699   case ISD::VP_OR:
3700     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3701   case ISD::VP_XOR:
3702     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3703   case ISD::VP_ASHR:
3704     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3705   case ISD::VP_LSHR:
3706     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3707   case ISD::VP_SHL:
3708     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3709   case ISD::VP_FADD:
3710     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3711   case ISD::VP_FSUB:
3712     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3713   case ISD::VP_FMUL:
3714     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3715   case ISD::VP_FDIV:
3716     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3717   case ISD::VP_FNEG:
3718     return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
3719   case ISD::VP_FMA:
3720     return lowerVPOp(Op, DAG, RISCVISD::FMA_VL);
3721   }
3722 }
3723 
3724 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3725                              SelectionDAG &DAG, unsigned Flags) {
3726   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3727 }
3728 
3729 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3730                              SelectionDAG &DAG, unsigned Flags) {
3731   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3732                                    Flags);
3733 }
3734 
3735 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3736                              SelectionDAG &DAG, unsigned Flags) {
3737   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3738                                    N->getOffset(), Flags);
3739 }
3740 
3741 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3742                              SelectionDAG &DAG, unsigned Flags) {
3743   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3744 }
3745 
3746 template <class NodeTy>
3747 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3748                                      bool IsLocal) const {
3749   SDLoc DL(N);
3750   EVT Ty = getPointerTy(DAG.getDataLayout());
3751 
3752   if (isPositionIndependent()) {
3753     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3754     if (IsLocal)
3755       // Use PC-relative addressing to access the symbol. This generates the
3756       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3757       // %pcrel_lo(auipc)).
3758       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3759 
3760     // Use PC-relative addressing to access the GOT for this symbol, then load
3761     // the address from the GOT. This generates the pattern (PseudoLA sym),
3762     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3763     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3764   }
3765 
3766   switch (getTargetMachine().getCodeModel()) {
3767   default:
3768     report_fatal_error("Unsupported code model for lowering");
3769   case CodeModel::Small: {
3770     // Generate a sequence for accessing addresses within the first 2 GiB of
3771     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3772     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3773     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3774     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3775     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3776   }
3777   case CodeModel::Medium: {
3778     // Generate a sequence for accessing addresses within any 2GiB range within
3779     // the address space. This generates the pattern (PseudoLLA sym), which
3780     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3781     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3782     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3783   }
3784   }
3785 }
3786 
3787 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3788                                                 SelectionDAG &DAG) const {
3789   SDLoc DL(Op);
3790   EVT Ty = Op.getValueType();
3791   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3792   int64_t Offset = N->getOffset();
3793   MVT XLenVT = Subtarget.getXLenVT();
3794 
3795   const GlobalValue *GV = N->getGlobal();
3796   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3797   SDValue Addr = getAddr(N, DAG, IsLocal);
3798 
3799   // In order to maximise the opportunity for common subexpression elimination,
3800   // emit a separate ADD node for the global address offset instead of folding
3801   // it in the global address node. Later peephole optimisations may choose to
3802   // fold it back in when profitable.
3803   if (Offset != 0)
3804     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3805                        DAG.getConstant(Offset, DL, XLenVT));
3806   return Addr;
3807 }
3808 
3809 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3810                                                SelectionDAG &DAG) const {
3811   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3812 
3813   return getAddr(N, DAG);
3814 }
3815 
3816 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3817                                                SelectionDAG &DAG) const {
3818   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3819 
3820   return getAddr(N, DAG);
3821 }
3822 
3823 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3824                                             SelectionDAG &DAG) const {
3825   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3826 
3827   return getAddr(N, DAG);
3828 }
3829 
3830 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3831                                               SelectionDAG &DAG,
3832                                               bool UseGOT) const {
3833   SDLoc DL(N);
3834   EVT Ty = getPointerTy(DAG.getDataLayout());
3835   const GlobalValue *GV = N->getGlobal();
3836   MVT XLenVT = Subtarget.getXLenVT();
3837 
3838   if (UseGOT) {
3839     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3840     // load the address from the GOT and add the thread pointer. This generates
3841     // the pattern (PseudoLA_TLS_IE sym), which expands to
3842     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3843     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3844     SDValue Load =
3845         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3846 
3847     // Add the thread pointer.
3848     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3849     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3850   }
3851 
3852   // Generate a sequence for accessing the address relative to the thread
3853   // pointer, with the appropriate adjustment for the thread pointer offset.
3854   // This generates the pattern
3855   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3856   SDValue AddrHi =
3857       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3858   SDValue AddrAdd =
3859       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3860   SDValue AddrLo =
3861       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3862 
3863   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3864   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3865   SDValue MNAdd = SDValue(
3866       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3867       0);
3868   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3869 }
3870 
3871 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3872                                                SelectionDAG &DAG) const {
3873   SDLoc DL(N);
3874   EVT Ty = getPointerTy(DAG.getDataLayout());
3875   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3876   const GlobalValue *GV = N->getGlobal();
3877 
3878   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3879   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3880   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3881   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3882   SDValue Load =
3883       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3884 
3885   // Prepare argument list to generate call.
3886   ArgListTy Args;
3887   ArgListEntry Entry;
3888   Entry.Node = Load;
3889   Entry.Ty = CallTy;
3890   Args.push_back(Entry);
3891 
3892   // Setup call to __tls_get_addr.
3893   TargetLowering::CallLoweringInfo CLI(DAG);
3894   CLI.setDebugLoc(DL)
3895       .setChain(DAG.getEntryNode())
3896       .setLibCallee(CallingConv::C, CallTy,
3897                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3898                     std::move(Args));
3899 
3900   return LowerCallTo(CLI).first;
3901 }
3902 
3903 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3904                                                    SelectionDAG &DAG) const {
3905   SDLoc DL(Op);
3906   EVT Ty = Op.getValueType();
3907   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3908   int64_t Offset = N->getOffset();
3909   MVT XLenVT = Subtarget.getXLenVT();
3910 
3911   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3912 
3913   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3914       CallingConv::GHC)
3915     report_fatal_error("In GHC calling convention TLS is not supported");
3916 
3917   SDValue Addr;
3918   switch (Model) {
3919   case TLSModel::LocalExec:
3920     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3921     break;
3922   case TLSModel::InitialExec:
3923     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3924     break;
3925   case TLSModel::LocalDynamic:
3926   case TLSModel::GeneralDynamic:
3927     Addr = getDynamicTLSAddr(N, DAG);
3928     break;
3929   }
3930 
3931   // In order to maximise the opportunity for common subexpression elimination,
3932   // emit a separate ADD node for the global address offset instead of folding
3933   // it in the global address node. Later peephole optimisations may choose to
3934   // fold it back in when profitable.
3935   if (Offset != 0)
3936     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3937                        DAG.getConstant(Offset, DL, XLenVT));
3938   return Addr;
3939 }
3940 
3941 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3942   SDValue CondV = Op.getOperand(0);
3943   SDValue TrueV = Op.getOperand(1);
3944   SDValue FalseV = Op.getOperand(2);
3945   SDLoc DL(Op);
3946   MVT VT = Op.getSimpleValueType();
3947   MVT XLenVT = Subtarget.getXLenVT();
3948 
3949   // Lower vector SELECTs to VSELECTs by splatting the condition.
3950   if (VT.isVector()) {
3951     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3952     SDValue CondSplat = VT.isScalableVector()
3953                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3954                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3955     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3956   }
3957 
3958   // If the result type is XLenVT and CondV is the output of a SETCC node
3959   // which also operated on XLenVT inputs, then merge the SETCC node into the
3960   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3961   // compare+branch instructions. i.e.:
3962   // (select (setcc lhs, rhs, cc), truev, falsev)
3963   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3964   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3965       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3966     SDValue LHS = CondV.getOperand(0);
3967     SDValue RHS = CondV.getOperand(1);
3968     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3969     ISD::CondCode CCVal = CC->get();
3970 
3971     // Special case for a select of 2 constants that have a diffence of 1.
3972     // Normally this is done by DAGCombine, but if the select is introduced by
3973     // type legalization or op legalization, we miss it. Restricting to SETLT
3974     // case for now because that is what signed saturating add/sub need.
3975     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3976     // but we would probably want to swap the true/false values if the condition
3977     // is SETGE/SETLE to avoid an XORI.
3978     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3979         CCVal == ISD::SETLT) {
3980       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3981       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3982       if (TrueVal - 1 == FalseVal)
3983         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3984       if (TrueVal + 1 == FalseVal)
3985         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3986     }
3987 
3988     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3989 
3990     SDValue TargetCC = DAG.getCondCode(CCVal);
3991     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3992     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3993   }
3994 
3995   // Otherwise:
3996   // (select condv, truev, falsev)
3997   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3998   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3999   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
4000 
4001   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
4002 
4003   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
4004 }
4005 
4006 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
4007   SDValue CondV = Op.getOperand(1);
4008   SDLoc DL(Op);
4009   MVT XLenVT = Subtarget.getXLenVT();
4010 
4011   if (CondV.getOpcode() == ISD::SETCC &&
4012       CondV.getOperand(0).getValueType() == XLenVT) {
4013     SDValue LHS = CondV.getOperand(0);
4014     SDValue RHS = CondV.getOperand(1);
4015     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
4016 
4017     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
4018 
4019     SDValue TargetCC = DAG.getCondCode(CCVal);
4020     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
4021                        LHS, RHS, TargetCC, Op.getOperand(2));
4022   }
4023 
4024   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
4025                      CondV, DAG.getConstant(0, DL, XLenVT),
4026                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
4027 }
4028 
4029 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
4030   MachineFunction &MF = DAG.getMachineFunction();
4031   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
4032 
4033   SDLoc DL(Op);
4034   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
4035                                  getPointerTy(MF.getDataLayout()));
4036 
4037   // vastart just stores the address of the VarArgsFrameIndex slot into the
4038   // memory location argument.
4039   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
4040   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
4041                       MachinePointerInfo(SV));
4042 }
4043 
4044 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
4045                                             SelectionDAG &DAG) const {
4046   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
4047   MachineFunction &MF = DAG.getMachineFunction();
4048   MachineFrameInfo &MFI = MF.getFrameInfo();
4049   MFI.setFrameAddressIsTaken(true);
4050   Register FrameReg = RI.getFrameRegister(MF);
4051   int XLenInBytes = Subtarget.getXLen() / 8;
4052 
4053   EVT VT = Op.getValueType();
4054   SDLoc DL(Op);
4055   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
4056   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4057   while (Depth--) {
4058     int Offset = -(XLenInBytes * 2);
4059     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
4060                               DAG.getIntPtrConstant(Offset, DL));
4061     FrameAddr =
4062         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
4063   }
4064   return FrameAddr;
4065 }
4066 
4067 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
4068                                              SelectionDAG &DAG) const {
4069   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
4070   MachineFunction &MF = DAG.getMachineFunction();
4071   MachineFrameInfo &MFI = MF.getFrameInfo();
4072   MFI.setReturnAddressIsTaken(true);
4073   MVT XLenVT = Subtarget.getXLenVT();
4074   int XLenInBytes = Subtarget.getXLen() / 8;
4075 
4076   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
4077     return SDValue();
4078 
4079   EVT VT = Op.getValueType();
4080   SDLoc DL(Op);
4081   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4082   if (Depth) {
4083     int Off = -XLenInBytes;
4084     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
4085     SDValue Offset = DAG.getConstant(Off, DL, VT);
4086     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
4087                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
4088                        MachinePointerInfo());
4089   }
4090 
4091   // Return the value of the return address register, marking it an implicit
4092   // live-in.
4093   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
4094   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
4095 }
4096 
4097 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
4098                                                  SelectionDAG &DAG) const {
4099   SDLoc DL(Op);
4100   SDValue Lo = Op.getOperand(0);
4101   SDValue Hi = Op.getOperand(1);
4102   SDValue Shamt = Op.getOperand(2);
4103   EVT VT = Lo.getValueType();
4104 
4105   // if Shamt-XLEN < 0: // Shamt < XLEN
4106   //   Lo = Lo << Shamt
4107   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 ^ Shamt))
4108   // else:
4109   //   Lo = 0
4110   //   Hi = Lo << (Shamt-XLEN)
4111 
4112   SDValue Zero = DAG.getConstant(0, DL, VT);
4113   SDValue One = DAG.getConstant(1, DL, VT);
4114   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4115   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4116   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4117   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
4118 
4119   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
4120   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
4121   SDValue ShiftRightLo =
4122       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
4123   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
4124   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
4125   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
4126 
4127   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4128 
4129   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
4130   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4131 
4132   SDValue Parts[2] = {Lo, Hi};
4133   return DAG.getMergeValues(Parts, DL);
4134 }
4135 
4136 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
4137                                                   bool IsSRA) const {
4138   SDLoc DL(Op);
4139   SDValue Lo = Op.getOperand(0);
4140   SDValue Hi = Op.getOperand(1);
4141   SDValue Shamt = Op.getOperand(2);
4142   EVT VT = Lo.getValueType();
4143 
4144   // SRA expansion:
4145   //   if Shamt-XLEN < 0: // Shamt < XLEN
4146   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
4147   //     Hi = Hi >>s Shamt
4148   //   else:
4149   //     Lo = Hi >>s (Shamt-XLEN);
4150   //     Hi = Hi >>s (XLEN-1)
4151   //
4152   // SRL expansion:
4153   //   if Shamt-XLEN < 0: // Shamt < XLEN
4154   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
4155   //     Hi = Hi >>u Shamt
4156   //   else:
4157   //     Lo = Hi >>u (Shamt-XLEN);
4158   //     Hi = 0;
4159 
4160   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
4161 
4162   SDValue Zero = DAG.getConstant(0, DL, VT);
4163   SDValue One = DAG.getConstant(1, DL, VT);
4164   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4165   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4166   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4167   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
4168 
4169   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
4170   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
4171   SDValue ShiftLeftHi =
4172       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
4173   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
4174   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
4175   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
4176   SDValue HiFalse =
4177       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
4178 
4179   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4180 
4181   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
4182   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4183 
4184   SDValue Parts[2] = {Lo, Hi};
4185   return DAG.getMergeValues(Parts, DL);
4186 }
4187 
4188 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
4189 // legal equivalently-sized i8 type, so we can use that as a go-between.
4190 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
4191                                                   SelectionDAG &DAG) const {
4192   SDLoc DL(Op);
4193   MVT VT = Op.getSimpleValueType();
4194   SDValue SplatVal = Op.getOperand(0);
4195   // All-zeros or all-ones splats are handled specially.
4196   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
4197     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4198     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
4199   }
4200   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
4201     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4202     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
4203   }
4204   MVT XLenVT = Subtarget.getXLenVT();
4205   assert(SplatVal.getValueType() == XLenVT &&
4206          "Unexpected type for i1 splat value");
4207   MVT InterVT = VT.changeVectorElementType(MVT::i8);
4208   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
4209                          DAG.getConstant(1, DL, XLenVT));
4210   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
4211   SDValue Zero = DAG.getConstant(0, DL, InterVT);
4212   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
4213 }
4214 
4215 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
4216 // illegal (currently only vXi64 RV32).
4217 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4218 // them to VMV_V_X_VL.
4219 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4220                                                      SelectionDAG &DAG) const {
4221   SDLoc DL(Op);
4222   MVT VecVT = Op.getSimpleValueType();
4223   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4224          "Unexpected SPLAT_VECTOR_PARTS lowering");
4225 
4226   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4227   SDValue Lo = Op.getOperand(0);
4228   SDValue Hi = Op.getOperand(1);
4229 
4230   if (VecVT.isFixedLengthVector()) {
4231     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4232     SDLoc DL(Op);
4233     SDValue Mask, VL;
4234     std::tie(Mask, VL) =
4235         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4236 
4237     SDValue Res =
4238         splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
4239     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4240   }
4241 
4242   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4243     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4244     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
4245     // If Hi constant is all the same sign bit as Lo, lower this as a custom
4246     // node in order to try and match RVV vector/scalar instructions.
4247     if ((LoC >> 31) == HiC)
4248       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4249                          Lo, DAG.getRegister(RISCV::X0, MVT::i32));
4250   }
4251 
4252   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
4253   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4254       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4255       Hi.getConstantOperandVal(1) == 31)
4256     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
4257                        DAG.getRegister(RISCV::X0, MVT::i32));
4258 
4259   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
4260   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
4261                      DAG.getUNDEF(VecVT), Lo, Hi,
4262                      DAG.getRegister(RISCV::X0, MVT::i32));
4263 }
4264 
4265 // Custom-lower extensions from mask vectors by using a vselect either with 1
4266 // for zero/any-extension or -1 for sign-extension:
4267 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4268 // Note that any-extension is lowered identically to zero-extension.
4269 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4270                                                 int64_t ExtTrueVal) const {
4271   SDLoc DL(Op);
4272   MVT VecVT = Op.getSimpleValueType();
4273   SDValue Src = Op.getOperand(0);
4274   // Only custom-lower extensions from mask types
4275   assert(Src.getValueType().isVector() &&
4276          Src.getValueType().getVectorElementType() == MVT::i1);
4277 
4278   MVT XLenVT = Subtarget.getXLenVT();
4279   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4280   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4281 
4282   if (VecVT.isScalableVector()) {
4283     // Be careful not to introduce illegal scalar types at this stage, and be
4284     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
4285     // illegal and must be expanded. Since we know that the constants are
4286     // sign-extended 32-bit values, we use VMV_V_X_VL directly.
4287     bool IsRV32E64 =
4288         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
4289 
4290     if (!IsRV32E64) {
4291       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
4292       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
4293     } else {
4294       SplatZero =
4295           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4296                       SplatZero, DAG.getRegister(RISCV::X0, XLenVT));
4297       SplatTrueVal =
4298           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4299                       SplatTrueVal, DAG.getRegister(RISCV::X0, XLenVT));
4300     }
4301 
4302     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4303   }
4304 
4305   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4306   MVT I1ContainerVT =
4307       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4308 
4309   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4310 
4311   SDValue Mask, VL;
4312   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4313 
4314   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4315                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4316   SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4317                              DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
4318   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4319                                SplatTrueVal, SplatZero, VL);
4320 
4321   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4322 }
4323 
4324 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4325     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4326   MVT ExtVT = Op.getSimpleValueType();
4327   // Only custom-lower extensions from fixed-length vector types.
4328   if (!ExtVT.isFixedLengthVector())
4329     return Op;
4330   MVT VT = Op.getOperand(0).getSimpleValueType();
4331   // Grab the canonical container type for the extended type. Infer the smaller
4332   // type from that to ensure the same number of vector elements, as we know
4333   // the LMUL will be sufficient to hold the smaller type.
4334   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
4335   // Get the extended container type manually to ensure the same number of
4336   // vector elements between source and dest.
4337   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4338                                      ContainerExtVT.getVectorElementCount());
4339 
4340   SDValue Op1 =
4341       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4342 
4343   SDLoc DL(Op);
4344   SDValue Mask, VL;
4345   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4346 
4347   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4348 
4349   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4350 }
4351 
4352 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4353 // setcc operation:
4354 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
4355 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
4356                                                   SelectionDAG &DAG) const {
4357   SDLoc DL(Op);
4358   EVT MaskVT = Op.getValueType();
4359   // Only expect to custom-lower truncations to mask types
4360   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4361          "Unexpected type for vector mask lowering");
4362   SDValue Src = Op.getOperand(0);
4363   MVT VecVT = Src.getSimpleValueType();
4364 
4365   // If this is a fixed vector, we need to convert it to a scalable vector.
4366   MVT ContainerVT = VecVT;
4367   if (VecVT.isFixedLengthVector()) {
4368     ContainerVT = getContainerForFixedLengthVector(VecVT);
4369     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4370   }
4371 
4372   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4373   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4374 
4375   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4376                          DAG.getUNDEF(ContainerVT), SplatOne);
4377   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4378                           DAG.getUNDEF(ContainerVT), SplatZero);
4379 
4380   if (VecVT.isScalableVector()) {
4381     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
4382     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
4383   }
4384 
4385   SDValue Mask, VL;
4386   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4387 
4388   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4389   SDValue Trunc =
4390       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4391   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4392                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4393   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4394 }
4395 
4396 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4397 // first position of a vector, and that vector is slid up to the insert index.
4398 // By limiting the active vector length to index+1 and merging with the
4399 // original vector (with an undisturbed tail policy for elements >= VL), we
4400 // achieve the desired result of leaving all elements untouched except the one
4401 // at VL-1, which is replaced with the desired value.
4402 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4403                                                     SelectionDAG &DAG) const {
4404   SDLoc DL(Op);
4405   MVT VecVT = Op.getSimpleValueType();
4406   SDValue Vec = Op.getOperand(0);
4407   SDValue Val = Op.getOperand(1);
4408   SDValue Idx = Op.getOperand(2);
4409 
4410   if (VecVT.getVectorElementType() == MVT::i1) {
4411     // FIXME: For now we just promote to an i8 vector and insert into that,
4412     // but this is probably not optimal.
4413     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4414     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4415     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4416     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4417   }
4418 
4419   MVT ContainerVT = VecVT;
4420   // If the operand is a fixed-length vector, convert to a scalable one.
4421   if (VecVT.isFixedLengthVector()) {
4422     ContainerVT = getContainerForFixedLengthVector(VecVT);
4423     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4424   }
4425 
4426   MVT XLenVT = Subtarget.getXLenVT();
4427 
4428   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4429   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4430   // Even i64-element vectors on RV32 can be lowered without scalar
4431   // legalization if the most-significant 32 bits of the value are not affected
4432   // by the sign-extension of the lower 32 bits.
4433   // TODO: We could also catch sign extensions of a 32-bit value.
4434   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4435     const auto *CVal = cast<ConstantSDNode>(Val);
4436     if (isInt<32>(CVal->getSExtValue())) {
4437       IsLegalInsert = true;
4438       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4439     }
4440   }
4441 
4442   SDValue Mask, VL;
4443   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4444 
4445   SDValue ValInVec;
4446 
4447   if (IsLegalInsert) {
4448     unsigned Opc =
4449         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4450     if (isNullConstant(Idx)) {
4451       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4452       if (!VecVT.isFixedLengthVector())
4453         return Vec;
4454       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4455     }
4456     ValInVec =
4457         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4458   } else {
4459     // On RV32, i64-element vectors must be specially handled to place the
4460     // value at element 0, by using two vslide1up instructions in sequence on
4461     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4462     // this.
4463     SDValue One = DAG.getConstant(1, DL, XLenVT);
4464     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4465     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4466     MVT I32ContainerVT =
4467         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4468     SDValue I32Mask =
4469         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4470     // Limit the active VL to two.
4471     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
4472     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
4473     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4474     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT,
4475                            DAG.getUNDEF(I32ContainerVT), Zero, InsertI64VL);
4476     // First slide in the hi value, then the lo in underneath it.
4477     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4478                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
4479                            I32Mask, InsertI64VL);
4480     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4481                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo,
4482                            I32Mask, InsertI64VL);
4483     // Bitcast back to the right container type.
4484     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4485   }
4486 
4487   // Now that the value is in a vector, slide it into position.
4488   SDValue InsertVL =
4489       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4490   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4491                                 ValInVec, Idx, Mask, InsertVL);
4492   if (!VecVT.isFixedLengthVector())
4493     return Slideup;
4494   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4495 }
4496 
4497 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4498 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4499 // types this is done using VMV_X_S to allow us to glean information about the
4500 // sign bits of the result.
4501 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4502                                                      SelectionDAG &DAG) const {
4503   SDLoc DL(Op);
4504   SDValue Idx = Op.getOperand(1);
4505   SDValue Vec = Op.getOperand(0);
4506   EVT EltVT = Op.getValueType();
4507   MVT VecVT = Vec.getSimpleValueType();
4508   MVT XLenVT = Subtarget.getXLenVT();
4509 
4510   if (VecVT.getVectorElementType() == MVT::i1) {
4511     if (VecVT.isFixedLengthVector()) {
4512       unsigned NumElts = VecVT.getVectorNumElements();
4513       if (NumElts >= 8) {
4514         MVT WideEltVT;
4515         unsigned WidenVecLen;
4516         SDValue ExtractElementIdx;
4517         SDValue ExtractBitIdx;
4518         unsigned MaxEEW = Subtarget.getMaxELENForFixedLengthVectors();
4519         MVT LargestEltVT = MVT::getIntegerVT(
4520             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4521         if (NumElts <= LargestEltVT.getSizeInBits()) {
4522           assert(isPowerOf2_32(NumElts) &&
4523                  "the number of elements should be power of 2");
4524           WideEltVT = MVT::getIntegerVT(NumElts);
4525           WidenVecLen = 1;
4526           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4527           ExtractBitIdx = Idx;
4528         } else {
4529           WideEltVT = LargestEltVT;
4530           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4531           // extract element index = index / element width
4532           ExtractElementIdx = DAG.getNode(
4533               ISD::SRL, DL, XLenVT, Idx,
4534               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4535           // mask bit index = index % element width
4536           ExtractBitIdx = DAG.getNode(
4537               ISD::AND, DL, XLenVT, Idx,
4538               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4539         }
4540         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4541         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4542         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4543                                          Vec, ExtractElementIdx);
4544         // Extract the bit from GPR.
4545         SDValue ShiftRight =
4546             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4547         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4548                            DAG.getConstant(1, DL, XLenVT));
4549       }
4550     }
4551     // Otherwise, promote to an i8 vector and extract from that.
4552     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4553     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4554     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4555   }
4556 
4557   // If this is a fixed vector, we need to convert it to a scalable vector.
4558   MVT ContainerVT = VecVT;
4559   if (VecVT.isFixedLengthVector()) {
4560     ContainerVT = getContainerForFixedLengthVector(VecVT);
4561     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4562   }
4563 
4564   // If the index is 0, the vector is already in the right position.
4565   if (!isNullConstant(Idx)) {
4566     // Use a VL of 1 to avoid processing more elements than we need.
4567     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4568     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4569     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4570     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4571                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4572   }
4573 
4574   if (!EltVT.isInteger()) {
4575     // Floating-point extracts are handled in TableGen.
4576     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4577                        DAG.getConstant(0, DL, XLenVT));
4578   }
4579 
4580   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4581   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4582 }
4583 
4584 // Some RVV intrinsics may claim that they want an integer operand to be
4585 // promoted or expanded.
4586 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
4587                                           const RISCVSubtarget &Subtarget) {
4588   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4589           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4590          "Unexpected opcode");
4591 
4592   if (!Subtarget.hasVInstructions())
4593     return SDValue();
4594 
4595   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4596   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4597   SDLoc DL(Op);
4598 
4599   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4600       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4601   if (!II || !II->hasSplatOperand())
4602     return SDValue();
4603 
4604   unsigned SplatOp = II->SplatOperand + 1 + HasChain;
4605   assert(SplatOp < Op.getNumOperands());
4606 
4607   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4608   SDValue &ScalarOp = Operands[SplatOp];
4609   MVT OpVT = ScalarOp.getSimpleValueType();
4610   MVT XLenVT = Subtarget.getXLenVT();
4611 
4612   // If this isn't a scalar, or its type is XLenVT we're done.
4613   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4614     return SDValue();
4615 
4616   // Simplest case is that the operand needs to be promoted to XLenVT.
4617   if (OpVT.bitsLT(XLenVT)) {
4618     // If the operand is a constant, sign extend to increase our chances
4619     // of being able to use a .vi instruction. ANY_EXTEND would become a
4620     // a zero extend and the simm5 check in isel would fail.
4621     // FIXME: Should we ignore the upper bits in isel instead?
4622     unsigned ExtOpc =
4623         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4624     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4625     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4626   }
4627 
4628   // Use the previous operand to get the vXi64 VT. The result might be a mask
4629   // VT for compares. Using the previous operand assumes that the previous
4630   // operand will never have a smaller element size than a scalar operand and
4631   // that a widening operation never uses SEW=64.
4632   // NOTE: If this fails the below assert, we can probably just find the
4633   // element count from any operand or result and use it to construct the VT.
4634   assert(II->SplatOperand > 0 && "Unexpected splat operand!");
4635   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4636 
4637   // The more complex case is when the scalar is larger than XLenVT.
4638   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4639          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4640 
4641   // If this is a sign-extended 32-bit constant, we can truncate it and rely
4642   // on the instruction to sign-extend since SEW>XLEN.
4643   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
4644     if (isInt<32>(CVal->getSExtValue())) {
4645       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4646       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4647     }
4648   }
4649 
4650   // We need to convert the scalar to a splat vector.
4651   // FIXME: Can we implicitly truncate the scalar if it is known to
4652   // be sign extended?
4653   SDValue VL = getVLOperand(Op);
4654   assert(VL.getValueType() == XLenVT);
4655   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
4656   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4657 }
4658 
4659 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4660                                                      SelectionDAG &DAG) const {
4661   unsigned IntNo = Op.getConstantOperandVal(0);
4662   SDLoc DL(Op);
4663   MVT XLenVT = Subtarget.getXLenVT();
4664 
4665   switch (IntNo) {
4666   default:
4667     break; // Don't custom lower most intrinsics.
4668   case Intrinsic::thread_pointer: {
4669     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4670     return DAG.getRegister(RISCV::X4, PtrVT);
4671   }
4672   case Intrinsic::riscv_orc_b:
4673   case Intrinsic::riscv_brev8: {
4674     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
4675     unsigned Opc =
4676         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4677     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4678                        DAG.getConstant(7, DL, XLenVT));
4679   }
4680   case Intrinsic::riscv_grev:
4681   case Intrinsic::riscv_gorc: {
4682     unsigned Opc =
4683         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4684     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4685   }
4686   case Intrinsic::riscv_zip:
4687   case Intrinsic::riscv_unzip: {
4688     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4689     // For i32 the immdiate is 15. For i64 the immediate is 31.
4690     unsigned Opc =
4691         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4692     unsigned BitWidth = Op.getValueSizeInBits();
4693     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4694     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4695                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4696   }
4697   case Intrinsic::riscv_shfl:
4698   case Intrinsic::riscv_unshfl: {
4699     unsigned Opc =
4700         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4701     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4702   }
4703   case Intrinsic::riscv_bcompress:
4704   case Intrinsic::riscv_bdecompress: {
4705     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4706                                                        : RISCVISD::BDECOMPRESS;
4707     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4708   }
4709   case Intrinsic::riscv_bfp:
4710     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4711                        Op.getOperand(2));
4712   case Intrinsic::riscv_fsl:
4713     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4714                        Op.getOperand(2), Op.getOperand(3));
4715   case Intrinsic::riscv_fsr:
4716     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4717                        Op.getOperand(2), Op.getOperand(3));
4718   case Intrinsic::riscv_vmv_x_s:
4719     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4720     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4721                        Op.getOperand(1));
4722   case Intrinsic::riscv_vmv_v_x:
4723     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4724                             Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
4725                             Subtarget);
4726   case Intrinsic::riscv_vfmv_v_f:
4727     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4728                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4729   case Intrinsic::riscv_vmv_s_x: {
4730     SDValue Scalar = Op.getOperand(2);
4731 
4732     if (Scalar.getValueType().bitsLE(XLenVT)) {
4733       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4734       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4735                          Op.getOperand(1), Scalar, Op.getOperand(3));
4736     }
4737 
4738     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4739 
4740     // This is an i64 value that lives in two scalar registers. We have to
4741     // insert this in a convoluted way. First we build vXi64 splat containing
4742     // the/ two values that we assemble using some bit math. Next we'll use
4743     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4744     // to merge element 0 from our splat into the source vector.
4745     // FIXME: This is probably not the best way to do this, but it is
4746     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4747     // point.
4748     //   sw lo, (a0)
4749     //   sw hi, 4(a0)
4750     //   vlse vX, (a0)
4751     //
4752     //   vid.v      vVid
4753     //   vmseq.vx   mMask, vVid, 0
4754     //   vmerge.vvm vDest, vSrc, vVal, mMask
4755     MVT VT = Op.getSimpleValueType();
4756     SDValue Vec = Op.getOperand(1);
4757     SDValue VL = getVLOperand(Op);
4758 
4759     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
4760     if (Op.getOperand(1).isUndef())
4761       return SplattedVal;
4762     SDValue SplattedIdx =
4763         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
4764                     DAG.getConstant(0, DL, MVT::i32), VL);
4765 
4766     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
4767     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4768     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4769     SDValue SelectCond =
4770         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4771                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4772     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4773                        Vec, VL);
4774   }
4775   case Intrinsic::riscv_vslide1up:
4776   case Intrinsic::riscv_vslide1down:
4777   case Intrinsic::riscv_vslide1up_mask:
4778   case Intrinsic::riscv_vslide1down_mask: {
4779     // We need to special case these when the scalar is larger than XLen.
4780     unsigned NumOps = Op.getNumOperands();
4781     bool IsMasked = NumOps == 7;
4782     SDValue Scalar = Op.getOperand(3);
4783     if (Scalar.getValueType().bitsLE(XLenVT))
4784       break;
4785 
4786     // Splatting a sign extended constant is fine.
4787     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
4788       if (isInt<32>(CVal->getSExtValue()))
4789         break;
4790 
4791     MVT VT = Op.getSimpleValueType();
4792     assert(VT.getVectorElementType() == MVT::i64 &&
4793            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
4794 
4795     // Convert the vector source to the equivalent nxvXi32 vector.
4796     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4797     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(2));
4798 
4799     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4800                                    DAG.getConstant(0, DL, XLenVT));
4801     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4802                                    DAG.getConstant(1, DL, XLenVT));
4803 
4804     // Double the VL since we halved SEW.
4805     SDValue VL = getVLOperand(Op);
4806     SDValue I32VL =
4807         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4808 
4809     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
4810     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
4811 
4812     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4813     // instructions.
4814     SDValue Passthru = DAG.getBitcast(I32VT, Op.getOperand(1));
4815     if (!IsMasked) {
4816       if (IntNo == Intrinsic::riscv_vslide1up) {
4817         Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4818                           ScalarHi, I32Mask, I32VL);
4819         Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4820                           ScalarLo, I32Mask, I32VL);
4821       } else {
4822         Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4823                           ScalarLo, I32Mask, I32VL);
4824         Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4825                           ScalarHi, I32Mask, I32VL);
4826       }
4827     } else {
4828       // TODO Those VSLIDE1 could be TAMA because we use vmerge to select
4829       // maskedoff
4830       SDValue Undef = DAG.getUNDEF(I32VT);
4831       if (IntNo == Intrinsic::riscv_vslide1up_mask) {
4832         Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Undef, Vec,
4833                           ScalarHi, I32Mask, I32VL);
4834         Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Undef, Vec,
4835                           ScalarLo, I32Mask, I32VL);
4836       } else {
4837         Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Undef, Vec,
4838                           ScalarLo, I32Mask, I32VL);
4839         Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Undef, Vec,
4840                           ScalarHi, I32Mask, I32VL);
4841       }
4842     }
4843 
4844     // Convert back to nxvXi64.
4845     Vec = DAG.getBitcast(VT, Vec);
4846 
4847     if (!IsMasked)
4848       return Vec;
4849     // Apply mask after the operation.
4850     SDValue Mask = Op.getOperand(NumOps - 3);
4851     SDValue MaskedOff = Op.getOperand(1);
4852     // Assume Policy operand is the last operand.
4853     uint64_t Policy = Op.getConstantOperandVal(NumOps - 1);
4854     // We don't need to select maskedoff if it's undef.
4855     if (MaskedOff.isUndef())
4856       return Vec;
4857     // TAMU
4858     if (Policy == RISCVII::TAIL_AGNOSTIC)
4859       return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
4860                          VL);
4861     // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
4862     // It's fine because vmerge does not care mask policy.
4863     return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff, VL);
4864   }
4865   }
4866 
4867   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4868 }
4869 
4870 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4871                                                     SelectionDAG &DAG) const {
4872   unsigned IntNo = Op.getConstantOperandVal(1);
4873   switch (IntNo) {
4874   default:
4875     break;
4876   case Intrinsic::riscv_masked_strided_load: {
4877     SDLoc DL(Op);
4878     MVT XLenVT = Subtarget.getXLenVT();
4879 
4880     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4881     // the selection of the masked intrinsics doesn't do this for us.
4882     SDValue Mask = Op.getOperand(5);
4883     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4884 
4885     MVT VT = Op->getSimpleValueType(0);
4886     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4887 
4888     SDValue PassThru = Op.getOperand(2);
4889     if (!IsUnmasked) {
4890       MVT MaskVT =
4891           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4892       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4893       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4894     }
4895 
4896     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4897 
4898     SDValue IntID = DAG.getTargetConstant(
4899         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4900         XLenVT);
4901 
4902     auto *Load = cast<MemIntrinsicSDNode>(Op);
4903     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4904     if (IsUnmasked)
4905       Ops.push_back(DAG.getUNDEF(ContainerVT));
4906     else
4907       Ops.push_back(PassThru);
4908     Ops.push_back(Op.getOperand(3)); // Ptr
4909     Ops.push_back(Op.getOperand(4)); // Stride
4910     if (!IsUnmasked)
4911       Ops.push_back(Mask);
4912     Ops.push_back(VL);
4913     if (!IsUnmasked) {
4914       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4915       Ops.push_back(Policy);
4916     }
4917 
4918     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4919     SDValue Result =
4920         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4921                                 Load->getMemoryVT(), Load->getMemOperand());
4922     SDValue Chain = Result.getValue(1);
4923     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4924     return DAG.getMergeValues({Result, Chain}, DL);
4925   }
4926   }
4927 
4928   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4929 }
4930 
4931 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4932                                                  SelectionDAG &DAG) const {
4933   unsigned IntNo = Op.getConstantOperandVal(1);
4934   switch (IntNo) {
4935   default:
4936     break;
4937   case Intrinsic::riscv_masked_strided_store: {
4938     SDLoc DL(Op);
4939     MVT XLenVT = Subtarget.getXLenVT();
4940 
4941     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4942     // the selection of the masked intrinsics doesn't do this for us.
4943     SDValue Mask = Op.getOperand(5);
4944     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4945 
4946     SDValue Val = Op.getOperand(2);
4947     MVT VT = Val.getSimpleValueType();
4948     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4949 
4950     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4951     if (!IsUnmasked) {
4952       MVT MaskVT =
4953           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4954       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4955     }
4956 
4957     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4958 
4959     SDValue IntID = DAG.getTargetConstant(
4960         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4961         XLenVT);
4962 
4963     auto *Store = cast<MemIntrinsicSDNode>(Op);
4964     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4965     Ops.push_back(Val);
4966     Ops.push_back(Op.getOperand(3)); // Ptr
4967     Ops.push_back(Op.getOperand(4)); // Stride
4968     if (!IsUnmasked)
4969       Ops.push_back(Mask);
4970     Ops.push_back(VL);
4971 
4972     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4973                                    Ops, Store->getMemoryVT(),
4974                                    Store->getMemOperand());
4975   }
4976   }
4977 
4978   return SDValue();
4979 }
4980 
4981 static MVT getLMUL1VT(MVT VT) {
4982   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4983          "Unexpected vector MVT");
4984   return MVT::getScalableVectorVT(
4985       VT.getVectorElementType(),
4986       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4987 }
4988 
4989 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4990   switch (ISDOpcode) {
4991   default:
4992     llvm_unreachable("Unhandled reduction");
4993   case ISD::VECREDUCE_ADD:
4994     return RISCVISD::VECREDUCE_ADD_VL;
4995   case ISD::VECREDUCE_UMAX:
4996     return RISCVISD::VECREDUCE_UMAX_VL;
4997   case ISD::VECREDUCE_SMAX:
4998     return RISCVISD::VECREDUCE_SMAX_VL;
4999   case ISD::VECREDUCE_UMIN:
5000     return RISCVISD::VECREDUCE_UMIN_VL;
5001   case ISD::VECREDUCE_SMIN:
5002     return RISCVISD::VECREDUCE_SMIN_VL;
5003   case ISD::VECREDUCE_AND:
5004     return RISCVISD::VECREDUCE_AND_VL;
5005   case ISD::VECREDUCE_OR:
5006     return RISCVISD::VECREDUCE_OR_VL;
5007   case ISD::VECREDUCE_XOR:
5008     return RISCVISD::VECREDUCE_XOR_VL;
5009   }
5010 }
5011 
5012 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
5013                                                          SelectionDAG &DAG,
5014                                                          bool IsVP) const {
5015   SDLoc DL(Op);
5016   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
5017   MVT VecVT = Vec.getSimpleValueType();
5018   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
5019           Op.getOpcode() == ISD::VECREDUCE_OR ||
5020           Op.getOpcode() == ISD::VECREDUCE_XOR ||
5021           Op.getOpcode() == ISD::VP_REDUCE_AND ||
5022           Op.getOpcode() == ISD::VP_REDUCE_OR ||
5023           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
5024          "Unexpected reduction lowering");
5025 
5026   MVT XLenVT = Subtarget.getXLenVT();
5027   assert(Op.getValueType() == XLenVT &&
5028          "Expected reduction output to be legalized to XLenVT");
5029 
5030   MVT ContainerVT = VecVT;
5031   if (VecVT.isFixedLengthVector()) {
5032     ContainerVT = getContainerForFixedLengthVector(VecVT);
5033     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5034   }
5035 
5036   SDValue Mask, VL;
5037   if (IsVP) {
5038     Mask = Op.getOperand(2);
5039     VL = Op.getOperand(3);
5040   } else {
5041     std::tie(Mask, VL) =
5042         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5043   }
5044 
5045   unsigned BaseOpc;
5046   ISD::CondCode CC;
5047   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
5048 
5049   switch (Op.getOpcode()) {
5050   default:
5051     llvm_unreachable("Unhandled reduction");
5052   case ISD::VECREDUCE_AND:
5053   case ISD::VP_REDUCE_AND: {
5054     // vcpop ~x == 0
5055     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
5056     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
5057     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5058     CC = ISD::SETEQ;
5059     BaseOpc = ISD::AND;
5060     break;
5061   }
5062   case ISD::VECREDUCE_OR:
5063   case ISD::VP_REDUCE_OR:
5064     // vcpop x != 0
5065     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5066     CC = ISD::SETNE;
5067     BaseOpc = ISD::OR;
5068     break;
5069   case ISD::VECREDUCE_XOR:
5070   case ISD::VP_REDUCE_XOR: {
5071     // ((vcpop x) & 1) != 0
5072     SDValue One = DAG.getConstant(1, DL, XLenVT);
5073     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5074     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
5075     CC = ISD::SETNE;
5076     BaseOpc = ISD::XOR;
5077     break;
5078   }
5079   }
5080 
5081   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
5082 
5083   if (!IsVP)
5084     return SetCC;
5085 
5086   // Now include the start value in the operation.
5087   // Note that we must return the start value when no elements are operated
5088   // upon. The vcpop instructions we've emitted in each case above will return
5089   // 0 for an inactive vector, and so we've already received the neutral value:
5090   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
5091   // can simply include the start value.
5092   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
5093 }
5094 
5095 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
5096                                             SelectionDAG &DAG) const {
5097   SDLoc DL(Op);
5098   SDValue Vec = Op.getOperand(0);
5099   EVT VecEVT = Vec.getValueType();
5100 
5101   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
5102 
5103   // Due to ordering in legalize types we may have a vector type that needs to
5104   // be split. Do that manually so we can get down to a legal type.
5105   while (getTypeAction(*DAG.getContext(), VecEVT) ==
5106          TargetLowering::TypeSplitVector) {
5107     SDValue Lo, Hi;
5108     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
5109     VecEVT = Lo.getValueType();
5110     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
5111   }
5112 
5113   // TODO: The type may need to be widened rather than split. Or widened before
5114   // it can be split.
5115   if (!isTypeLegal(VecEVT))
5116     return SDValue();
5117 
5118   MVT VecVT = VecEVT.getSimpleVT();
5119   MVT VecEltVT = VecVT.getVectorElementType();
5120   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
5121 
5122   MVT ContainerVT = VecVT;
5123   if (VecVT.isFixedLengthVector()) {
5124     ContainerVT = getContainerForFixedLengthVector(VecVT);
5125     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5126   }
5127 
5128   MVT M1VT = getLMUL1VT(ContainerVT);
5129   MVT XLenVT = Subtarget.getXLenVT();
5130 
5131   SDValue Mask, VL;
5132   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5133 
5134   SDValue NeutralElem =
5135       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
5136   SDValue IdentitySplat =
5137       lowerScalarSplat(SDValue(), NeutralElem, DAG.getConstant(1, DL, XLenVT),
5138                        M1VT, DL, DAG, Subtarget);
5139   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
5140                                   IdentitySplat, Mask, VL);
5141   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5142                              DAG.getConstant(0, DL, XLenVT));
5143   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5144 }
5145 
5146 // Given a reduction op, this function returns the matching reduction opcode,
5147 // the vector SDValue and the scalar SDValue required to lower this to a
5148 // RISCVISD node.
5149 static std::tuple<unsigned, SDValue, SDValue>
5150 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
5151   SDLoc DL(Op);
5152   auto Flags = Op->getFlags();
5153   unsigned Opcode = Op.getOpcode();
5154   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
5155   switch (Opcode) {
5156   default:
5157     llvm_unreachable("Unhandled reduction");
5158   case ISD::VECREDUCE_FADD: {
5159     // Use positive zero if we can. It is cheaper to materialize.
5160     SDValue Zero =
5161         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
5162     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
5163   }
5164   case ISD::VECREDUCE_SEQ_FADD:
5165     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
5166                            Op.getOperand(0));
5167   case ISD::VECREDUCE_FMIN:
5168     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
5169                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5170   case ISD::VECREDUCE_FMAX:
5171     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
5172                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5173   }
5174 }
5175 
5176 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
5177                                               SelectionDAG &DAG) const {
5178   SDLoc DL(Op);
5179   MVT VecEltVT = Op.getSimpleValueType();
5180 
5181   unsigned RVVOpcode;
5182   SDValue VectorVal, ScalarVal;
5183   std::tie(RVVOpcode, VectorVal, ScalarVal) =
5184       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
5185   MVT VecVT = VectorVal.getSimpleValueType();
5186 
5187   MVT ContainerVT = VecVT;
5188   if (VecVT.isFixedLengthVector()) {
5189     ContainerVT = getContainerForFixedLengthVector(VecVT);
5190     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
5191   }
5192 
5193   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
5194   MVT XLenVT = Subtarget.getXLenVT();
5195 
5196   SDValue Mask, VL;
5197   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5198 
5199   SDValue ScalarSplat =
5200       lowerScalarSplat(SDValue(), ScalarVal, DAG.getConstant(1, DL, XLenVT),
5201                        M1VT, DL, DAG, Subtarget);
5202   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
5203                                   VectorVal, ScalarSplat, Mask, VL);
5204   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5205                      DAG.getConstant(0, DL, XLenVT));
5206 }
5207 
5208 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
5209   switch (ISDOpcode) {
5210   default:
5211     llvm_unreachable("Unhandled reduction");
5212   case ISD::VP_REDUCE_ADD:
5213     return RISCVISD::VECREDUCE_ADD_VL;
5214   case ISD::VP_REDUCE_UMAX:
5215     return RISCVISD::VECREDUCE_UMAX_VL;
5216   case ISD::VP_REDUCE_SMAX:
5217     return RISCVISD::VECREDUCE_SMAX_VL;
5218   case ISD::VP_REDUCE_UMIN:
5219     return RISCVISD::VECREDUCE_UMIN_VL;
5220   case ISD::VP_REDUCE_SMIN:
5221     return RISCVISD::VECREDUCE_SMIN_VL;
5222   case ISD::VP_REDUCE_AND:
5223     return RISCVISD::VECREDUCE_AND_VL;
5224   case ISD::VP_REDUCE_OR:
5225     return RISCVISD::VECREDUCE_OR_VL;
5226   case ISD::VP_REDUCE_XOR:
5227     return RISCVISD::VECREDUCE_XOR_VL;
5228   case ISD::VP_REDUCE_FADD:
5229     return RISCVISD::VECREDUCE_FADD_VL;
5230   case ISD::VP_REDUCE_SEQ_FADD:
5231     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
5232   case ISD::VP_REDUCE_FMAX:
5233     return RISCVISD::VECREDUCE_FMAX_VL;
5234   case ISD::VP_REDUCE_FMIN:
5235     return RISCVISD::VECREDUCE_FMIN_VL;
5236   }
5237 }
5238 
5239 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
5240                                            SelectionDAG &DAG) const {
5241   SDLoc DL(Op);
5242   SDValue Vec = Op.getOperand(1);
5243   EVT VecEVT = Vec.getValueType();
5244 
5245   // TODO: The type may need to be widened rather than split. Or widened before
5246   // it can be split.
5247   if (!isTypeLegal(VecEVT))
5248     return SDValue();
5249 
5250   MVT VecVT = VecEVT.getSimpleVT();
5251   MVT VecEltVT = VecVT.getVectorElementType();
5252   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5253 
5254   MVT ContainerVT = VecVT;
5255   if (VecVT.isFixedLengthVector()) {
5256     ContainerVT = getContainerForFixedLengthVector(VecVT);
5257     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5258   }
5259 
5260   SDValue VL = Op.getOperand(3);
5261   SDValue Mask = Op.getOperand(2);
5262 
5263   MVT M1VT = getLMUL1VT(ContainerVT);
5264   MVT XLenVT = Subtarget.getXLenVT();
5265   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5266 
5267   SDValue StartSplat = lowerScalarSplat(SDValue(), Op.getOperand(0),
5268                                         DAG.getConstant(1, DL, XLenVT), M1VT,
5269                                         DL, DAG, Subtarget);
5270   SDValue Reduction =
5271       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5272   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5273                              DAG.getConstant(0, DL, XLenVT));
5274   if (!VecVT.isInteger())
5275     return Elt0;
5276   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5277 }
5278 
5279 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5280                                                    SelectionDAG &DAG) const {
5281   SDValue Vec = Op.getOperand(0);
5282   SDValue SubVec = Op.getOperand(1);
5283   MVT VecVT = Vec.getSimpleValueType();
5284   MVT SubVecVT = SubVec.getSimpleValueType();
5285 
5286   SDLoc DL(Op);
5287   MVT XLenVT = Subtarget.getXLenVT();
5288   unsigned OrigIdx = Op.getConstantOperandVal(2);
5289   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5290 
5291   // We don't have the ability to slide mask vectors up indexed by their i1
5292   // elements; the smallest we can do is i8. Often we are able to bitcast to
5293   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5294   // into a scalable one, we might not necessarily have enough scalable
5295   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
5296   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5297       (OrigIdx != 0 || !Vec.isUndef())) {
5298     if (VecVT.getVectorMinNumElements() >= 8 &&
5299         SubVecVT.getVectorMinNumElements() >= 8) {
5300       assert(OrigIdx % 8 == 0 && "Invalid index");
5301       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5302              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5303              "Unexpected mask vector lowering");
5304       OrigIdx /= 8;
5305       SubVecVT =
5306           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5307                            SubVecVT.isScalableVector());
5308       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5309                                VecVT.isScalableVector());
5310       Vec = DAG.getBitcast(VecVT, Vec);
5311       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5312     } else {
5313       // We can't slide this mask vector up indexed by its i1 elements.
5314       // This poses a problem when we wish to insert a scalable vector which
5315       // can't be re-expressed as a larger type. Just choose the slow path and
5316       // extend to a larger type, then truncate back down.
5317       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5318       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5319       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5320       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5321       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5322                         Op.getOperand(2));
5323       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5324       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5325     }
5326   }
5327 
5328   // If the subvector vector is a fixed-length type, we cannot use subregister
5329   // manipulation to simplify the codegen; we don't know which register of a
5330   // LMUL group contains the specific subvector as we only know the minimum
5331   // register size. Therefore we must slide the vector group up the full
5332   // amount.
5333   if (SubVecVT.isFixedLengthVector()) {
5334     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5335       return Op;
5336     MVT ContainerVT = VecVT;
5337     if (VecVT.isFixedLengthVector()) {
5338       ContainerVT = getContainerForFixedLengthVector(VecVT);
5339       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5340     }
5341     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5342                          DAG.getUNDEF(ContainerVT), SubVec,
5343                          DAG.getConstant(0, DL, XLenVT));
5344     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5345       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5346       return DAG.getBitcast(Op.getValueType(), SubVec);
5347     }
5348     SDValue Mask =
5349         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5350     // Set the vector length to only the number of elements we care about. Note
5351     // that for slideup this includes the offset.
5352     SDValue VL =
5353         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5354     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5355     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5356                                   SubVec, SlideupAmt, Mask, VL);
5357     if (VecVT.isFixedLengthVector())
5358       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5359     return DAG.getBitcast(Op.getValueType(), Slideup);
5360   }
5361 
5362   unsigned SubRegIdx, RemIdx;
5363   std::tie(SubRegIdx, RemIdx) =
5364       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5365           VecVT, SubVecVT, OrigIdx, TRI);
5366 
5367   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5368   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5369                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5370                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5371 
5372   // 1. If the Idx has been completely eliminated and this subvector's size is
5373   // a vector register or a multiple thereof, or the surrounding elements are
5374   // undef, then this is a subvector insert which naturally aligns to a vector
5375   // register. These can easily be handled using subregister manipulation.
5376   // 2. If the subvector is smaller than a vector register, then the insertion
5377   // must preserve the undisturbed elements of the register. We do this by
5378   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5379   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5380   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5381   // LMUL=1 type back into the larger vector (resolving to another subregister
5382   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
5383   // to avoid allocating a large register group to hold our subvector.
5384   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5385     return Op;
5386 
5387   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
5388   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
5389   // (in our case undisturbed). This means we can set up a subvector insertion
5390   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5391   // size of the subvector.
5392   MVT InterSubVT = VecVT;
5393   SDValue AlignedExtract = Vec;
5394   unsigned AlignedIdx = OrigIdx - RemIdx;
5395   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5396     InterSubVT = getLMUL1VT(VecVT);
5397     // Extract a subvector equal to the nearest full vector register type. This
5398     // should resolve to a EXTRACT_SUBREG instruction.
5399     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5400                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5401   }
5402 
5403   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5404   // For scalable vectors this must be further multiplied by vscale.
5405   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5406 
5407   SDValue Mask, VL;
5408   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5409 
5410   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5411   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5412   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5413   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5414 
5415   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5416                        DAG.getUNDEF(InterSubVT), SubVec,
5417                        DAG.getConstant(0, DL, XLenVT));
5418 
5419   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5420                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5421 
5422   // If required, insert this subvector back into the correct vector register.
5423   // This should resolve to an INSERT_SUBREG instruction.
5424   if (VecVT.bitsGT(InterSubVT))
5425     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5426                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5427 
5428   // We might have bitcast from a mask type: cast back to the original type if
5429   // required.
5430   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5431 }
5432 
5433 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5434                                                     SelectionDAG &DAG) const {
5435   SDValue Vec = Op.getOperand(0);
5436   MVT SubVecVT = Op.getSimpleValueType();
5437   MVT VecVT = Vec.getSimpleValueType();
5438 
5439   SDLoc DL(Op);
5440   MVT XLenVT = Subtarget.getXLenVT();
5441   unsigned OrigIdx = Op.getConstantOperandVal(1);
5442   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5443 
5444   // We don't have the ability to slide mask vectors down indexed by their i1
5445   // elements; the smallest we can do is i8. Often we are able to bitcast to
5446   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5447   // from a scalable one, we might not necessarily have enough scalable
5448   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
5449   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5450     if (VecVT.getVectorMinNumElements() >= 8 &&
5451         SubVecVT.getVectorMinNumElements() >= 8) {
5452       assert(OrigIdx % 8 == 0 && "Invalid index");
5453       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5454              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5455              "Unexpected mask vector lowering");
5456       OrigIdx /= 8;
5457       SubVecVT =
5458           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5459                            SubVecVT.isScalableVector());
5460       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5461                                VecVT.isScalableVector());
5462       Vec = DAG.getBitcast(VecVT, Vec);
5463     } else {
5464       // We can't slide this mask vector down, indexed by its i1 elements.
5465       // This poses a problem when we wish to extract a scalable vector which
5466       // can't be re-expressed as a larger type. Just choose the slow path and
5467       // extend to a larger type, then truncate back down.
5468       // TODO: We could probably improve this when extracting certain fixed
5469       // from fixed, where we can extract as i8 and shift the correct element
5470       // right to reach the desired subvector?
5471       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5472       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5473       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5474       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5475                         Op.getOperand(1));
5476       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5477       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5478     }
5479   }
5480 
5481   // If the subvector vector is a fixed-length type, we cannot use subregister
5482   // manipulation to simplify the codegen; we don't know which register of a
5483   // LMUL group contains the specific subvector as we only know the minimum
5484   // register size. Therefore we must slide the vector group down the full
5485   // amount.
5486   if (SubVecVT.isFixedLengthVector()) {
5487     // With an index of 0 this is a cast-like subvector, which can be performed
5488     // with subregister operations.
5489     if (OrigIdx == 0)
5490       return Op;
5491     MVT ContainerVT = VecVT;
5492     if (VecVT.isFixedLengthVector()) {
5493       ContainerVT = getContainerForFixedLengthVector(VecVT);
5494       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5495     }
5496     SDValue Mask =
5497         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5498     // Set the vector length to only the number of elements we care about. This
5499     // avoids sliding down elements we're going to discard straight away.
5500     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5501     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5502     SDValue Slidedown =
5503         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5504                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5505     // Now we can use a cast-like subvector extract to get the result.
5506     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5507                             DAG.getConstant(0, DL, XLenVT));
5508     return DAG.getBitcast(Op.getValueType(), Slidedown);
5509   }
5510 
5511   unsigned SubRegIdx, RemIdx;
5512   std::tie(SubRegIdx, RemIdx) =
5513       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5514           VecVT, SubVecVT, OrigIdx, TRI);
5515 
5516   // If the Idx has been completely eliminated then this is a subvector extract
5517   // which naturally aligns to a vector register. These can easily be handled
5518   // using subregister manipulation.
5519   if (RemIdx == 0)
5520     return Op;
5521 
5522   // Else we must shift our vector register directly to extract the subvector.
5523   // Do this using VSLIDEDOWN.
5524 
5525   // If the vector type is an LMUL-group type, extract a subvector equal to the
5526   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
5527   // instruction.
5528   MVT InterSubVT = VecVT;
5529   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5530     InterSubVT = getLMUL1VT(VecVT);
5531     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5532                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5533   }
5534 
5535   // Slide this vector register down by the desired number of elements in order
5536   // to place the desired subvector starting at element 0.
5537   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5538   // For scalable vectors this must be further multiplied by vscale.
5539   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5540 
5541   SDValue Mask, VL;
5542   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5543   SDValue Slidedown =
5544       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5545                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5546 
5547   // Now the vector is in the right position, extract our final subvector. This
5548   // should resolve to a COPY.
5549   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5550                           DAG.getConstant(0, DL, XLenVT));
5551 
5552   // We might have bitcast from a mask type: cast back to the original type if
5553   // required.
5554   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5555 }
5556 
5557 // Lower step_vector to the vid instruction. Any non-identity step value must
5558 // be accounted for my manual expansion.
5559 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5560                                               SelectionDAG &DAG) const {
5561   SDLoc DL(Op);
5562   MVT VT = Op.getSimpleValueType();
5563   MVT XLenVT = Subtarget.getXLenVT();
5564   SDValue Mask, VL;
5565   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5566   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5567   uint64_t StepValImm = Op.getConstantOperandVal(0);
5568   if (StepValImm != 1) {
5569     if (isPowerOf2_64(StepValImm)) {
5570       SDValue StepVal =
5571           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
5572                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5573       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5574     } else {
5575       SDValue StepVal = lowerScalarSplat(
5576           SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
5577           VL, VT, DL, DAG, Subtarget);
5578       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5579     }
5580   }
5581   return StepVec;
5582 }
5583 
5584 // Implement vector_reverse using vrgather.vv with indices determined by
5585 // subtracting the id of each element from (VLMAX-1). This will convert
5586 // the indices like so:
5587 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5588 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5589 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5590                                                  SelectionDAG &DAG) const {
5591   SDLoc DL(Op);
5592   MVT VecVT = Op.getSimpleValueType();
5593   unsigned EltSize = VecVT.getScalarSizeInBits();
5594   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5595 
5596   unsigned MaxVLMAX = 0;
5597   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5598   if (VectorBitsMax != 0)
5599     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
5600 
5601   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5602   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5603 
5604   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5605   // to use vrgatherei16.vv.
5606   // TODO: It's also possible to use vrgatherei16.vv for other types to
5607   // decrease register width for the index calculation.
5608   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
5609     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
5610     // Reverse each half, then reassemble them in reverse order.
5611     // NOTE: It's also possible that after splitting that VLMAX no longer
5612     // requires vrgatherei16.vv.
5613     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5614       SDValue Lo, Hi;
5615       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5616       EVT LoVT, HiVT;
5617       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5618       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5619       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5620       // Reassemble the low and high pieces reversed.
5621       // FIXME: This is a CONCAT_VECTORS.
5622       SDValue Res =
5623           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5624                       DAG.getIntPtrConstant(0, DL));
5625       return DAG.getNode(
5626           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5627           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5628     }
5629 
5630     // Just promote the int type to i16 which will double the LMUL.
5631     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5632     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5633   }
5634 
5635   MVT XLenVT = Subtarget.getXLenVT();
5636   SDValue Mask, VL;
5637   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5638 
5639   // Calculate VLMAX-1 for the desired SEW.
5640   unsigned MinElts = VecVT.getVectorMinNumElements();
5641   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5642                               DAG.getConstant(MinElts, DL, XLenVT));
5643   SDValue VLMinus1 =
5644       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5645 
5646   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5647   bool IsRV32E64 =
5648       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5649   SDValue SplatVL;
5650   if (!IsRV32E64)
5651     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5652   else
5653     SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
5654                           VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
5655 
5656   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5657   SDValue Indices =
5658       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5659 
5660   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5661 }
5662 
5663 SDValue
5664 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5665                                                      SelectionDAG &DAG) const {
5666   SDLoc DL(Op);
5667   auto *Load = cast<LoadSDNode>(Op);
5668 
5669   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5670                                         Load->getMemoryVT(),
5671                                         *Load->getMemOperand()) &&
5672          "Expecting a correctly-aligned load");
5673 
5674   MVT VT = Op.getSimpleValueType();
5675   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5676 
5677   SDValue VL =
5678       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5679 
5680   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5681   SDValue NewLoad = DAG.getMemIntrinsicNode(
5682       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
5683       Load->getMemoryVT(), Load->getMemOperand());
5684 
5685   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5686   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5687 }
5688 
5689 SDValue
5690 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5691                                                       SelectionDAG &DAG) const {
5692   SDLoc DL(Op);
5693   auto *Store = cast<StoreSDNode>(Op);
5694 
5695   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5696                                         Store->getMemoryVT(),
5697                                         *Store->getMemOperand()) &&
5698          "Expecting a correctly-aligned store");
5699 
5700   SDValue StoreVal = Store->getValue();
5701   MVT VT = StoreVal.getSimpleValueType();
5702 
5703   // If the size less than a byte, we need to pad with zeros to make a byte.
5704   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5705     VT = MVT::v8i1;
5706     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5707                            DAG.getConstant(0, DL, VT), StoreVal,
5708                            DAG.getIntPtrConstant(0, DL));
5709   }
5710 
5711   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5712 
5713   SDValue VL =
5714       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5715 
5716   SDValue NewValue =
5717       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5718   return DAG.getMemIntrinsicNode(
5719       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
5720       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
5721       Store->getMemoryVT(), Store->getMemOperand());
5722 }
5723 
5724 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5725                                              SelectionDAG &DAG) const {
5726   SDLoc DL(Op);
5727   MVT VT = Op.getSimpleValueType();
5728 
5729   const auto *MemSD = cast<MemSDNode>(Op);
5730   EVT MemVT = MemSD->getMemoryVT();
5731   MachineMemOperand *MMO = MemSD->getMemOperand();
5732   SDValue Chain = MemSD->getChain();
5733   SDValue BasePtr = MemSD->getBasePtr();
5734 
5735   SDValue Mask, PassThru, VL;
5736   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5737     Mask = VPLoad->getMask();
5738     PassThru = DAG.getUNDEF(VT);
5739     VL = VPLoad->getVectorLength();
5740   } else {
5741     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5742     Mask = MLoad->getMask();
5743     PassThru = MLoad->getPassThru();
5744   }
5745 
5746   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5747 
5748   MVT XLenVT = Subtarget.getXLenVT();
5749 
5750   MVT ContainerVT = VT;
5751   if (VT.isFixedLengthVector()) {
5752     ContainerVT = getContainerForFixedLengthVector(VT);
5753     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5754     if (!IsUnmasked) {
5755       MVT MaskVT =
5756           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5757       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5758     }
5759   }
5760 
5761   if (!VL)
5762     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5763 
5764   unsigned IntID =
5765       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5766   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5767   if (IsUnmasked)
5768     Ops.push_back(DAG.getUNDEF(ContainerVT));
5769   else
5770     Ops.push_back(PassThru);
5771   Ops.push_back(BasePtr);
5772   if (!IsUnmasked)
5773     Ops.push_back(Mask);
5774   Ops.push_back(VL);
5775   if (!IsUnmasked)
5776     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5777 
5778   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5779 
5780   SDValue Result =
5781       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5782   Chain = Result.getValue(1);
5783 
5784   if (VT.isFixedLengthVector())
5785     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5786 
5787   return DAG.getMergeValues({Result, Chain}, DL);
5788 }
5789 
5790 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5791                                               SelectionDAG &DAG) const {
5792   SDLoc DL(Op);
5793 
5794   const auto *MemSD = cast<MemSDNode>(Op);
5795   EVT MemVT = MemSD->getMemoryVT();
5796   MachineMemOperand *MMO = MemSD->getMemOperand();
5797   SDValue Chain = MemSD->getChain();
5798   SDValue BasePtr = MemSD->getBasePtr();
5799   SDValue Val, Mask, VL;
5800 
5801   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5802     Val = VPStore->getValue();
5803     Mask = VPStore->getMask();
5804     VL = VPStore->getVectorLength();
5805   } else {
5806     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5807     Val = MStore->getValue();
5808     Mask = MStore->getMask();
5809   }
5810 
5811   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5812 
5813   MVT VT = Val.getSimpleValueType();
5814   MVT XLenVT = Subtarget.getXLenVT();
5815 
5816   MVT ContainerVT = VT;
5817   if (VT.isFixedLengthVector()) {
5818     ContainerVT = getContainerForFixedLengthVector(VT);
5819 
5820     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5821     if (!IsUnmasked) {
5822       MVT MaskVT =
5823           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5824       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5825     }
5826   }
5827 
5828   if (!VL)
5829     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5830 
5831   unsigned IntID =
5832       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5833   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5834   Ops.push_back(Val);
5835   Ops.push_back(BasePtr);
5836   if (!IsUnmasked)
5837     Ops.push_back(Mask);
5838   Ops.push_back(VL);
5839 
5840   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5841                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5842 }
5843 
5844 SDValue
5845 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5846                                                       SelectionDAG &DAG) const {
5847   MVT InVT = Op.getOperand(0).getSimpleValueType();
5848   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5849 
5850   MVT VT = Op.getSimpleValueType();
5851 
5852   SDValue Op1 =
5853       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5854   SDValue Op2 =
5855       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5856 
5857   SDLoc DL(Op);
5858   SDValue VL =
5859       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5860 
5861   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5862   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5863 
5864   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5865                             Op.getOperand(2), Mask, VL);
5866 
5867   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5868 }
5869 
5870 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5871     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5872   MVT VT = Op.getSimpleValueType();
5873 
5874   if (VT.getVectorElementType() == MVT::i1)
5875     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5876 
5877   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5878 }
5879 
5880 SDValue
5881 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5882                                                       SelectionDAG &DAG) const {
5883   unsigned Opc;
5884   switch (Op.getOpcode()) {
5885   default: llvm_unreachable("Unexpected opcode!");
5886   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5887   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5888   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5889   }
5890 
5891   return lowerToScalableOp(Op, DAG, Opc);
5892 }
5893 
5894 // Lower vector ABS to smax(X, sub(0, X)).
5895 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5896   SDLoc DL(Op);
5897   MVT VT = Op.getSimpleValueType();
5898   SDValue X = Op.getOperand(0);
5899 
5900   assert(VT.isFixedLengthVector() && "Unexpected type");
5901 
5902   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5903   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5904 
5905   SDValue Mask, VL;
5906   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5907 
5908   SDValue SplatZero = DAG.getNode(
5909       RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
5910       DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5911   SDValue NegX =
5912       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5913   SDValue Max =
5914       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5915 
5916   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5917 }
5918 
5919 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5920     SDValue Op, SelectionDAG &DAG) const {
5921   SDLoc DL(Op);
5922   MVT VT = Op.getSimpleValueType();
5923   SDValue Mag = Op.getOperand(0);
5924   SDValue Sign = Op.getOperand(1);
5925   assert(Mag.getValueType() == Sign.getValueType() &&
5926          "Can only handle COPYSIGN with matching types.");
5927 
5928   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5929   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5930   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5931 
5932   SDValue Mask, VL;
5933   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5934 
5935   SDValue CopySign =
5936       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5937 
5938   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5939 }
5940 
5941 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
5942     SDValue Op, SelectionDAG &DAG) const {
5943   MVT VT = Op.getSimpleValueType();
5944   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5945 
5946   MVT I1ContainerVT =
5947       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5948 
5949   SDValue CC =
5950       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
5951   SDValue Op1 =
5952       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5953   SDValue Op2 =
5954       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
5955 
5956   SDLoc DL(Op);
5957   SDValue Mask, VL;
5958   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5959 
5960   SDValue Select =
5961       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
5962 
5963   return convertFromScalableVector(VT, Select, DAG, Subtarget);
5964 }
5965 
5966 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
5967                                                unsigned NewOpc,
5968                                                bool HasMask) const {
5969   MVT VT = Op.getSimpleValueType();
5970   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5971 
5972   // Create list of operands by converting existing ones to scalable types.
5973   SmallVector<SDValue, 6> Ops;
5974   for (const SDValue &V : Op->op_values()) {
5975     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5976 
5977     // Pass through non-vector operands.
5978     if (!V.getValueType().isVector()) {
5979       Ops.push_back(V);
5980       continue;
5981     }
5982 
5983     // "cast" fixed length vector to a scalable vector.
5984     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
5985            "Only fixed length vectors are supported!");
5986     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5987   }
5988 
5989   SDLoc DL(Op);
5990   SDValue Mask, VL;
5991   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5992   if (HasMask)
5993     Ops.push_back(Mask);
5994   Ops.push_back(VL);
5995 
5996   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
5997   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
5998 }
5999 
6000 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
6001 // * Operands of each node are assumed to be in the same order.
6002 // * The EVL operand is promoted from i32 to i64 on RV64.
6003 // * Fixed-length vectors are converted to their scalable-vector container
6004 //   types.
6005 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
6006                                        unsigned RISCVISDOpc) const {
6007   SDLoc DL(Op);
6008   MVT VT = Op.getSimpleValueType();
6009   SmallVector<SDValue, 4> Ops;
6010 
6011   for (const auto &OpIdx : enumerate(Op->ops())) {
6012     SDValue V = OpIdx.value();
6013     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6014     // Pass through operands which aren't fixed-length vectors.
6015     if (!V.getValueType().isFixedLengthVector()) {
6016       Ops.push_back(V);
6017       continue;
6018     }
6019     // "cast" fixed length vector to a scalable vector.
6020     MVT OpVT = V.getSimpleValueType();
6021     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
6022     assert(useRVVForFixedLengthVectorVT(OpVT) &&
6023            "Only fixed length vectors are supported!");
6024     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6025   }
6026 
6027   if (!VT.isFixedLengthVector())
6028     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
6029 
6030   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6031 
6032   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
6033 
6034   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
6035 }
6036 
6037 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
6038                                             unsigned MaskOpc,
6039                                             unsigned VecOpc) const {
6040   MVT VT = Op.getSimpleValueType();
6041   if (VT.getVectorElementType() != MVT::i1)
6042     return lowerVPOp(Op, DAG, VecOpc);
6043 
6044   // It is safe to drop mask parameter as masked-off elements are undef.
6045   SDValue Op1 = Op->getOperand(0);
6046   SDValue Op2 = Op->getOperand(1);
6047   SDValue VL = Op->getOperand(3);
6048 
6049   MVT ContainerVT = VT;
6050   const bool IsFixed = VT.isFixedLengthVector();
6051   if (IsFixed) {
6052     ContainerVT = getContainerForFixedLengthVector(VT);
6053     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6054     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6055   }
6056 
6057   SDLoc DL(Op);
6058   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
6059   if (!IsFixed)
6060     return Val;
6061   return convertFromScalableVector(VT, Val, DAG, Subtarget);
6062 }
6063 
6064 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
6065 // matched to a RVV indexed load. The RVV indexed load instructions only
6066 // support the "unsigned unscaled" addressing mode; indices are implicitly
6067 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6068 // signed or scaled indexing is extended to the XLEN value type and scaled
6069 // accordingly.
6070 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
6071                                                SelectionDAG &DAG) const {
6072   SDLoc DL(Op);
6073   MVT VT = Op.getSimpleValueType();
6074 
6075   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6076   EVT MemVT = MemSD->getMemoryVT();
6077   MachineMemOperand *MMO = MemSD->getMemOperand();
6078   SDValue Chain = MemSD->getChain();
6079   SDValue BasePtr = MemSD->getBasePtr();
6080 
6081   ISD::LoadExtType LoadExtType;
6082   SDValue Index, Mask, PassThru, VL;
6083 
6084   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
6085     Index = VPGN->getIndex();
6086     Mask = VPGN->getMask();
6087     PassThru = DAG.getUNDEF(VT);
6088     VL = VPGN->getVectorLength();
6089     // VP doesn't support extending loads.
6090     LoadExtType = ISD::NON_EXTLOAD;
6091   } else {
6092     // Else it must be a MGATHER.
6093     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
6094     Index = MGN->getIndex();
6095     Mask = MGN->getMask();
6096     PassThru = MGN->getPassThru();
6097     LoadExtType = MGN->getExtensionType();
6098   }
6099 
6100   MVT IndexVT = Index.getSimpleValueType();
6101   MVT XLenVT = Subtarget.getXLenVT();
6102 
6103   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6104          "Unexpected VTs!");
6105   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6106   // Targets have to explicitly opt-in for extending vector loads.
6107   assert(LoadExtType == ISD::NON_EXTLOAD &&
6108          "Unexpected extending MGATHER/VP_GATHER");
6109   (void)LoadExtType;
6110 
6111   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6112   // the selection of the masked intrinsics doesn't do this for us.
6113   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6114 
6115   MVT ContainerVT = VT;
6116   if (VT.isFixedLengthVector()) {
6117     // We need to use the larger of the result and index type to determine the
6118     // scalable type to use so we don't increase LMUL for any operand/result.
6119     if (VT.bitsGE(IndexVT)) {
6120       ContainerVT = getContainerForFixedLengthVector(VT);
6121       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6122                                  ContainerVT.getVectorElementCount());
6123     } else {
6124       IndexVT = getContainerForFixedLengthVector(IndexVT);
6125       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
6126                                      IndexVT.getVectorElementCount());
6127     }
6128 
6129     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6130 
6131     if (!IsUnmasked) {
6132       MVT MaskVT =
6133           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6134       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6135       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
6136     }
6137   }
6138 
6139   if (!VL)
6140     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6141 
6142   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6143     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6144     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6145                                    VL);
6146     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6147                         TrueMask, VL);
6148   }
6149 
6150   unsigned IntID =
6151       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
6152   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6153   if (IsUnmasked)
6154     Ops.push_back(DAG.getUNDEF(ContainerVT));
6155   else
6156     Ops.push_back(PassThru);
6157   Ops.push_back(BasePtr);
6158   Ops.push_back(Index);
6159   if (!IsUnmasked)
6160     Ops.push_back(Mask);
6161   Ops.push_back(VL);
6162   if (!IsUnmasked)
6163     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
6164 
6165   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
6166   SDValue Result =
6167       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
6168   Chain = Result.getValue(1);
6169 
6170   if (VT.isFixedLengthVector())
6171     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
6172 
6173   return DAG.getMergeValues({Result, Chain}, DL);
6174 }
6175 
6176 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
6177 // matched to a RVV indexed store. The RVV indexed store instructions only
6178 // support the "unsigned unscaled" addressing mode; indices are implicitly
6179 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6180 // signed or scaled indexing is extended to the XLEN value type and scaled
6181 // accordingly.
6182 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
6183                                                 SelectionDAG &DAG) const {
6184   SDLoc DL(Op);
6185   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6186   EVT MemVT = MemSD->getMemoryVT();
6187   MachineMemOperand *MMO = MemSD->getMemOperand();
6188   SDValue Chain = MemSD->getChain();
6189   SDValue BasePtr = MemSD->getBasePtr();
6190 
6191   bool IsTruncatingStore = false;
6192   SDValue Index, Mask, Val, VL;
6193 
6194   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
6195     Index = VPSN->getIndex();
6196     Mask = VPSN->getMask();
6197     Val = VPSN->getValue();
6198     VL = VPSN->getVectorLength();
6199     // VP doesn't support truncating stores.
6200     IsTruncatingStore = false;
6201   } else {
6202     // Else it must be a MSCATTER.
6203     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
6204     Index = MSN->getIndex();
6205     Mask = MSN->getMask();
6206     Val = MSN->getValue();
6207     IsTruncatingStore = MSN->isTruncatingStore();
6208   }
6209 
6210   MVT VT = Val.getSimpleValueType();
6211   MVT IndexVT = Index.getSimpleValueType();
6212   MVT XLenVT = Subtarget.getXLenVT();
6213 
6214   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6215          "Unexpected VTs!");
6216   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6217   // Targets have to explicitly opt-in for extending vector loads and
6218   // truncating vector stores.
6219   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
6220   (void)IsTruncatingStore;
6221 
6222   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6223   // the selection of the masked intrinsics doesn't do this for us.
6224   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6225 
6226   MVT ContainerVT = VT;
6227   if (VT.isFixedLengthVector()) {
6228     // We need to use the larger of the value and index type to determine the
6229     // scalable type to use so we don't increase LMUL for any operand/result.
6230     if (VT.bitsGE(IndexVT)) {
6231       ContainerVT = getContainerForFixedLengthVector(VT);
6232       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6233                                  ContainerVT.getVectorElementCount());
6234     } else {
6235       IndexVT = getContainerForFixedLengthVector(IndexVT);
6236       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
6237                                      IndexVT.getVectorElementCount());
6238     }
6239 
6240     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6241     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
6242 
6243     if (!IsUnmasked) {
6244       MVT MaskVT =
6245           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6246       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6247     }
6248   }
6249 
6250   if (!VL)
6251     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6252 
6253   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6254     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6255     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6256                                    VL);
6257     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6258                         TrueMask, VL);
6259   }
6260 
6261   unsigned IntID =
6262       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
6263   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6264   Ops.push_back(Val);
6265   Ops.push_back(BasePtr);
6266   Ops.push_back(Index);
6267   if (!IsUnmasked)
6268     Ops.push_back(Mask);
6269   Ops.push_back(VL);
6270 
6271   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
6272                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
6273 }
6274 
6275 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
6276                                                SelectionDAG &DAG) const {
6277   const MVT XLenVT = Subtarget.getXLenVT();
6278   SDLoc DL(Op);
6279   SDValue Chain = Op->getOperand(0);
6280   SDValue SysRegNo = DAG.getTargetConstant(
6281       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6282   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
6283   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
6284 
6285   // Encoding used for rounding mode in RISCV differs from that used in
6286   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
6287   // table, which consists of a sequence of 4-bit fields, each representing
6288   // corresponding FLT_ROUNDS mode.
6289   static const int Table =
6290       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
6291       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
6292       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
6293       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
6294       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
6295 
6296   SDValue Shift =
6297       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
6298   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6299                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6300   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6301                                DAG.getConstant(7, DL, XLenVT));
6302 
6303   return DAG.getMergeValues({Masked, Chain}, DL);
6304 }
6305 
6306 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
6307                                                SelectionDAG &DAG) const {
6308   const MVT XLenVT = Subtarget.getXLenVT();
6309   SDLoc DL(Op);
6310   SDValue Chain = Op->getOperand(0);
6311   SDValue RMValue = Op->getOperand(1);
6312   SDValue SysRegNo = DAG.getTargetConstant(
6313       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6314 
6315   // Encoding used for rounding mode in RISCV differs from that used in
6316   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
6317   // a table, which consists of a sequence of 4-bit fields, each representing
6318   // corresponding RISCV mode.
6319   static const unsigned Table =
6320       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
6321       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
6322       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
6323       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
6324       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
6325 
6326   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
6327                               DAG.getConstant(2, DL, XLenVT));
6328   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6329                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6330   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6331                         DAG.getConstant(0x7, DL, XLenVT));
6332   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
6333                      RMValue);
6334 }
6335 
6336 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
6337   switch (IntNo) {
6338   default:
6339     llvm_unreachable("Unexpected Intrinsic");
6340   case Intrinsic::riscv_grev:
6341     return RISCVISD::GREVW;
6342   case Intrinsic::riscv_gorc:
6343     return RISCVISD::GORCW;
6344   case Intrinsic::riscv_bcompress:
6345     return RISCVISD::BCOMPRESSW;
6346   case Intrinsic::riscv_bdecompress:
6347     return RISCVISD::BDECOMPRESSW;
6348   case Intrinsic::riscv_bfp:
6349     return RISCVISD::BFPW;
6350   case Intrinsic::riscv_fsl:
6351     return RISCVISD::FSLW;
6352   case Intrinsic::riscv_fsr:
6353     return RISCVISD::FSRW;
6354   }
6355 }
6356 
6357 // Converts the given intrinsic to a i64 operation with any extension.
6358 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
6359                                          unsigned IntNo) {
6360   SDLoc DL(N);
6361   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
6362   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6363   SDValue NewOp2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6364   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp1, NewOp2);
6365   // ReplaceNodeResults requires we maintain the same type for the return value.
6366   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6367 }
6368 
6369 // Returns the opcode of the target-specific SDNode that implements the 32-bit
6370 // form of the given Opcode.
6371 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
6372   switch (Opcode) {
6373   default:
6374     llvm_unreachable("Unexpected opcode");
6375   case ISD::SHL:
6376     return RISCVISD::SLLW;
6377   case ISD::SRA:
6378     return RISCVISD::SRAW;
6379   case ISD::SRL:
6380     return RISCVISD::SRLW;
6381   case ISD::SDIV:
6382     return RISCVISD::DIVW;
6383   case ISD::UDIV:
6384     return RISCVISD::DIVUW;
6385   case ISD::UREM:
6386     return RISCVISD::REMUW;
6387   case ISD::ROTL:
6388     return RISCVISD::ROLW;
6389   case ISD::ROTR:
6390     return RISCVISD::RORW;
6391   case RISCVISD::GREV:
6392     return RISCVISD::GREVW;
6393   case RISCVISD::GORC:
6394     return RISCVISD::GORCW;
6395   }
6396 }
6397 
6398 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
6399 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
6400 // otherwise be promoted to i64, making it difficult to select the
6401 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
6402 // type i8/i16/i32 is lost.
6403 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
6404                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
6405   SDLoc DL(N);
6406   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6407   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
6408   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
6409   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6410   // ReplaceNodeResults requires we maintain the same type for the return value.
6411   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6412 }
6413 
6414 // Converts the given 32-bit operation to a i64 operation with signed extension
6415 // semantic to reduce the signed extension instructions.
6416 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
6417   SDLoc DL(N);
6418   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6419   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6420   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
6421   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6422                                DAG.getValueType(MVT::i32));
6423   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
6424 }
6425 
6426 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
6427                                              SmallVectorImpl<SDValue> &Results,
6428                                              SelectionDAG &DAG) const {
6429   SDLoc DL(N);
6430   switch (N->getOpcode()) {
6431   default:
6432     llvm_unreachable("Don't know how to custom type legalize this operation!");
6433   case ISD::STRICT_FP_TO_SINT:
6434   case ISD::STRICT_FP_TO_UINT:
6435   case ISD::FP_TO_SINT:
6436   case ISD::FP_TO_UINT: {
6437     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6438            "Unexpected custom legalisation");
6439     bool IsStrict = N->isStrictFPOpcode();
6440     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
6441                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
6442     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
6443     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
6444         TargetLowering::TypeSoftenFloat) {
6445       if (!isTypeLegal(Op0.getValueType()))
6446         return;
6447       if (IsStrict) {
6448         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
6449                                 : RISCVISD::STRICT_FCVT_WU_RV64;
6450         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
6451         SDValue Res = DAG.getNode(
6452             Opc, DL, VTs, N->getOperand(0), Op0,
6453             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6454         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6455         Results.push_back(Res.getValue(1));
6456         return;
6457       }
6458       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
6459       SDValue Res =
6460           DAG.getNode(Opc, DL, MVT::i64, Op0,
6461                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6462       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6463       return;
6464     }
6465     // If the FP type needs to be softened, emit a library call using the 'si'
6466     // version. If we left it to default legalization we'd end up with 'di'. If
6467     // the FP type doesn't need to be softened just let generic type
6468     // legalization promote the result type.
6469     RTLIB::Libcall LC;
6470     if (IsSigned)
6471       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
6472     else
6473       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
6474     MakeLibCallOptions CallOptions;
6475     EVT OpVT = Op0.getValueType();
6476     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
6477     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
6478     SDValue Result;
6479     std::tie(Result, Chain) =
6480         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
6481     Results.push_back(Result);
6482     if (IsStrict)
6483       Results.push_back(Chain);
6484     break;
6485   }
6486   case ISD::READCYCLECOUNTER: {
6487     assert(!Subtarget.is64Bit() &&
6488            "READCYCLECOUNTER only has custom type legalization on riscv32");
6489 
6490     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
6491     SDValue RCW =
6492         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
6493 
6494     Results.push_back(
6495         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6496     Results.push_back(RCW.getValue(2));
6497     break;
6498   }
6499   case ISD::MUL: {
6500     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6501     unsigned XLen = Subtarget.getXLen();
6502     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
6503     if (Size > XLen) {
6504       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6505       SDValue LHS = N->getOperand(0);
6506       SDValue RHS = N->getOperand(1);
6507       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6508 
6509       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6510       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6511       // We need exactly one side to be unsigned.
6512       if (LHSIsU == RHSIsU)
6513         return;
6514 
6515       auto MakeMULPair = [&](SDValue S, SDValue U) {
6516         MVT XLenVT = Subtarget.getXLenVT();
6517         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6518         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6519         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6520         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6521         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6522       };
6523 
6524       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6525       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6526 
6527       // The other operand should be signed, but still prefer MULH when
6528       // possible.
6529       if (RHSIsU && LHSIsS && !RHSIsS)
6530         Results.push_back(MakeMULPair(LHS, RHS));
6531       else if (LHSIsU && RHSIsS && !LHSIsS)
6532         Results.push_back(MakeMULPair(RHS, LHS));
6533 
6534       return;
6535     }
6536     LLVM_FALLTHROUGH;
6537   }
6538   case ISD::ADD:
6539   case ISD::SUB:
6540     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6541            "Unexpected custom legalisation");
6542     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6543     break;
6544   case ISD::SHL:
6545   case ISD::SRA:
6546   case ISD::SRL:
6547     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6548            "Unexpected custom legalisation");
6549     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6550       Results.push_back(customLegalizeToWOp(N, DAG));
6551       break;
6552     }
6553 
6554     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6555     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6556     // shift amount.
6557     if (N->getOpcode() == ISD::SHL) {
6558       SDLoc DL(N);
6559       SDValue NewOp0 =
6560           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6561       SDValue NewOp1 =
6562           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6563       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6564       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6565                                    DAG.getValueType(MVT::i32));
6566       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6567     }
6568 
6569     break;
6570   case ISD::ROTL:
6571   case ISD::ROTR:
6572     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6573            "Unexpected custom legalisation");
6574     Results.push_back(customLegalizeToWOp(N, DAG));
6575     break;
6576   case ISD::CTTZ:
6577   case ISD::CTTZ_ZERO_UNDEF:
6578   case ISD::CTLZ:
6579   case ISD::CTLZ_ZERO_UNDEF: {
6580     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6581            "Unexpected custom legalisation");
6582 
6583     SDValue NewOp0 =
6584         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6585     bool IsCTZ =
6586         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6587     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6588     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6589     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6590     return;
6591   }
6592   case ISD::SDIV:
6593   case ISD::UDIV:
6594   case ISD::UREM: {
6595     MVT VT = N->getSimpleValueType(0);
6596     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6597            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6598            "Unexpected custom legalisation");
6599     // Don't promote division/remainder by constant since we should expand those
6600     // to multiply by magic constant.
6601     // FIXME: What if the expansion is disabled for minsize.
6602     if (N->getOperand(1).getOpcode() == ISD::Constant)
6603       return;
6604 
6605     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6606     // the upper 32 bits. For other types we need to sign or zero extend
6607     // based on the opcode.
6608     unsigned ExtOpc = ISD::ANY_EXTEND;
6609     if (VT != MVT::i32)
6610       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6611                                            : ISD::ZERO_EXTEND;
6612 
6613     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6614     break;
6615   }
6616   case ISD::UADDO:
6617   case ISD::USUBO: {
6618     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6619            "Unexpected custom legalisation");
6620     bool IsAdd = N->getOpcode() == ISD::UADDO;
6621     // Create an ADDW or SUBW.
6622     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6623     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6624     SDValue Res =
6625         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6626     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6627                       DAG.getValueType(MVT::i32));
6628 
6629     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
6630     // Since the inputs are sign extended from i32, this is equivalent to
6631     // comparing the lower 32 bits.
6632     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6633     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
6634                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
6635 
6636     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6637     Results.push_back(Overflow);
6638     return;
6639   }
6640   case ISD::UADDSAT:
6641   case ISD::USUBSAT: {
6642     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6643            "Unexpected custom legalisation");
6644     if (Subtarget.hasStdExtZbb()) {
6645       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6646       // sign extend allows overflow of the lower 32 bits to be detected on
6647       // the promoted size.
6648       SDValue LHS =
6649           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6650       SDValue RHS =
6651           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6652       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6653       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6654       return;
6655     }
6656 
6657     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6658     // promotion for UADDO/USUBO.
6659     Results.push_back(expandAddSubSat(N, DAG));
6660     return;
6661   }
6662   case ISD::BITCAST: {
6663     EVT VT = N->getValueType(0);
6664     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
6665     SDValue Op0 = N->getOperand(0);
6666     EVT Op0VT = Op0.getValueType();
6667     MVT XLenVT = Subtarget.getXLenVT();
6668     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
6669       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
6670       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
6671     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
6672                Subtarget.hasStdExtF()) {
6673       SDValue FPConv =
6674           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
6675       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
6676     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
6677                isTypeLegal(Op0VT)) {
6678       // Custom-legalize bitcasts from fixed-length vector types to illegal
6679       // scalar types in order to improve codegen. Bitcast the vector to a
6680       // one-element vector type whose element type is the same as the result
6681       // type, and extract the first element.
6682       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
6683       if (isTypeLegal(BVT)) {
6684         SDValue BVec = DAG.getBitcast(BVT, Op0);
6685         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
6686                                       DAG.getConstant(0, DL, XLenVT)));
6687       }
6688     }
6689     break;
6690   }
6691   case RISCVISD::GREV:
6692   case RISCVISD::GORC: {
6693     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6694            "Unexpected custom legalisation");
6695     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6696     // This is similar to customLegalizeToWOp, except that we pass the second
6697     // operand (a TargetConstant) straight through: it is already of type
6698     // XLenVT.
6699     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6700     SDValue NewOp0 =
6701         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6702     SDValue NewOp1 =
6703         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6704     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6705     // ReplaceNodeResults requires we maintain the same type for the return
6706     // value.
6707     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6708     break;
6709   }
6710   case RISCVISD::SHFL: {
6711     // There is no SHFLIW instruction, but we can just promote the operation.
6712     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6713            "Unexpected custom legalisation");
6714     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6715     SDValue NewOp0 =
6716         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6717     SDValue NewOp1 =
6718         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6719     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
6720     // ReplaceNodeResults requires we maintain the same type for the return
6721     // value.
6722     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6723     break;
6724   }
6725   case ISD::BSWAP:
6726   case ISD::BITREVERSE: {
6727     MVT VT = N->getSimpleValueType(0);
6728     MVT XLenVT = Subtarget.getXLenVT();
6729     assert((VT == MVT::i8 || VT == MVT::i16 ||
6730             (VT == MVT::i32 && Subtarget.is64Bit())) &&
6731            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
6732     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
6733     unsigned Imm = VT.getSizeInBits() - 1;
6734     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
6735     if (N->getOpcode() == ISD::BSWAP)
6736       Imm &= ~0x7U;
6737     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
6738     SDValue GREVI =
6739         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
6740     // ReplaceNodeResults requires we maintain the same type for the return
6741     // value.
6742     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
6743     break;
6744   }
6745   case ISD::FSHL:
6746   case ISD::FSHR: {
6747     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6748            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
6749     SDValue NewOp0 =
6750         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6751     SDValue NewOp1 =
6752         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6753     SDValue NewShAmt =
6754         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6755     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
6756     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
6757     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
6758                            DAG.getConstant(0x1f, DL, MVT::i64));
6759     // fshl and fshr concatenate their operands in the same order. fsrw and fslw
6760     // instruction use different orders. fshl will return its first operand for
6761     // shift of zero, fshr will return its second operand. fsl and fsr both
6762     // return rs1 so the ISD nodes need to have different operand orders.
6763     // Shift amount is in rs2.
6764     unsigned Opc = RISCVISD::FSLW;
6765     if (N->getOpcode() == ISD::FSHR) {
6766       std::swap(NewOp0, NewOp1);
6767       Opc = RISCVISD::FSRW;
6768     }
6769     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
6770     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
6771     break;
6772   }
6773   case ISD::EXTRACT_VECTOR_ELT: {
6774     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
6775     // type is illegal (currently only vXi64 RV32).
6776     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
6777     // transferred to the destination register. We issue two of these from the
6778     // upper- and lower- halves of the SEW-bit vector element, slid down to the
6779     // first element.
6780     SDValue Vec = N->getOperand(0);
6781     SDValue Idx = N->getOperand(1);
6782 
6783     // The vector type hasn't been legalized yet so we can't issue target
6784     // specific nodes if it needs legalization.
6785     // FIXME: We would manually legalize if it's important.
6786     if (!isTypeLegal(Vec.getValueType()))
6787       return;
6788 
6789     MVT VecVT = Vec.getSimpleValueType();
6790 
6791     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
6792            VecVT.getVectorElementType() == MVT::i64 &&
6793            "Unexpected EXTRACT_VECTOR_ELT legalization");
6794 
6795     // If this is a fixed vector, we need to convert it to a scalable vector.
6796     MVT ContainerVT = VecVT;
6797     if (VecVT.isFixedLengthVector()) {
6798       ContainerVT = getContainerForFixedLengthVector(VecVT);
6799       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
6800     }
6801 
6802     MVT XLenVT = Subtarget.getXLenVT();
6803 
6804     // Use a VL of 1 to avoid processing more elements than we need.
6805     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6806     SDValue VL = DAG.getConstant(1, DL, XLenVT);
6807     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6808 
6809     // Unless the index is known to be 0, we must slide the vector down to get
6810     // the desired element into index 0.
6811     if (!isNullConstant(Idx)) {
6812       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
6813                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
6814     }
6815 
6816     // Extract the lower XLEN bits of the correct vector element.
6817     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6818 
6819     // To extract the upper XLEN bits of the vector element, shift the first
6820     // element right by 32 bits and re-extract the lower XLEN bits.
6821     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6822                                      DAG.getUNDEF(ContainerVT),
6823                                      DAG.getConstant(32, DL, XLenVT), VL);
6824     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
6825                                  ThirtyTwoV, Mask, VL);
6826 
6827     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6828 
6829     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6830     break;
6831   }
6832   case ISD::INTRINSIC_WO_CHAIN: {
6833     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
6834     switch (IntNo) {
6835     default:
6836       llvm_unreachable(
6837           "Don't know how to custom type legalize this intrinsic!");
6838     case Intrinsic::riscv_grev:
6839     case Intrinsic::riscv_gorc:
6840     case Intrinsic::riscv_bcompress:
6841     case Intrinsic::riscv_bdecompress:
6842     case Intrinsic::riscv_bfp: {
6843       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6844              "Unexpected custom legalisation");
6845       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
6846       break;
6847     }
6848     case Intrinsic::riscv_fsl:
6849     case Intrinsic::riscv_fsr: {
6850       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6851              "Unexpected custom legalisation");
6852       SDValue NewOp1 =
6853           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6854       SDValue NewOp2 =
6855           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6856       SDValue NewOp3 =
6857           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3));
6858       unsigned Opc = getRISCVWOpcodeByIntr(IntNo);
6859       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2, NewOp3);
6860       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6861       break;
6862     }
6863     case Intrinsic::riscv_orc_b: {
6864       // Lower to the GORCI encoding for orc.b with the operand extended.
6865       SDValue NewOp =
6866           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6867       // If Zbp is enabled, use GORCIW which will sign extend the result.
6868       unsigned Opc =
6869           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
6870       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
6871                                 DAG.getConstant(7, DL, MVT::i64));
6872       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6873       return;
6874     }
6875     case Intrinsic::riscv_shfl:
6876     case Intrinsic::riscv_unshfl: {
6877       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6878              "Unexpected custom legalisation");
6879       SDValue NewOp1 =
6880           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6881       SDValue NewOp2 =
6882           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6883       unsigned Opc =
6884           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
6885       // There is no (UN)SHFLIW. If the control word is a constant, we can use
6886       // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
6887       // will be shuffled the same way as the lower 32 bit half, but the two
6888       // halves won't cross.
6889       if (isa<ConstantSDNode>(NewOp2)) {
6890         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
6891                              DAG.getConstant(0xf, DL, MVT::i64));
6892         Opc =
6893             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
6894       }
6895       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6896       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6897       break;
6898     }
6899     case Intrinsic::riscv_vmv_x_s: {
6900       EVT VT = N->getValueType(0);
6901       MVT XLenVT = Subtarget.getXLenVT();
6902       if (VT.bitsLT(XLenVT)) {
6903         // Simple case just extract using vmv.x.s and truncate.
6904         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
6905                                       Subtarget.getXLenVT(), N->getOperand(1));
6906         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
6907         return;
6908       }
6909 
6910       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
6911              "Unexpected custom legalization");
6912 
6913       // We need to do the move in two steps.
6914       SDValue Vec = N->getOperand(1);
6915       MVT VecVT = Vec.getSimpleValueType();
6916 
6917       // First extract the lower XLEN bits of the element.
6918       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6919 
6920       // To extract the upper XLEN bits of the vector element, shift the first
6921       // element right by 32 bits and re-extract the lower XLEN bits.
6922       SDValue VL = DAG.getConstant(1, DL, XLenVT);
6923       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
6924       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6925       SDValue ThirtyTwoV =
6926           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
6927                       DAG.getConstant(32, DL, XLenVT), VL);
6928       SDValue LShr32 =
6929           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
6930       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6931 
6932       Results.push_back(
6933           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6934       break;
6935     }
6936     }
6937     break;
6938   }
6939   case ISD::VECREDUCE_ADD:
6940   case ISD::VECREDUCE_AND:
6941   case ISD::VECREDUCE_OR:
6942   case ISD::VECREDUCE_XOR:
6943   case ISD::VECREDUCE_SMAX:
6944   case ISD::VECREDUCE_UMAX:
6945   case ISD::VECREDUCE_SMIN:
6946   case ISD::VECREDUCE_UMIN:
6947     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
6948       Results.push_back(V);
6949     break;
6950   case ISD::VP_REDUCE_ADD:
6951   case ISD::VP_REDUCE_AND:
6952   case ISD::VP_REDUCE_OR:
6953   case ISD::VP_REDUCE_XOR:
6954   case ISD::VP_REDUCE_SMAX:
6955   case ISD::VP_REDUCE_UMAX:
6956   case ISD::VP_REDUCE_SMIN:
6957   case ISD::VP_REDUCE_UMIN:
6958     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
6959       Results.push_back(V);
6960     break;
6961   case ISD::FLT_ROUNDS_: {
6962     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
6963     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
6964     Results.push_back(Res.getValue(0));
6965     Results.push_back(Res.getValue(1));
6966     break;
6967   }
6968   }
6969 }
6970 
6971 // A structure to hold one of the bit-manipulation patterns below. Together, a
6972 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
6973 //   (or (and (shl x, 1), 0xAAAAAAAA),
6974 //       (and (srl x, 1), 0x55555555))
6975 struct RISCVBitmanipPat {
6976   SDValue Op;
6977   unsigned ShAmt;
6978   bool IsSHL;
6979 
6980   bool formsPairWith(const RISCVBitmanipPat &Other) const {
6981     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
6982   }
6983 };
6984 
6985 // Matches patterns of the form
6986 //   (and (shl x, C2), (C1 << C2))
6987 //   (and (srl x, C2), C1)
6988 //   (shl (and x, C1), C2)
6989 //   (srl (and x, (C1 << C2)), C2)
6990 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
6991 // The expected masks for each shift amount are specified in BitmanipMasks where
6992 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
6993 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
6994 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
6995 // XLen is 64.
6996 static Optional<RISCVBitmanipPat>
6997 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
6998   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
6999          "Unexpected number of masks");
7000   Optional<uint64_t> Mask;
7001   // Optionally consume a mask around the shift operation.
7002   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
7003     Mask = Op.getConstantOperandVal(1);
7004     Op = Op.getOperand(0);
7005   }
7006   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
7007     return None;
7008   bool IsSHL = Op.getOpcode() == ISD::SHL;
7009 
7010   if (!isa<ConstantSDNode>(Op.getOperand(1)))
7011     return None;
7012   uint64_t ShAmt = Op.getConstantOperandVal(1);
7013 
7014   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7015   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
7016     return None;
7017   // If we don't have enough masks for 64 bit, then we must be trying to
7018   // match SHFL so we're only allowed to shift 1/4 of the width.
7019   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
7020     return None;
7021 
7022   SDValue Src = Op.getOperand(0);
7023 
7024   // The expected mask is shifted left when the AND is found around SHL
7025   // patterns.
7026   //   ((x >> 1) & 0x55555555)
7027   //   ((x << 1) & 0xAAAAAAAA)
7028   bool SHLExpMask = IsSHL;
7029 
7030   if (!Mask) {
7031     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
7032     // the mask is all ones: consume that now.
7033     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
7034       Mask = Src.getConstantOperandVal(1);
7035       Src = Src.getOperand(0);
7036       // The expected mask is now in fact shifted left for SRL, so reverse the
7037       // decision.
7038       //   ((x & 0xAAAAAAAA) >> 1)
7039       //   ((x & 0x55555555) << 1)
7040       SHLExpMask = !SHLExpMask;
7041     } else {
7042       // Use a default shifted mask of all-ones if there's no AND, truncated
7043       // down to the expected width. This simplifies the logic later on.
7044       Mask = maskTrailingOnes<uint64_t>(Width);
7045       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
7046     }
7047   }
7048 
7049   unsigned MaskIdx = Log2_32(ShAmt);
7050   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7051 
7052   if (SHLExpMask)
7053     ExpMask <<= ShAmt;
7054 
7055   if (Mask != ExpMask)
7056     return None;
7057 
7058   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
7059 }
7060 
7061 // Matches any of the following bit-manipulation patterns:
7062 //   (and (shl x, 1), (0x55555555 << 1))
7063 //   (and (srl x, 1), 0x55555555)
7064 //   (shl (and x, 0x55555555), 1)
7065 //   (srl (and x, (0x55555555 << 1)), 1)
7066 // where the shift amount and mask may vary thus:
7067 //   [1]  = 0x55555555 / 0xAAAAAAAA
7068 //   [2]  = 0x33333333 / 0xCCCCCCCC
7069 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
7070 //   [8]  = 0x00FF00FF / 0xFF00FF00
7071 //   [16] = 0x0000FFFF / 0xFFFFFFFF
7072 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
7073 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
7074   // These are the unshifted masks which we use to match bit-manipulation
7075   // patterns. They may be shifted left in certain circumstances.
7076   static const uint64_t BitmanipMasks[] = {
7077       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
7078       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
7079 
7080   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7081 }
7082 
7083 // Match the following pattern as a GREVI(W) operation
7084 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
7085 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
7086                                const RISCVSubtarget &Subtarget) {
7087   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7088   EVT VT = Op.getValueType();
7089 
7090   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7091     auto LHS = matchGREVIPat(Op.getOperand(0));
7092     auto RHS = matchGREVIPat(Op.getOperand(1));
7093     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
7094       SDLoc DL(Op);
7095       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
7096                          DAG.getConstant(LHS->ShAmt, DL, VT));
7097     }
7098   }
7099   return SDValue();
7100 }
7101 
7102 // Matches any the following pattern as a GORCI(W) operation
7103 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
7104 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
7105 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
7106 // Note that with the variant of 3.,
7107 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
7108 // the inner pattern will first be matched as GREVI and then the outer
7109 // pattern will be matched to GORC via the first rule above.
7110 // 4.  (or (rotl/rotr x, bitwidth/2), x)
7111 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
7112                                const RISCVSubtarget &Subtarget) {
7113   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7114   EVT VT = Op.getValueType();
7115 
7116   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7117     SDLoc DL(Op);
7118     SDValue Op0 = Op.getOperand(0);
7119     SDValue Op1 = Op.getOperand(1);
7120 
7121     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
7122       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
7123           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
7124           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
7125         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
7126       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
7127       if ((Reverse.getOpcode() == ISD::ROTL ||
7128            Reverse.getOpcode() == ISD::ROTR) &&
7129           Reverse.getOperand(0) == X &&
7130           isa<ConstantSDNode>(Reverse.getOperand(1))) {
7131         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
7132         if (RotAmt == (VT.getSizeInBits() / 2))
7133           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
7134                              DAG.getConstant(RotAmt, DL, VT));
7135       }
7136       return SDValue();
7137     };
7138 
7139     // Check for either commutable permutation of (or (GREVI x, shamt), x)
7140     if (SDValue V = MatchOROfReverse(Op0, Op1))
7141       return V;
7142     if (SDValue V = MatchOROfReverse(Op1, Op0))
7143       return V;
7144 
7145     // OR is commutable so canonicalize its OR operand to the left
7146     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
7147       std::swap(Op0, Op1);
7148     if (Op0.getOpcode() != ISD::OR)
7149       return SDValue();
7150     SDValue OrOp0 = Op0.getOperand(0);
7151     SDValue OrOp1 = Op0.getOperand(1);
7152     auto LHS = matchGREVIPat(OrOp0);
7153     // OR is commutable so swap the operands and try again: x might have been
7154     // on the left
7155     if (!LHS) {
7156       std::swap(OrOp0, OrOp1);
7157       LHS = matchGREVIPat(OrOp0);
7158     }
7159     auto RHS = matchGREVIPat(Op1);
7160     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
7161       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
7162                          DAG.getConstant(LHS->ShAmt, DL, VT));
7163     }
7164   }
7165   return SDValue();
7166 }
7167 
7168 // Matches any of the following bit-manipulation patterns:
7169 //   (and (shl x, 1), (0x22222222 << 1))
7170 //   (and (srl x, 1), 0x22222222)
7171 //   (shl (and x, 0x22222222), 1)
7172 //   (srl (and x, (0x22222222 << 1)), 1)
7173 // where the shift amount and mask may vary thus:
7174 //   [1]  = 0x22222222 / 0x44444444
7175 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
7176 //   [4]  = 0x00F000F0 / 0x0F000F00
7177 //   [8]  = 0x0000FF00 / 0x00FF0000
7178 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
7179 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
7180   // These are the unshifted masks which we use to match bit-manipulation
7181   // patterns. They may be shifted left in certain circumstances.
7182   static const uint64_t BitmanipMasks[] = {
7183       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
7184       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
7185 
7186   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7187 }
7188 
7189 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
7190 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
7191                                const RISCVSubtarget &Subtarget) {
7192   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7193   EVT VT = Op.getValueType();
7194 
7195   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
7196     return SDValue();
7197 
7198   SDValue Op0 = Op.getOperand(0);
7199   SDValue Op1 = Op.getOperand(1);
7200 
7201   // Or is commutable so canonicalize the second OR to the LHS.
7202   if (Op0.getOpcode() != ISD::OR)
7203     std::swap(Op0, Op1);
7204   if (Op0.getOpcode() != ISD::OR)
7205     return SDValue();
7206 
7207   // We found an inner OR, so our operands are the operands of the inner OR
7208   // and the other operand of the outer OR.
7209   SDValue A = Op0.getOperand(0);
7210   SDValue B = Op0.getOperand(1);
7211   SDValue C = Op1;
7212 
7213   auto Match1 = matchSHFLPat(A);
7214   auto Match2 = matchSHFLPat(B);
7215 
7216   // If neither matched, we failed.
7217   if (!Match1 && !Match2)
7218     return SDValue();
7219 
7220   // We had at least one match. if one failed, try the remaining C operand.
7221   if (!Match1) {
7222     std::swap(A, C);
7223     Match1 = matchSHFLPat(A);
7224     if (!Match1)
7225       return SDValue();
7226   } else if (!Match2) {
7227     std::swap(B, C);
7228     Match2 = matchSHFLPat(B);
7229     if (!Match2)
7230       return SDValue();
7231   }
7232   assert(Match1 && Match2);
7233 
7234   // Make sure our matches pair up.
7235   if (!Match1->formsPairWith(*Match2))
7236     return SDValue();
7237 
7238   // All the remains is to make sure C is an AND with the same input, that masks
7239   // out the bits that are being shuffled.
7240   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
7241       C.getOperand(0) != Match1->Op)
7242     return SDValue();
7243 
7244   uint64_t Mask = C.getConstantOperandVal(1);
7245 
7246   static const uint64_t BitmanipMasks[] = {
7247       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
7248       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
7249   };
7250 
7251   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7252   unsigned MaskIdx = Log2_32(Match1->ShAmt);
7253   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7254 
7255   if (Mask != ExpMask)
7256     return SDValue();
7257 
7258   SDLoc DL(Op);
7259   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7260                      DAG.getConstant(Match1->ShAmt, DL, VT));
7261 }
7262 
7263 // Optimize (add (shl x, c0), (shl y, c1)) ->
7264 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
7265 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7266                                   const RISCVSubtarget &Subtarget) {
7267   // Perform this optimization only in the zba extension.
7268   if (!Subtarget.hasStdExtZba())
7269     return SDValue();
7270 
7271   // Skip for vector types and larger types.
7272   EVT VT = N->getValueType(0);
7273   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7274     return SDValue();
7275 
7276   // The two operand nodes must be SHL and have no other use.
7277   SDValue N0 = N->getOperand(0);
7278   SDValue N1 = N->getOperand(1);
7279   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7280       !N0->hasOneUse() || !N1->hasOneUse())
7281     return SDValue();
7282 
7283   // Check c0 and c1.
7284   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7285   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7286   if (!N0C || !N1C)
7287     return SDValue();
7288   int64_t C0 = N0C->getSExtValue();
7289   int64_t C1 = N1C->getSExtValue();
7290   if (C0 <= 0 || C1 <= 0)
7291     return SDValue();
7292 
7293   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7294   int64_t Bits = std::min(C0, C1);
7295   int64_t Diff = std::abs(C0 - C1);
7296   if (Diff != 1 && Diff != 2 && Diff != 3)
7297     return SDValue();
7298 
7299   // Build nodes.
7300   SDLoc DL(N);
7301   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7302   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7303   SDValue NA0 =
7304       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7305   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7306   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7307 }
7308 
7309 // Combine
7310 // ROTR ((GREV x, 24), 16) -> (GREVI x, 8)
7311 // ROTL ((GREV x, 24), 16) -> (GREVI x, 8)
7312 // RORW ((GREVW x, 24), 16) -> (GREVIW x, 8)
7313 // ROLW ((GREVW x, 24), 16) -> (GREVIW x, 8)
7314 static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG) {
7315   SDValue Src = N->getOperand(0);
7316   SDLoc DL(N);
7317   unsigned Opc;
7318 
7319   if ((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL) &&
7320       Src.getOpcode() == RISCVISD::GREV)
7321     Opc = RISCVISD::GREV;
7322   else if ((N->getOpcode() == RISCVISD::RORW ||
7323             N->getOpcode() == RISCVISD::ROLW) &&
7324            Src.getOpcode() == RISCVISD::GREVW)
7325     Opc = RISCVISD::GREVW;
7326   else
7327     return SDValue();
7328 
7329   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7330       !isa<ConstantSDNode>(Src.getOperand(1)))
7331     return SDValue();
7332 
7333   unsigned ShAmt1 = N->getConstantOperandVal(1);
7334   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7335   if (ShAmt1 != 16 && ShAmt2 != 24)
7336     return SDValue();
7337 
7338   Src = Src.getOperand(0);
7339   return DAG.getNode(Opc, DL, N->getValueType(0), Src,
7340                      DAG.getConstant(8, DL, N->getOperand(1).getValueType()));
7341 }
7342 
7343 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
7344 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
7345 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
7346 // not undo itself, but they are redundant.
7347 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7348   SDValue Src = N->getOperand(0);
7349 
7350   if (Src.getOpcode() != N->getOpcode())
7351     return SDValue();
7352 
7353   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7354       !isa<ConstantSDNode>(Src.getOperand(1)))
7355     return SDValue();
7356 
7357   unsigned ShAmt1 = N->getConstantOperandVal(1);
7358   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7359   Src = Src.getOperand(0);
7360 
7361   unsigned CombinedShAmt;
7362   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
7363     CombinedShAmt = ShAmt1 | ShAmt2;
7364   else
7365     CombinedShAmt = ShAmt1 ^ ShAmt2;
7366 
7367   if (CombinedShAmt == 0)
7368     return Src;
7369 
7370   SDLoc DL(N);
7371   return DAG.getNode(
7372       N->getOpcode(), DL, N->getValueType(0), Src,
7373       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7374 }
7375 
7376 // Combine a constant select operand into its use:
7377 //
7378 // (and (select cond, -1, c), x)
7379 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7380 // (or  (select cond, 0, c), x)
7381 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7382 // (xor (select cond, 0, c), x)
7383 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7384 // (add (select cond, 0, c), x)
7385 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7386 // (sub x, (select cond, 0, c))
7387 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7388 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7389                                    SelectionDAG &DAG, bool AllOnes) {
7390   EVT VT = N->getValueType(0);
7391 
7392   // Skip vectors.
7393   if (VT.isVector())
7394     return SDValue();
7395 
7396   if ((Slct.getOpcode() != ISD::SELECT &&
7397        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7398       !Slct.hasOneUse())
7399     return SDValue();
7400 
7401   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7402     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7403   };
7404 
7405   bool SwapSelectOps;
7406   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7407   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7408   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7409   SDValue NonConstantVal;
7410   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7411     SwapSelectOps = false;
7412     NonConstantVal = FalseVal;
7413   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7414     SwapSelectOps = true;
7415     NonConstantVal = TrueVal;
7416   } else
7417     return SDValue();
7418 
7419   // Slct is now know to be the desired identity constant when CC is true.
7420   TrueVal = OtherOp;
7421   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7422   // Unless SwapSelectOps says the condition should be false.
7423   if (SwapSelectOps)
7424     std::swap(TrueVal, FalseVal);
7425 
7426   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7427     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7428                        {Slct.getOperand(0), Slct.getOperand(1),
7429                         Slct.getOperand(2), TrueVal, FalseVal});
7430 
7431   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7432                      {Slct.getOperand(0), TrueVal, FalseVal});
7433 }
7434 
7435 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7436 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7437                                               bool AllOnes) {
7438   SDValue N0 = N->getOperand(0);
7439   SDValue N1 = N->getOperand(1);
7440   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7441     return Result;
7442   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7443     return Result;
7444   return SDValue();
7445 }
7446 
7447 // Transform (add (mul x, c0), c1) ->
7448 //           (add (mul (add x, c1/c0), c0), c1%c0).
7449 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7450 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7451 // to an infinite loop in DAGCombine if transformed.
7452 // Or transform (add (mul x, c0), c1) ->
7453 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7454 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7455 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7456 // lead to an infinite loop in DAGCombine if transformed.
7457 // Or transform (add (mul x, c0), c1) ->
7458 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7459 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7460 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7461 // lead to an infinite loop in DAGCombine if transformed.
7462 // Or transform (add (mul x, c0), c1) ->
7463 //              (mul (add x, c1/c0), c0).
7464 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
7465 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7466                                      const RISCVSubtarget &Subtarget) {
7467   // Skip for vector types and larger types.
7468   EVT VT = N->getValueType(0);
7469   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7470     return SDValue();
7471   // The first operand node must be a MUL and has no other use.
7472   SDValue N0 = N->getOperand(0);
7473   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7474     return SDValue();
7475   // Check if c0 and c1 match above conditions.
7476   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7477   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7478   if (!N0C || !N1C)
7479     return SDValue();
7480   int64_t C0 = N0C->getSExtValue();
7481   int64_t C1 = N1C->getSExtValue();
7482   int64_t CA, CB;
7483   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
7484     return SDValue();
7485   // Search for proper CA (non-zero) and CB that both are simm12.
7486   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
7487       !isInt<12>(C0 * (C1 / C0))) {
7488     CA = C1 / C0;
7489     CB = C1 % C0;
7490   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
7491              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
7492     CA = C1 / C0 + 1;
7493     CB = C1 % C0 - C0;
7494   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
7495              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
7496     CA = C1 / C0 - 1;
7497     CB = C1 % C0 + C0;
7498   } else
7499     return SDValue();
7500   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
7501   SDLoc DL(N);
7502   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
7503                              DAG.getConstant(CA, DL, VT));
7504   SDValue New1 =
7505       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
7506   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
7507 }
7508 
7509 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
7510                                  const RISCVSubtarget &Subtarget) {
7511   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
7512     return V;
7513   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
7514     return V;
7515   // fold (add (select lhs, rhs, cc, 0, y), x) ->
7516   //      (select lhs, rhs, cc, x, (add x, y))
7517   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7518 }
7519 
7520 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
7521   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
7522   //      (select lhs, rhs, cc, x, (sub x, y))
7523   SDValue N0 = N->getOperand(0);
7524   SDValue N1 = N->getOperand(1);
7525   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
7526 }
7527 
7528 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
7529   // fold (and (select lhs, rhs, cc, -1, y), x) ->
7530   //      (select lhs, rhs, cc, x, (and x, y))
7531   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
7532 }
7533 
7534 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
7535                                 const RISCVSubtarget &Subtarget) {
7536   if (Subtarget.hasStdExtZbp()) {
7537     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
7538       return GREV;
7539     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
7540       return GORC;
7541     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
7542       return SHFL;
7543   }
7544 
7545   // fold (or (select cond, 0, y), x) ->
7546   //      (select cond, x, (or x, y))
7547   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7548 }
7549 
7550 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
7551   // fold (xor (select cond, 0, y), x) ->
7552   //      (select cond, x, (xor x, y))
7553   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7554 }
7555 
7556 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
7557 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
7558 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
7559 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
7560 // ADDW/SUBW/MULW.
7561 static SDValue performANY_EXTENDCombine(SDNode *N,
7562                                         TargetLowering::DAGCombinerInfo &DCI,
7563                                         const RISCVSubtarget &Subtarget) {
7564   if (!Subtarget.is64Bit())
7565     return SDValue();
7566 
7567   SelectionDAG &DAG = DCI.DAG;
7568 
7569   SDValue Src = N->getOperand(0);
7570   EVT VT = N->getValueType(0);
7571   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
7572     return SDValue();
7573 
7574   // The opcode must be one that can implicitly sign_extend.
7575   // FIXME: Additional opcodes.
7576   switch (Src.getOpcode()) {
7577   default:
7578     return SDValue();
7579   case ISD::MUL:
7580     if (!Subtarget.hasStdExtM())
7581       return SDValue();
7582     LLVM_FALLTHROUGH;
7583   case ISD::ADD:
7584   case ISD::SUB:
7585     break;
7586   }
7587 
7588   // Only handle cases where the result is used by a CopyToReg. That likely
7589   // means the value is a liveout of the basic block. This helps prevent
7590   // infinite combine loops like PR51206.
7591   if (none_of(N->uses(),
7592               [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
7593     return SDValue();
7594 
7595   SmallVector<SDNode *, 4> SetCCs;
7596   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
7597                             UE = Src.getNode()->use_end();
7598        UI != UE; ++UI) {
7599     SDNode *User = *UI;
7600     if (User == N)
7601       continue;
7602     if (UI.getUse().getResNo() != Src.getResNo())
7603       continue;
7604     // All i32 setccs are legalized by sign extending operands.
7605     if (User->getOpcode() == ISD::SETCC) {
7606       SetCCs.push_back(User);
7607       continue;
7608     }
7609     // We don't know if we can extend this user.
7610     break;
7611   }
7612 
7613   // If we don't have any SetCCs, this isn't worthwhile.
7614   if (SetCCs.empty())
7615     return SDValue();
7616 
7617   SDLoc DL(N);
7618   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
7619   DCI.CombineTo(N, SExt);
7620 
7621   // Promote all the setccs.
7622   for (SDNode *SetCC : SetCCs) {
7623     SmallVector<SDValue, 4> Ops;
7624 
7625     for (unsigned j = 0; j != 2; ++j) {
7626       SDValue SOp = SetCC->getOperand(j);
7627       if (SOp == Src)
7628         Ops.push_back(SExt);
7629       else
7630         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
7631     }
7632 
7633     Ops.push_back(SetCC->getOperand(2));
7634     DCI.CombineTo(SetCC,
7635                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
7636   }
7637   return SDValue(N, 0);
7638 }
7639 
7640 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to
7641 // vwadd(u).vv/vx or vwsub(u).vv/vx.
7642 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG,
7643                                              bool Commute = false) {
7644   assert((N->getOpcode() == RISCVISD::ADD_VL ||
7645           N->getOpcode() == RISCVISD::SUB_VL) &&
7646          "Unexpected opcode");
7647   bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL;
7648   SDValue Op0 = N->getOperand(0);
7649   SDValue Op1 = N->getOperand(1);
7650   if (Commute)
7651     std::swap(Op0, Op1);
7652 
7653   MVT VT = N->getSimpleValueType(0);
7654 
7655   // Determine the narrow size for a widening add/sub.
7656   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7657   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7658                                   VT.getVectorElementCount());
7659 
7660   SDValue Mask = N->getOperand(2);
7661   SDValue VL = N->getOperand(3);
7662 
7663   SDLoc DL(N);
7664 
7665   // If the RHS is a sext or zext, we can form a widening op.
7666   if ((Op1.getOpcode() == RISCVISD::VZEXT_VL ||
7667        Op1.getOpcode() == RISCVISD::VSEXT_VL) &&
7668       Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) {
7669     unsigned ExtOpc = Op1.getOpcode();
7670     Op1 = Op1.getOperand(0);
7671     // Re-introduce narrower extends if needed.
7672     if (Op1.getValueType() != NarrowVT)
7673       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7674 
7675     unsigned WOpc;
7676     if (ExtOpc == RISCVISD::VSEXT_VL)
7677       WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL;
7678     else
7679       WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL;
7680 
7681     return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL);
7682   }
7683 
7684   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
7685   // sext/zext?
7686 
7687   return SDValue();
7688 }
7689 
7690 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or
7691 // vwsub(u).vv/vx.
7692 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) {
7693   SDValue Op0 = N->getOperand(0);
7694   SDValue Op1 = N->getOperand(1);
7695   SDValue Mask = N->getOperand(2);
7696   SDValue VL = N->getOperand(3);
7697 
7698   MVT VT = N->getSimpleValueType(0);
7699   MVT NarrowVT = Op1.getSimpleValueType();
7700   unsigned NarrowSize = NarrowVT.getScalarSizeInBits();
7701 
7702   unsigned VOpc;
7703   switch (N->getOpcode()) {
7704   default: llvm_unreachable("Unexpected opcode");
7705   case RISCVISD::VWADD_W_VL:  VOpc = RISCVISD::VWADD_VL;  break;
7706   case RISCVISD::VWSUB_W_VL:  VOpc = RISCVISD::VWSUB_VL;  break;
7707   case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break;
7708   case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break;
7709   }
7710 
7711   bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL ||
7712                   N->getOpcode() == RISCVISD::VWSUB_W_VL;
7713 
7714   SDLoc DL(N);
7715 
7716   // If the LHS is a sext or zext, we can narrow this op to the same size as
7717   // the RHS.
7718   if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) ||
7719        (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) &&
7720       Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) {
7721     unsigned ExtOpc = Op0.getOpcode();
7722     Op0 = Op0.getOperand(0);
7723     // Re-introduce narrower extends if needed.
7724     if (Op0.getValueType() != NarrowVT)
7725       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7726     return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL);
7727   }
7728 
7729   bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL ||
7730                N->getOpcode() == RISCVISD::VWADDU_W_VL;
7731 
7732   // Look for splats on the left hand side of a vwadd(u).wv. We might be able
7733   // to commute and use a vwadd(u).vx instead.
7734   if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL &&
7735       Op0.getOperand(0).isUndef() && Op0.getOperand(2) == VL) {
7736     Op0 = Op0.getOperand(1);
7737 
7738     // See if have enough sign bits or zero bits in the scalar to use a
7739     // widening add/sub by splatting to smaller element size.
7740     unsigned EltBits = VT.getScalarSizeInBits();
7741     unsigned ScalarBits = Op0.getValueSizeInBits();
7742     // Make sure we're getting all element bits from the scalar register.
7743     // FIXME: Support implicit sign extension of vmv.v.x?
7744     if (ScalarBits < EltBits)
7745       return SDValue();
7746 
7747     if (IsSigned) {
7748       if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize))
7749         return SDValue();
7750     } else {
7751       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7752       if (!DAG.MaskedValueIsZero(Op0, Mask))
7753         return SDValue();
7754     }
7755 
7756     Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
7757                       DAG.getUNDEF(NarrowVT), Op0, VL);
7758     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
7759   }
7760 
7761   return SDValue();
7762 }
7763 
7764 // Try to form VWMUL, VWMULU or VWMULSU.
7765 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
7766 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
7767                                        bool Commute) {
7768   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
7769   SDValue Op0 = N->getOperand(0);
7770   SDValue Op1 = N->getOperand(1);
7771   if (Commute)
7772     std::swap(Op0, Op1);
7773 
7774   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
7775   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
7776   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
7777   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
7778     return SDValue();
7779 
7780   SDValue Mask = N->getOperand(2);
7781   SDValue VL = N->getOperand(3);
7782 
7783   // Make sure the mask and VL match.
7784   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
7785     return SDValue();
7786 
7787   MVT VT = N->getSimpleValueType(0);
7788 
7789   // Determine the narrow size for a widening multiply.
7790   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7791   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7792                                   VT.getVectorElementCount());
7793 
7794   SDLoc DL(N);
7795 
7796   // See if the other operand is the same opcode.
7797   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
7798     if (!Op1.hasOneUse())
7799       return SDValue();
7800 
7801     // Make sure the mask and VL match.
7802     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
7803       return SDValue();
7804 
7805     Op1 = Op1.getOperand(0);
7806   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
7807     // The operand is a splat of a scalar.
7808 
7809     // The pasthru must be undef for tail agnostic
7810     if (!Op1.getOperand(0).isUndef())
7811       return SDValue();
7812     // The VL must be the same.
7813     if (Op1.getOperand(2) != VL)
7814       return SDValue();
7815 
7816     // Get the scalar value.
7817     Op1 = Op1.getOperand(1);
7818 
7819     // See if have enough sign bits or zero bits in the scalar to use a
7820     // widening multiply by splatting to smaller element size.
7821     unsigned EltBits = VT.getScalarSizeInBits();
7822     unsigned ScalarBits = Op1.getValueSizeInBits();
7823     // Make sure we're getting all element bits from the scalar register.
7824     // FIXME: Support implicit sign extension of vmv.v.x?
7825     if (ScalarBits < EltBits)
7826       return SDValue();
7827 
7828     // If the LHS is a sign extend, try to use vwmul.
7829     if (IsSignExt && DAG.ComputeNumSignBits(Op1) > (ScalarBits - NarrowSize)) {
7830       // Can use vwmul.
7831     } else {
7832       // Otherwise try to use vwmulu or vwmulsu.
7833       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7834       if (DAG.MaskedValueIsZero(Op1, Mask))
7835         IsVWMULSU = IsSignExt;
7836       else
7837         return SDValue();
7838     }
7839 
7840     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
7841                       DAG.getUNDEF(NarrowVT), Op1, VL);
7842   } else
7843     return SDValue();
7844 
7845   Op0 = Op0.getOperand(0);
7846 
7847   // Re-introduce narrower extends if needed.
7848   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
7849   if (Op0.getValueType() != NarrowVT)
7850     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7851   // vwmulsu requires second operand to be zero extended.
7852   ExtOpc = IsVWMULSU ? RISCVISD::VZEXT_VL : ExtOpc;
7853   if (Op1.getValueType() != NarrowVT)
7854     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7855 
7856   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
7857   if (!IsVWMULSU)
7858     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
7859   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
7860 }
7861 
7862 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
7863   switch (Op.getOpcode()) {
7864   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
7865   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
7866   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
7867   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
7868   case ISD::FROUND:     return RISCVFPRndMode::RMM;
7869   }
7870 
7871   return RISCVFPRndMode::Invalid;
7872 }
7873 
7874 // Fold
7875 //   (fp_to_int (froundeven X)) -> fcvt X, rne
7876 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
7877 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
7878 //   (fp_to_int (fceil X))      -> fcvt X, rup
7879 //   (fp_to_int (fround X))     -> fcvt X, rmm
7880 static SDValue performFP_TO_INTCombine(SDNode *N,
7881                                        TargetLowering::DAGCombinerInfo &DCI,
7882                                        const RISCVSubtarget &Subtarget) {
7883   SelectionDAG &DAG = DCI.DAG;
7884   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7885   MVT XLenVT = Subtarget.getXLenVT();
7886 
7887   // Only handle XLen or i32 types. Other types narrower than XLen will
7888   // eventually be legalized to XLenVT.
7889   EVT VT = N->getValueType(0);
7890   if (VT != MVT::i32 && VT != XLenVT)
7891     return SDValue();
7892 
7893   SDValue Src = N->getOperand(0);
7894 
7895   // Ensure the FP type is also legal.
7896   if (!TLI.isTypeLegal(Src.getValueType()))
7897     return SDValue();
7898 
7899   // Don't do this for f16 with Zfhmin and not Zfh.
7900   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
7901     return SDValue();
7902 
7903   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
7904   if (FRM == RISCVFPRndMode::Invalid)
7905     return SDValue();
7906 
7907   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
7908 
7909   unsigned Opc;
7910   if (VT == XLenVT)
7911     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
7912   else
7913     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
7914 
7915   SDLoc DL(N);
7916   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
7917                                 DAG.getTargetConstant(FRM, DL, XLenVT));
7918   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
7919 }
7920 
7921 // Fold
7922 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
7923 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
7924 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
7925 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
7926 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
7927 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
7928                                        TargetLowering::DAGCombinerInfo &DCI,
7929                                        const RISCVSubtarget &Subtarget) {
7930   SelectionDAG &DAG = DCI.DAG;
7931   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7932   MVT XLenVT = Subtarget.getXLenVT();
7933 
7934   // Only handle XLen types. Other types narrower than XLen will eventually be
7935   // legalized to XLenVT.
7936   EVT DstVT = N->getValueType(0);
7937   if (DstVT != XLenVT)
7938     return SDValue();
7939 
7940   SDValue Src = N->getOperand(0);
7941 
7942   // Ensure the FP type is also legal.
7943   if (!TLI.isTypeLegal(Src.getValueType()))
7944     return SDValue();
7945 
7946   // Don't do this for f16 with Zfhmin and not Zfh.
7947   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
7948     return SDValue();
7949 
7950   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
7951 
7952   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
7953   if (FRM == RISCVFPRndMode::Invalid)
7954     return SDValue();
7955 
7956   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
7957 
7958   unsigned Opc;
7959   if (SatVT == DstVT)
7960     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
7961   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
7962     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
7963   else
7964     return SDValue();
7965   // FIXME: Support other SatVTs by clamping before or after the conversion.
7966 
7967   Src = Src.getOperand(0);
7968 
7969   SDLoc DL(N);
7970   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
7971                                 DAG.getTargetConstant(FRM, DL, XLenVT));
7972 
7973   // RISCV FP-to-int conversions saturate to the destination register size, but
7974   // don't produce 0 for nan.
7975   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
7976   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
7977 }
7978 
7979 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
7980                                                DAGCombinerInfo &DCI) const {
7981   SelectionDAG &DAG = DCI.DAG;
7982 
7983   // Helper to call SimplifyDemandedBits on an operand of N where only some low
7984   // bits are demanded. N will be added to the Worklist if it was not deleted.
7985   // Caller should return SDValue(N, 0) if this returns true.
7986   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
7987     SDValue Op = N->getOperand(OpNo);
7988     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
7989     if (!SimplifyDemandedBits(Op, Mask, DCI))
7990       return false;
7991 
7992     if (N->getOpcode() != ISD::DELETED_NODE)
7993       DCI.AddToWorklist(N);
7994     return true;
7995   };
7996 
7997   switch (N->getOpcode()) {
7998   default:
7999     break;
8000   case RISCVISD::SplitF64: {
8001     SDValue Op0 = N->getOperand(0);
8002     // If the input to SplitF64 is just BuildPairF64 then the operation is
8003     // redundant. Instead, use BuildPairF64's operands directly.
8004     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
8005       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
8006 
8007     if (Op0->isUndef()) {
8008       SDValue Lo = DAG.getUNDEF(MVT::i32);
8009       SDValue Hi = DAG.getUNDEF(MVT::i32);
8010       return DCI.CombineTo(N, Lo, Hi);
8011     }
8012 
8013     SDLoc DL(N);
8014 
8015     // It's cheaper to materialise two 32-bit integers than to load a double
8016     // from the constant pool and transfer it to integer registers through the
8017     // stack.
8018     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
8019       APInt V = C->getValueAPF().bitcastToAPInt();
8020       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
8021       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
8022       return DCI.CombineTo(N, Lo, Hi);
8023     }
8024 
8025     // This is a target-specific version of a DAGCombine performed in
8026     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8027     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8028     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8029     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8030         !Op0.getNode()->hasOneUse())
8031       break;
8032     SDValue NewSplitF64 =
8033         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
8034                     Op0.getOperand(0));
8035     SDValue Lo = NewSplitF64.getValue(0);
8036     SDValue Hi = NewSplitF64.getValue(1);
8037     APInt SignBit = APInt::getSignMask(32);
8038     if (Op0.getOpcode() == ISD::FNEG) {
8039       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
8040                                   DAG.getConstant(SignBit, DL, MVT::i32));
8041       return DCI.CombineTo(N, Lo, NewHi);
8042     }
8043     assert(Op0.getOpcode() == ISD::FABS);
8044     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
8045                                 DAG.getConstant(~SignBit, DL, MVT::i32));
8046     return DCI.CombineTo(N, Lo, NewHi);
8047   }
8048   case RISCVISD::SLLW:
8049   case RISCVISD::SRAW:
8050   case RISCVISD::SRLW:
8051   case RISCVISD::ROLW:
8052   case RISCVISD::RORW: {
8053     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8054     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8055         SimplifyDemandedLowBitsHelper(1, 5))
8056       return SDValue(N, 0);
8057 
8058     return combineROTR_ROTL_RORW_ROLW(N, DAG);
8059   }
8060   case ISD::ROTR:
8061   case ISD::ROTL:
8062     return combineROTR_ROTL_RORW_ROLW(N, DAG);
8063   case RISCVISD::CLZW:
8064   case RISCVISD::CTZW: {
8065     // Only the lower 32 bits of the first operand are read
8066     if (SimplifyDemandedLowBitsHelper(0, 32))
8067       return SDValue(N, 0);
8068     break;
8069   }
8070   case RISCVISD::GREV:
8071   case RISCVISD::GORC: {
8072     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
8073     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8074     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8075     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
8076       return SDValue(N, 0);
8077 
8078     return combineGREVI_GORCI(N, DAG);
8079   }
8080   case RISCVISD::GREVW:
8081   case RISCVISD::GORCW: {
8082     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8083     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8084         SimplifyDemandedLowBitsHelper(1, 5))
8085       return SDValue(N, 0);
8086 
8087     return combineGREVI_GORCI(N, DAG);
8088   }
8089   case RISCVISD::SHFL:
8090   case RISCVISD::UNSHFL: {
8091     // Only the lower log2(Bitwidth)-1 bits of the the shift amount are read.
8092     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8093     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8094     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
8095       return SDValue(N, 0);
8096 
8097     break;
8098   }
8099   case RISCVISD::SHFLW:
8100   case RISCVISD::UNSHFLW: {
8101     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
8102     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8103         SimplifyDemandedLowBitsHelper(1, 4))
8104       return SDValue(N, 0);
8105 
8106     break;
8107   }
8108   case RISCVISD::BCOMPRESSW:
8109   case RISCVISD::BDECOMPRESSW: {
8110     // Only the lower 32 bits of LHS and RHS are read.
8111     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8112         SimplifyDemandedLowBitsHelper(1, 32))
8113       return SDValue(N, 0);
8114 
8115     break;
8116   }
8117   case RISCVISD::FMV_X_ANYEXTH:
8118   case RISCVISD::FMV_X_ANYEXTW_RV64: {
8119     SDLoc DL(N);
8120     SDValue Op0 = N->getOperand(0);
8121     MVT VT = N->getSimpleValueType(0);
8122     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
8123     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
8124     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
8125     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
8126          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
8127         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8128          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
8129       assert(Op0.getOperand(0).getValueType() == VT &&
8130              "Unexpected value type!");
8131       return Op0.getOperand(0);
8132     }
8133 
8134     // This is a target-specific version of a DAGCombine performed in
8135     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8136     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8137     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8138     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8139         !Op0.getNode()->hasOneUse())
8140       break;
8141     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
8142     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
8143     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
8144     if (Op0.getOpcode() == ISD::FNEG)
8145       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
8146                          DAG.getConstant(SignBit, DL, VT));
8147 
8148     assert(Op0.getOpcode() == ISD::FABS);
8149     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
8150                        DAG.getConstant(~SignBit, DL, VT));
8151   }
8152   case ISD::ADD:
8153     return performADDCombine(N, DAG, Subtarget);
8154   case ISD::SUB:
8155     return performSUBCombine(N, DAG);
8156   case ISD::AND:
8157     return performANDCombine(N, DAG);
8158   case ISD::OR:
8159     return performORCombine(N, DAG, Subtarget);
8160   case ISD::XOR:
8161     return performXORCombine(N, DAG);
8162   case ISD::ANY_EXTEND:
8163     return performANY_EXTENDCombine(N, DCI, Subtarget);
8164   case ISD::ZERO_EXTEND:
8165     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
8166     // type legalization. This is safe because fp_to_uint produces poison if
8167     // it overflows.
8168     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
8169       SDValue Src = N->getOperand(0);
8170       if (Src.getOpcode() == ISD::FP_TO_UINT &&
8171           isTypeLegal(Src.getOperand(0).getValueType()))
8172         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
8173                            Src.getOperand(0));
8174       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
8175           isTypeLegal(Src.getOperand(1).getValueType())) {
8176         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
8177         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
8178                                   Src.getOperand(0), Src.getOperand(1));
8179         DCI.CombineTo(N, Res);
8180         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
8181         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
8182         return SDValue(N, 0); // Return N so it doesn't get rechecked.
8183       }
8184     }
8185     return SDValue();
8186   case RISCVISD::SELECT_CC: {
8187     // Transform
8188     SDValue LHS = N->getOperand(0);
8189     SDValue RHS = N->getOperand(1);
8190     SDValue TrueV = N->getOperand(3);
8191     SDValue FalseV = N->getOperand(4);
8192 
8193     // If the True and False values are the same, we don't need a select_cc.
8194     if (TrueV == FalseV)
8195       return TrueV;
8196 
8197     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
8198     if (!ISD::isIntEqualitySetCC(CCVal))
8199       break;
8200 
8201     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
8202     //      (select_cc X, Y, lt, trueV, falseV)
8203     // Sometimes the setcc is introduced after select_cc has been formed.
8204     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8205         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8206       // If we're looking for eq 0 instead of ne 0, we need to invert the
8207       // condition.
8208       bool Invert = CCVal == ISD::SETEQ;
8209       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8210       if (Invert)
8211         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8212 
8213       SDLoc DL(N);
8214       RHS = LHS.getOperand(1);
8215       LHS = LHS.getOperand(0);
8216       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8217 
8218       SDValue TargetCC = DAG.getCondCode(CCVal);
8219       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8220                          {LHS, RHS, TargetCC, TrueV, FalseV});
8221     }
8222 
8223     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
8224     //      (select_cc X, Y, eq/ne, trueV, falseV)
8225     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8226       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
8227                          {LHS.getOperand(0), LHS.getOperand(1),
8228                           N->getOperand(2), TrueV, FalseV});
8229     // (select_cc X, 1, setne, trueV, falseV) ->
8230     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
8231     // This can occur when legalizing some floating point comparisons.
8232     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8233     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8234       SDLoc DL(N);
8235       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8236       SDValue TargetCC = DAG.getCondCode(CCVal);
8237       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8238       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8239                          {LHS, RHS, TargetCC, TrueV, FalseV});
8240     }
8241 
8242     break;
8243   }
8244   case RISCVISD::BR_CC: {
8245     SDValue LHS = N->getOperand(1);
8246     SDValue RHS = N->getOperand(2);
8247     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
8248     if (!ISD::isIntEqualitySetCC(CCVal))
8249       break;
8250 
8251     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
8252     //      (br_cc X, Y, lt, dest)
8253     // Sometimes the setcc is introduced after br_cc has been formed.
8254     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8255         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8256       // If we're looking for eq 0 instead of ne 0, we need to invert the
8257       // condition.
8258       bool Invert = CCVal == ISD::SETEQ;
8259       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8260       if (Invert)
8261         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8262 
8263       SDLoc DL(N);
8264       RHS = LHS.getOperand(1);
8265       LHS = LHS.getOperand(0);
8266       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8267 
8268       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8269                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
8270                          N->getOperand(4));
8271     }
8272 
8273     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
8274     //      (br_cc X, Y, eq/ne, trueV, falseV)
8275     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8276       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
8277                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
8278                          N->getOperand(3), N->getOperand(4));
8279 
8280     // (br_cc X, 1, setne, br_cc) ->
8281     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
8282     // This can occur when legalizing some floating point comparisons.
8283     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8284     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8285       SDLoc DL(N);
8286       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8287       SDValue TargetCC = DAG.getCondCode(CCVal);
8288       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8289       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8290                          N->getOperand(0), LHS, RHS, TargetCC,
8291                          N->getOperand(4));
8292     }
8293     break;
8294   }
8295   case ISD::FP_TO_SINT:
8296   case ISD::FP_TO_UINT:
8297     return performFP_TO_INTCombine(N, DCI, Subtarget);
8298   case ISD::FP_TO_SINT_SAT:
8299   case ISD::FP_TO_UINT_SAT:
8300     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
8301   case ISD::FCOPYSIGN: {
8302     EVT VT = N->getValueType(0);
8303     if (!VT.isVector())
8304       break;
8305     // There is a form of VFSGNJ which injects the negated sign of its second
8306     // operand. Try and bubble any FNEG up after the extend/round to produce
8307     // this optimized pattern. Avoid modifying cases where FP_ROUND and
8308     // TRUNC=1.
8309     SDValue In2 = N->getOperand(1);
8310     // Avoid cases where the extend/round has multiple uses, as duplicating
8311     // those is typically more expensive than removing a fneg.
8312     if (!In2.hasOneUse())
8313       break;
8314     if (In2.getOpcode() != ISD::FP_EXTEND &&
8315         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
8316       break;
8317     In2 = In2.getOperand(0);
8318     if (In2.getOpcode() != ISD::FNEG)
8319       break;
8320     SDLoc DL(N);
8321     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
8322     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
8323                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
8324   }
8325   case ISD::MGATHER:
8326   case ISD::MSCATTER:
8327   case ISD::VP_GATHER:
8328   case ISD::VP_SCATTER: {
8329     if (!DCI.isBeforeLegalize())
8330       break;
8331     SDValue Index, ScaleOp;
8332     bool IsIndexScaled = false;
8333     bool IsIndexSigned = false;
8334     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
8335       Index = VPGSN->getIndex();
8336       ScaleOp = VPGSN->getScale();
8337       IsIndexScaled = VPGSN->isIndexScaled();
8338       IsIndexSigned = VPGSN->isIndexSigned();
8339     } else {
8340       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
8341       Index = MGSN->getIndex();
8342       ScaleOp = MGSN->getScale();
8343       IsIndexScaled = MGSN->isIndexScaled();
8344       IsIndexSigned = MGSN->isIndexSigned();
8345     }
8346     EVT IndexVT = Index.getValueType();
8347     MVT XLenVT = Subtarget.getXLenVT();
8348     // RISCV indexed loads only support the "unsigned unscaled" addressing
8349     // mode, so anything else must be manually legalized.
8350     bool NeedsIdxLegalization =
8351         IsIndexScaled ||
8352         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
8353     if (!NeedsIdxLegalization)
8354       break;
8355 
8356     SDLoc DL(N);
8357 
8358     // Any index legalization should first promote to XLenVT, so we don't lose
8359     // bits when scaling. This may create an illegal index type so we let
8360     // LLVM's legalization take care of the splitting.
8361     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
8362     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
8363       IndexVT = IndexVT.changeVectorElementType(XLenVT);
8364       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
8365                           DL, IndexVT, Index);
8366     }
8367 
8368     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
8369     if (IsIndexScaled && Scale != 1) {
8370       // Manually scale the indices by the element size.
8371       // TODO: Sanitize the scale operand here?
8372       // TODO: For VP nodes, should we use VP_SHL here?
8373       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
8374       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
8375       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
8376     }
8377 
8378     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
8379     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
8380       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
8381                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
8382                               VPGN->getScale(), VPGN->getMask(),
8383                               VPGN->getVectorLength()},
8384                              VPGN->getMemOperand(), NewIndexTy);
8385     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
8386       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
8387                               {VPSN->getChain(), VPSN->getValue(),
8388                                VPSN->getBasePtr(), Index, VPSN->getScale(),
8389                                VPSN->getMask(), VPSN->getVectorLength()},
8390                               VPSN->getMemOperand(), NewIndexTy);
8391     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
8392       return DAG.getMaskedGather(
8393           N->getVTList(), MGN->getMemoryVT(), DL,
8394           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
8395            MGN->getBasePtr(), Index, MGN->getScale()},
8396           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
8397     const auto *MSN = cast<MaskedScatterSDNode>(N);
8398     return DAG.getMaskedScatter(
8399         N->getVTList(), MSN->getMemoryVT(), DL,
8400         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
8401          Index, MSN->getScale()},
8402         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
8403   }
8404   case RISCVISD::SRA_VL:
8405   case RISCVISD::SRL_VL:
8406   case RISCVISD::SHL_VL: {
8407     SDValue ShAmt = N->getOperand(1);
8408     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8409       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8410       SDLoc DL(N);
8411       SDValue VL = N->getOperand(3);
8412       EVT VT = N->getValueType(0);
8413       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8414                           ShAmt.getOperand(1), VL);
8415       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
8416                          N->getOperand(2), N->getOperand(3));
8417     }
8418     break;
8419   }
8420   case ISD::SRA:
8421   case ISD::SRL:
8422   case ISD::SHL: {
8423     SDValue ShAmt = N->getOperand(1);
8424     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8425       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8426       SDLoc DL(N);
8427       EVT VT = N->getValueType(0);
8428       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
8429                           ShAmt.getOperand(1),
8430                           DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
8431       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
8432     }
8433     break;
8434   }
8435   case RISCVISD::ADD_VL:
8436     if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false))
8437       return V;
8438     return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true);
8439   case RISCVISD::SUB_VL:
8440     return combineADDSUB_VLToVWADDSUB_VL(N, DAG);
8441   case RISCVISD::VWADD_W_VL:
8442   case RISCVISD::VWADDU_W_VL:
8443   case RISCVISD::VWSUB_W_VL:
8444   case RISCVISD::VWSUBU_W_VL:
8445     return combineVWADD_W_VL_VWSUB_W_VL(N, DAG);
8446   case RISCVISD::MUL_VL:
8447     if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
8448       return V;
8449     // Mul is commutative.
8450     return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
8451   case ISD::STORE: {
8452     auto *Store = cast<StoreSDNode>(N);
8453     SDValue Val = Store->getValue();
8454     // Combine store of vmv.x.s to vse with VL of 1.
8455     // FIXME: Support FP.
8456     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
8457       SDValue Src = Val.getOperand(0);
8458       EVT VecVT = Src.getValueType();
8459       EVT MemVT = Store->getMemoryVT();
8460       // The memory VT and the element type must match.
8461       if (VecVT.getVectorElementType() == MemVT) {
8462         SDLoc DL(N);
8463         MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
8464         return DAG.getStoreVP(
8465             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
8466             DAG.getConstant(1, DL, MaskVT),
8467             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
8468             Store->getMemOperand(), Store->getAddressingMode(),
8469             Store->isTruncatingStore(), /*IsCompress*/ false);
8470       }
8471     }
8472 
8473     break;
8474   }
8475   case ISD::SPLAT_VECTOR: {
8476     EVT VT = N->getValueType(0);
8477     // Only perform this combine on legal MVT types.
8478     if (!isTypeLegal(VT))
8479       break;
8480     if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
8481                                          DAG, Subtarget))
8482       return Gather;
8483     break;
8484   }
8485   case RISCVISD::VMV_V_X_VL: {
8486     // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
8487     // scalar input.
8488     unsigned ScalarSize = N->getOperand(1).getValueSizeInBits();
8489     unsigned EltWidth = N->getValueType(0).getScalarSizeInBits();
8490     if (ScalarSize > EltWidth && N->getOperand(0).isUndef())
8491       if (SimplifyDemandedLowBitsHelper(1, EltWidth))
8492         return SDValue(N, 0);
8493 
8494     break;
8495   }
8496   }
8497 
8498   return SDValue();
8499 }
8500 
8501 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
8502     const SDNode *N, CombineLevel Level) const {
8503   // The following folds are only desirable if `(OP _, c1 << c2)` can be
8504   // materialised in fewer instructions than `(OP _, c1)`:
8505   //
8506   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
8507   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
8508   SDValue N0 = N->getOperand(0);
8509   EVT Ty = N0.getValueType();
8510   if (Ty.isScalarInteger() &&
8511       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
8512     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
8513     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
8514     if (C1 && C2) {
8515       const APInt &C1Int = C1->getAPIntValue();
8516       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
8517 
8518       // We can materialise `c1 << c2` into an add immediate, so it's "free",
8519       // and the combine should happen, to potentially allow further combines
8520       // later.
8521       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
8522           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
8523         return true;
8524 
8525       // We can materialise `c1` in an add immediate, so it's "free", and the
8526       // combine should be prevented.
8527       if (C1Int.getMinSignedBits() <= 64 &&
8528           isLegalAddImmediate(C1Int.getSExtValue()))
8529         return false;
8530 
8531       // Neither constant will fit into an immediate, so find materialisation
8532       // costs.
8533       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
8534                                               Subtarget.getFeatureBits(),
8535                                               /*CompressionCost*/true);
8536       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
8537           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
8538           /*CompressionCost*/true);
8539 
8540       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
8541       // combine should be prevented.
8542       if (C1Cost < ShiftedC1Cost)
8543         return false;
8544     }
8545   }
8546   return true;
8547 }
8548 
8549 bool RISCVTargetLowering::targetShrinkDemandedConstant(
8550     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
8551     TargetLoweringOpt &TLO) const {
8552   // Delay this optimization as late as possible.
8553   if (!TLO.LegalOps)
8554     return false;
8555 
8556   EVT VT = Op.getValueType();
8557   if (VT.isVector())
8558     return false;
8559 
8560   // Only handle AND for now.
8561   if (Op.getOpcode() != ISD::AND)
8562     return false;
8563 
8564   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
8565   if (!C)
8566     return false;
8567 
8568   const APInt &Mask = C->getAPIntValue();
8569 
8570   // Clear all non-demanded bits initially.
8571   APInt ShrunkMask = Mask & DemandedBits;
8572 
8573   // Try to make a smaller immediate by setting undemanded bits.
8574 
8575   APInt ExpandedMask = Mask | ~DemandedBits;
8576 
8577   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
8578     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
8579   };
8580   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
8581     if (NewMask == Mask)
8582       return true;
8583     SDLoc DL(Op);
8584     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
8585     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
8586     return TLO.CombineTo(Op, NewOp);
8587   };
8588 
8589   // If the shrunk mask fits in sign extended 12 bits, let the target
8590   // independent code apply it.
8591   if (ShrunkMask.isSignedIntN(12))
8592     return false;
8593 
8594   // Preserve (and X, 0xffff) when zext.h is supported.
8595   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
8596     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
8597     if (IsLegalMask(NewMask))
8598       return UseMask(NewMask);
8599   }
8600 
8601   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
8602   if (VT == MVT::i64) {
8603     APInt NewMask = APInt(64, 0xffffffff);
8604     if (IsLegalMask(NewMask))
8605       return UseMask(NewMask);
8606   }
8607 
8608   // For the remaining optimizations, we need to be able to make a negative
8609   // number through a combination of mask and undemanded bits.
8610   if (!ExpandedMask.isNegative())
8611     return false;
8612 
8613   // What is the fewest number of bits we need to represent the negative number.
8614   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
8615 
8616   // Try to make a 12 bit negative immediate. If that fails try to make a 32
8617   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
8618   APInt NewMask = ShrunkMask;
8619   if (MinSignedBits <= 12)
8620     NewMask.setBitsFrom(11);
8621   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
8622     NewMask.setBitsFrom(31);
8623   else
8624     return false;
8625 
8626   // Check that our new mask is a subset of the demanded mask.
8627   assert(IsLegalMask(NewMask));
8628   return UseMask(NewMask);
8629 }
8630 
8631 static void computeGREV(APInt &Src, unsigned ShAmt) {
8632   ShAmt &= Src.getBitWidth() - 1;
8633   uint64_t x = Src.getZExtValue();
8634   if (ShAmt & 1)
8635     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
8636   if (ShAmt & 2)
8637     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
8638   if (ShAmt & 4)
8639     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
8640   if (ShAmt & 8)
8641     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
8642   if (ShAmt & 16)
8643     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
8644   if (ShAmt & 32)
8645     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
8646   Src = x;
8647 }
8648 
8649 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
8650                                                         KnownBits &Known,
8651                                                         const APInt &DemandedElts,
8652                                                         const SelectionDAG &DAG,
8653                                                         unsigned Depth) const {
8654   unsigned BitWidth = Known.getBitWidth();
8655   unsigned Opc = Op.getOpcode();
8656   assert((Opc >= ISD::BUILTIN_OP_END ||
8657           Opc == ISD::INTRINSIC_WO_CHAIN ||
8658           Opc == ISD::INTRINSIC_W_CHAIN ||
8659           Opc == ISD::INTRINSIC_VOID) &&
8660          "Should use MaskedValueIsZero if you don't know whether Op"
8661          " is a target node!");
8662 
8663   Known.resetAll();
8664   switch (Opc) {
8665   default: break;
8666   case RISCVISD::SELECT_CC: {
8667     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
8668     // If we don't know any bits, early out.
8669     if (Known.isUnknown())
8670       break;
8671     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
8672 
8673     // Only known if known in both the LHS and RHS.
8674     Known = KnownBits::commonBits(Known, Known2);
8675     break;
8676   }
8677   case RISCVISD::REMUW: {
8678     KnownBits Known2;
8679     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8680     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8681     // We only care about the lower 32 bits.
8682     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
8683     // Restore the original width by sign extending.
8684     Known = Known.sext(BitWidth);
8685     break;
8686   }
8687   case RISCVISD::DIVUW: {
8688     KnownBits Known2;
8689     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8690     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8691     // We only care about the lower 32 bits.
8692     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
8693     // Restore the original width by sign extending.
8694     Known = Known.sext(BitWidth);
8695     break;
8696   }
8697   case RISCVISD::CTZW: {
8698     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8699     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
8700     unsigned LowBits = Log2_32(PossibleTZ) + 1;
8701     Known.Zero.setBitsFrom(LowBits);
8702     break;
8703   }
8704   case RISCVISD::CLZW: {
8705     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8706     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
8707     unsigned LowBits = Log2_32(PossibleLZ) + 1;
8708     Known.Zero.setBitsFrom(LowBits);
8709     break;
8710   }
8711   case RISCVISD::GREV:
8712   case RISCVISD::GREVW: {
8713     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
8714       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8715       if (Opc == RISCVISD::GREVW)
8716         Known = Known.trunc(32);
8717       unsigned ShAmt = C->getZExtValue();
8718       computeGREV(Known.Zero, ShAmt);
8719       computeGREV(Known.One, ShAmt);
8720       if (Opc == RISCVISD::GREVW)
8721         Known = Known.sext(BitWidth);
8722     }
8723     break;
8724   }
8725   case RISCVISD::READ_VLENB: {
8726     // If we know the minimum VLen from Zvl extensions, we can use that to
8727     // determine the trailing zeros of VLENB.
8728     // FIXME: Limit to 128 bit vectors until we have more testing.
8729     unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8;
8730     if (MinVLenB > 0)
8731       Known.Zero.setLowBits(Log2_32(MinVLenB));
8732     // We assume VLENB is no more than 65536 / 8 bytes.
8733     Known.Zero.setBitsFrom(14);
8734     break;
8735   }
8736   case ISD::INTRINSIC_W_CHAIN:
8737   case ISD::INTRINSIC_WO_CHAIN: {
8738     unsigned IntNo =
8739         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
8740     switch (IntNo) {
8741     default:
8742       // We can't do anything for most intrinsics.
8743       break;
8744     case Intrinsic::riscv_vsetvli:
8745     case Intrinsic::riscv_vsetvlimax:
8746     case Intrinsic::riscv_vsetvli_opt:
8747     case Intrinsic::riscv_vsetvlimax_opt:
8748       // Assume that VL output is positive and would fit in an int32_t.
8749       // TODO: VLEN might be capped at 16 bits in a future V spec update.
8750       if (BitWidth >= 32)
8751         Known.Zero.setBitsFrom(31);
8752       break;
8753     }
8754     break;
8755   }
8756   }
8757 }
8758 
8759 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
8760     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
8761     unsigned Depth) const {
8762   switch (Op.getOpcode()) {
8763   default:
8764     break;
8765   case RISCVISD::SELECT_CC: {
8766     unsigned Tmp =
8767         DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
8768     if (Tmp == 1) return 1;  // Early out.
8769     unsigned Tmp2 =
8770         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
8771     return std::min(Tmp, Tmp2);
8772   }
8773   case RISCVISD::SLLW:
8774   case RISCVISD::SRAW:
8775   case RISCVISD::SRLW:
8776   case RISCVISD::DIVW:
8777   case RISCVISD::DIVUW:
8778   case RISCVISD::REMUW:
8779   case RISCVISD::ROLW:
8780   case RISCVISD::RORW:
8781   case RISCVISD::GREVW:
8782   case RISCVISD::GORCW:
8783   case RISCVISD::FSLW:
8784   case RISCVISD::FSRW:
8785   case RISCVISD::SHFLW:
8786   case RISCVISD::UNSHFLW:
8787   case RISCVISD::BCOMPRESSW:
8788   case RISCVISD::BDECOMPRESSW:
8789   case RISCVISD::BFPW:
8790   case RISCVISD::FCVT_W_RV64:
8791   case RISCVISD::FCVT_WU_RV64:
8792   case RISCVISD::STRICT_FCVT_W_RV64:
8793   case RISCVISD::STRICT_FCVT_WU_RV64:
8794     // TODO: As the result is sign-extended, this is conservatively correct. A
8795     // more precise answer could be calculated for SRAW depending on known
8796     // bits in the shift amount.
8797     return 33;
8798   case RISCVISD::SHFL:
8799   case RISCVISD::UNSHFL: {
8800     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
8801     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
8802     // will stay within the upper 32 bits. If there were more than 32 sign bits
8803     // before there will be at least 33 sign bits after.
8804     if (Op.getValueType() == MVT::i64 &&
8805         isa<ConstantSDNode>(Op.getOperand(1)) &&
8806         (Op.getConstantOperandVal(1) & 0x10) == 0) {
8807       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
8808       if (Tmp > 32)
8809         return 33;
8810     }
8811     break;
8812   }
8813   case RISCVISD::VMV_X_S: {
8814     // The number of sign bits of the scalar result is computed by obtaining the
8815     // element type of the input vector operand, subtracting its width from the
8816     // XLEN, and then adding one (sign bit within the element type). If the
8817     // element type is wider than XLen, the least-significant XLEN bits are
8818     // taken.
8819     unsigned XLen = Subtarget.getXLen();
8820     unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
8821     if (EltBits <= XLen)
8822       return XLen - EltBits + 1;
8823     break;
8824   }
8825   }
8826 
8827   return 1;
8828 }
8829 
8830 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
8831                                                   MachineBasicBlock *BB) {
8832   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
8833 
8834   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
8835   // Should the count have wrapped while it was being read, we need to try
8836   // again.
8837   // ...
8838   // read:
8839   // rdcycleh x3 # load high word of cycle
8840   // rdcycle  x2 # load low word of cycle
8841   // rdcycleh x4 # load high word of cycle
8842   // bne x3, x4, read # check if high word reads match, otherwise try again
8843   // ...
8844 
8845   MachineFunction &MF = *BB->getParent();
8846   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8847   MachineFunction::iterator It = ++BB->getIterator();
8848 
8849   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
8850   MF.insert(It, LoopMBB);
8851 
8852   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
8853   MF.insert(It, DoneMBB);
8854 
8855   // Transfer the remainder of BB and its successor edges to DoneMBB.
8856   DoneMBB->splice(DoneMBB->begin(), BB,
8857                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
8858   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
8859 
8860   BB->addSuccessor(LoopMBB);
8861 
8862   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8863   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8864   Register LoReg = MI.getOperand(0).getReg();
8865   Register HiReg = MI.getOperand(1).getReg();
8866   DebugLoc DL = MI.getDebugLoc();
8867 
8868   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
8869   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
8870       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
8871       .addReg(RISCV::X0);
8872   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
8873       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
8874       .addReg(RISCV::X0);
8875   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
8876       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
8877       .addReg(RISCV::X0);
8878 
8879   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
8880       .addReg(HiReg)
8881       .addReg(ReadAgainReg)
8882       .addMBB(LoopMBB);
8883 
8884   LoopMBB->addSuccessor(LoopMBB);
8885   LoopMBB->addSuccessor(DoneMBB);
8886 
8887   MI.eraseFromParent();
8888 
8889   return DoneMBB;
8890 }
8891 
8892 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
8893                                              MachineBasicBlock *BB) {
8894   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
8895 
8896   MachineFunction &MF = *BB->getParent();
8897   DebugLoc DL = MI.getDebugLoc();
8898   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
8899   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
8900   Register LoReg = MI.getOperand(0).getReg();
8901   Register HiReg = MI.getOperand(1).getReg();
8902   Register SrcReg = MI.getOperand(2).getReg();
8903   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
8904   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
8905 
8906   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
8907                           RI);
8908   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
8909   MachineMemOperand *MMOLo =
8910       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
8911   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
8912       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
8913   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
8914       .addFrameIndex(FI)
8915       .addImm(0)
8916       .addMemOperand(MMOLo);
8917   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
8918       .addFrameIndex(FI)
8919       .addImm(4)
8920       .addMemOperand(MMOHi);
8921   MI.eraseFromParent(); // The pseudo instruction is gone now.
8922   return BB;
8923 }
8924 
8925 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
8926                                                  MachineBasicBlock *BB) {
8927   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
8928          "Unexpected instruction");
8929 
8930   MachineFunction &MF = *BB->getParent();
8931   DebugLoc DL = MI.getDebugLoc();
8932   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
8933   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
8934   Register DstReg = MI.getOperand(0).getReg();
8935   Register LoReg = MI.getOperand(1).getReg();
8936   Register HiReg = MI.getOperand(2).getReg();
8937   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
8938   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
8939 
8940   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
8941   MachineMemOperand *MMOLo =
8942       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
8943   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
8944       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
8945   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
8946       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
8947       .addFrameIndex(FI)
8948       .addImm(0)
8949       .addMemOperand(MMOLo);
8950   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
8951       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
8952       .addFrameIndex(FI)
8953       .addImm(4)
8954       .addMemOperand(MMOHi);
8955   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
8956   MI.eraseFromParent(); // The pseudo instruction is gone now.
8957   return BB;
8958 }
8959 
8960 static bool isSelectPseudo(MachineInstr &MI) {
8961   switch (MI.getOpcode()) {
8962   default:
8963     return false;
8964   case RISCV::Select_GPR_Using_CC_GPR:
8965   case RISCV::Select_FPR16_Using_CC_GPR:
8966   case RISCV::Select_FPR32_Using_CC_GPR:
8967   case RISCV::Select_FPR64_Using_CC_GPR:
8968     return true;
8969   }
8970 }
8971 
8972 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
8973                                         unsigned RelOpcode, unsigned EqOpcode,
8974                                         const RISCVSubtarget &Subtarget) {
8975   DebugLoc DL = MI.getDebugLoc();
8976   Register DstReg = MI.getOperand(0).getReg();
8977   Register Src1Reg = MI.getOperand(1).getReg();
8978   Register Src2Reg = MI.getOperand(2).getReg();
8979   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
8980   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
8981   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
8982 
8983   // Save the current FFLAGS.
8984   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
8985 
8986   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
8987                  .addReg(Src1Reg)
8988                  .addReg(Src2Reg);
8989   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
8990     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
8991 
8992   // Restore the FFLAGS.
8993   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
8994       .addReg(SavedFFlags, RegState::Kill);
8995 
8996   // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
8997   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
8998                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
8999                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
9000   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
9001     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
9002 
9003   // Erase the pseudoinstruction.
9004   MI.eraseFromParent();
9005   return BB;
9006 }
9007 
9008 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
9009                                            MachineBasicBlock *BB,
9010                                            const RISCVSubtarget &Subtarget) {
9011   // To "insert" Select_* instructions, we actually have to insert the triangle
9012   // control-flow pattern.  The incoming instructions know the destination vreg
9013   // to set, the condition code register to branch on, the true/false values to
9014   // select between, and the condcode to use to select the appropriate branch.
9015   //
9016   // We produce the following control flow:
9017   //     HeadMBB
9018   //     |  \
9019   //     |  IfFalseMBB
9020   //     | /
9021   //    TailMBB
9022   //
9023   // When we find a sequence of selects we attempt to optimize their emission
9024   // by sharing the control flow. Currently we only handle cases where we have
9025   // multiple selects with the exact same condition (same LHS, RHS and CC).
9026   // The selects may be interleaved with other instructions if the other
9027   // instructions meet some requirements we deem safe:
9028   // - They are debug instructions. Otherwise,
9029   // - They do not have side-effects, do not access memory and their inputs do
9030   //   not depend on the results of the select pseudo-instructions.
9031   // The TrueV/FalseV operands of the selects cannot depend on the result of
9032   // previous selects in the sequence.
9033   // These conditions could be further relaxed. See the X86 target for a
9034   // related approach and more information.
9035   Register LHS = MI.getOperand(1).getReg();
9036   Register RHS = MI.getOperand(2).getReg();
9037   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
9038 
9039   SmallVector<MachineInstr *, 4> SelectDebugValues;
9040   SmallSet<Register, 4> SelectDests;
9041   SelectDests.insert(MI.getOperand(0).getReg());
9042 
9043   MachineInstr *LastSelectPseudo = &MI;
9044 
9045   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
9046        SequenceMBBI != E; ++SequenceMBBI) {
9047     if (SequenceMBBI->isDebugInstr())
9048       continue;
9049     else if (isSelectPseudo(*SequenceMBBI)) {
9050       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
9051           SequenceMBBI->getOperand(2).getReg() != RHS ||
9052           SequenceMBBI->getOperand(3).getImm() != CC ||
9053           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
9054           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
9055         break;
9056       LastSelectPseudo = &*SequenceMBBI;
9057       SequenceMBBI->collectDebugValues(SelectDebugValues);
9058       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
9059     } else {
9060       if (SequenceMBBI->hasUnmodeledSideEffects() ||
9061           SequenceMBBI->mayLoadOrStore())
9062         break;
9063       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
9064             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
9065           }))
9066         break;
9067     }
9068   }
9069 
9070   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
9071   const BasicBlock *LLVM_BB = BB->getBasicBlock();
9072   DebugLoc DL = MI.getDebugLoc();
9073   MachineFunction::iterator I = ++BB->getIterator();
9074 
9075   MachineBasicBlock *HeadMBB = BB;
9076   MachineFunction *F = BB->getParent();
9077   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
9078   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
9079 
9080   F->insert(I, IfFalseMBB);
9081   F->insert(I, TailMBB);
9082 
9083   // Transfer debug instructions associated with the selects to TailMBB.
9084   for (MachineInstr *DebugInstr : SelectDebugValues) {
9085     TailMBB->push_back(DebugInstr->removeFromParent());
9086   }
9087 
9088   // Move all instructions after the sequence to TailMBB.
9089   TailMBB->splice(TailMBB->end(), HeadMBB,
9090                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
9091   // Update machine-CFG edges by transferring all successors of the current
9092   // block to the new block which will contain the Phi nodes for the selects.
9093   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
9094   // Set the successors for HeadMBB.
9095   HeadMBB->addSuccessor(IfFalseMBB);
9096   HeadMBB->addSuccessor(TailMBB);
9097 
9098   // Insert appropriate branch.
9099   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
9100     .addReg(LHS)
9101     .addReg(RHS)
9102     .addMBB(TailMBB);
9103 
9104   // IfFalseMBB just falls through to TailMBB.
9105   IfFalseMBB->addSuccessor(TailMBB);
9106 
9107   // Create PHIs for all of the select pseudo-instructions.
9108   auto SelectMBBI = MI.getIterator();
9109   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
9110   auto InsertionPoint = TailMBB->begin();
9111   while (SelectMBBI != SelectEnd) {
9112     auto Next = std::next(SelectMBBI);
9113     if (isSelectPseudo(*SelectMBBI)) {
9114       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
9115       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
9116               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
9117           .addReg(SelectMBBI->getOperand(4).getReg())
9118           .addMBB(HeadMBB)
9119           .addReg(SelectMBBI->getOperand(5).getReg())
9120           .addMBB(IfFalseMBB);
9121       SelectMBBI->eraseFromParent();
9122     }
9123     SelectMBBI = Next;
9124   }
9125 
9126   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
9127   return TailMBB;
9128 }
9129 
9130 MachineBasicBlock *
9131 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
9132                                                  MachineBasicBlock *BB) const {
9133   switch (MI.getOpcode()) {
9134   default:
9135     llvm_unreachable("Unexpected instr type to insert");
9136   case RISCV::ReadCycleWide:
9137     assert(!Subtarget.is64Bit() &&
9138            "ReadCycleWrite is only to be used on riscv32");
9139     return emitReadCycleWidePseudo(MI, BB);
9140   case RISCV::Select_GPR_Using_CC_GPR:
9141   case RISCV::Select_FPR16_Using_CC_GPR:
9142   case RISCV::Select_FPR32_Using_CC_GPR:
9143   case RISCV::Select_FPR64_Using_CC_GPR:
9144     return emitSelectPseudo(MI, BB, Subtarget);
9145   case RISCV::BuildPairF64Pseudo:
9146     return emitBuildPairF64Pseudo(MI, BB);
9147   case RISCV::SplitF64Pseudo:
9148     return emitSplitF64Pseudo(MI, BB);
9149   case RISCV::PseudoQuietFLE_H:
9150     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
9151   case RISCV::PseudoQuietFLT_H:
9152     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
9153   case RISCV::PseudoQuietFLE_S:
9154     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
9155   case RISCV::PseudoQuietFLT_S:
9156     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
9157   case RISCV::PseudoQuietFLE_D:
9158     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
9159   case RISCV::PseudoQuietFLT_D:
9160     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
9161   }
9162 }
9163 
9164 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9165                                                         SDNode *Node) const {
9166   // Add FRM dependency to any instructions with dynamic rounding mode.
9167   unsigned Opc = MI.getOpcode();
9168   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
9169   if (Idx < 0)
9170     return;
9171   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
9172     return;
9173   // If the instruction already reads FRM, don't add another read.
9174   if (MI.readsRegister(RISCV::FRM))
9175     return;
9176   MI.addOperand(
9177       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
9178 }
9179 
9180 // Calling Convention Implementation.
9181 // The expectations for frontend ABI lowering vary from target to target.
9182 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
9183 // details, but this is a longer term goal. For now, we simply try to keep the
9184 // role of the frontend as simple and well-defined as possible. The rules can
9185 // be summarised as:
9186 // * Never split up large scalar arguments. We handle them here.
9187 // * If a hardfloat calling convention is being used, and the struct may be
9188 // passed in a pair of registers (fp+fp, int+fp), and both registers are
9189 // available, then pass as two separate arguments. If either the GPRs or FPRs
9190 // are exhausted, then pass according to the rule below.
9191 // * If a struct could never be passed in registers or directly in a stack
9192 // slot (as it is larger than 2*XLEN and the floating point rules don't
9193 // apply), then pass it using a pointer with the byval attribute.
9194 // * If a struct is less than 2*XLEN, then coerce to either a two-element
9195 // word-sized array or a 2*XLEN scalar (depending on alignment).
9196 // * The frontend can determine whether a struct is returned by reference or
9197 // not based on its size and fields. If it will be returned by reference, the
9198 // frontend must modify the prototype so a pointer with the sret annotation is
9199 // passed as the first argument. This is not necessary for large scalar
9200 // returns.
9201 // * Struct return values and varargs should be coerced to structs containing
9202 // register-size fields in the same situations they would be for fixed
9203 // arguments.
9204 
9205 static const MCPhysReg ArgGPRs[] = {
9206   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
9207   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
9208 };
9209 static const MCPhysReg ArgFPR16s[] = {
9210   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
9211   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
9212 };
9213 static const MCPhysReg ArgFPR32s[] = {
9214   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
9215   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
9216 };
9217 static const MCPhysReg ArgFPR64s[] = {
9218   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
9219   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
9220 };
9221 // This is an interim calling convention and it may be changed in the future.
9222 static const MCPhysReg ArgVRs[] = {
9223     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
9224     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
9225     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
9226 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
9227                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
9228                                      RISCV::V20M2, RISCV::V22M2};
9229 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
9230                                      RISCV::V20M4};
9231 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
9232 
9233 // Pass a 2*XLEN argument that has been split into two XLEN values through
9234 // registers or the stack as necessary.
9235 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
9236                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
9237                                 MVT ValVT2, MVT LocVT2,
9238                                 ISD::ArgFlagsTy ArgFlags2) {
9239   unsigned XLenInBytes = XLen / 8;
9240   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9241     // At least one half can be passed via register.
9242     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
9243                                      VA1.getLocVT(), CCValAssign::Full));
9244   } else {
9245     // Both halves must be passed on the stack, with proper alignment.
9246     Align StackAlign =
9247         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
9248     State.addLoc(
9249         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
9250                             State.AllocateStack(XLenInBytes, StackAlign),
9251                             VA1.getLocVT(), CCValAssign::Full));
9252     State.addLoc(CCValAssign::getMem(
9253         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9254         LocVT2, CCValAssign::Full));
9255     return false;
9256   }
9257 
9258   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9259     // The second half can also be passed via register.
9260     State.addLoc(
9261         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
9262   } else {
9263     // The second half is passed via the stack, without additional alignment.
9264     State.addLoc(CCValAssign::getMem(
9265         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9266         LocVT2, CCValAssign::Full));
9267   }
9268 
9269   return false;
9270 }
9271 
9272 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
9273                                Optional<unsigned> FirstMaskArgument,
9274                                CCState &State, const RISCVTargetLowering &TLI) {
9275   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
9276   if (RC == &RISCV::VRRegClass) {
9277     // Assign the first mask argument to V0.
9278     // This is an interim calling convention and it may be changed in the
9279     // future.
9280     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
9281       return State.AllocateReg(RISCV::V0);
9282     return State.AllocateReg(ArgVRs);
9283   }
9284   if (RC == &RISCV::VRM2RegClass)
9285     return State.AllocateReg(ArgVRM2s);
9286   if (RC == &RISCV::VRM4RegClass)
9287     return State.AllocateReg(ArgVRM4s);
9288   if (RC == &RISCV::VRM8RegClass)
9289     return State.AllocateReg(ArgVRM8s);
9290   llvm_unreachable("Unhandled register class for ValueType");
9291 }
9292 
9293 // Implements the RISC-V calling convention. Returns true upon failure.
9294 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
9295                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
9296                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
9297                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
9298                      Optional<unsigned> FirstMaskArgument) {
9299   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
9300   assert(XLen == 32 || XLen == 64);
9301   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
9302 
9303   // Any return value split in to more than two values can't be returned
9304   // directly. Vectors are returned via the available vector registers.
9305   if (!LocVT.isVector() && IsRet && ValNo > 1)
9306     return true;
9307 
9308   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
9309   // variadic argument, or if no F16/F32 argument registers are available.
9310   bool UseGPRForF16_F32 = true;
9311   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
9312   // variadic argument, or if no F64 argument registers are available.
9313   bool UseGPRForF64 = true;
9314 
9315   switch (ABI) {
9316   default:
9317     llvm_unreachable("Unexpected ABI");
9318   case RISCVABI::ABI_ILP32:
9319   case RISCVABI::ABI_LP64:
9320     break;
9321   case RISCVABI::ABI_ILP32F:
9322   case RISCVABI::ABI_LP64F:
9323     UseGPRForF16_F32 = !IsFixed;
9324     break;
9325   case RISCVABI::ABI_ILP32D:
9326   case RISCVABI::ABI_LP64D:
9327     UseGPRForF16_F32 = !IsFixed;
9328     UseGPRForF64 = !IsFixed;
9329     break;
9330   }
9331 
9332   // FPR16, FPR32, and FPR64 alias each other.
9333   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
9334     UseGPRForF16_F32 = true;
9335     UseGPRForF64 = true;
9336   }
9337 
9338   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
9339   // similar local variables rather than directly checking against the target
9340   // ABI.
9341 
9342   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
9343     LocVT = XLenVT;
9344     LocInfo = CCValAssign::BCvt;
9345   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
9346     LocVT = MVT::i64;
9347     LocInfo = CCValAssign::BCvt;
9348   }
9349 
9350   // If this is a variadic argument, the RISC-V calling convention requires
9351   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
9352   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
9353   // be used regardless of whether the original argument was split during
9354   // legalisation or not. The argument will not be passed by registers if the
9355   // original type is larger than 2*XLEN, so the register alignment rule does
9356   // not apply.
9357   unsigned TwoXLenInBytes = (2 * XLen) / 8;
9358   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
9359       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
9360     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
9361     // Skip 'odd' register if necessary.
9362     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
9363       State.AllocateReg(ArgGPRs);
9364   }
9365 
9366   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
9367   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
9368       State.getPendingArgFlags();
9369 
9370   assert(PendingLocs.size() == PendingArgFlags.size() &&
9371          "PendingLocs and PendingArgFlags out of sync");
9372 
9373   // Handle passing f64 on RV32D with a soft float ABI or when floating point
9374   // registers are exhausted.
9375   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
9376     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
9377            "Can't lower f64 if it is split");
9378     // Depending on available argument GPRS, f64 may be passed in a pair of
9379     // GPRs, split between a GPR and the stack, or passed completely on the
9380     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
9381     // cases.
9382     Register Reg = State.AllocateReg(ArgGPRs);
9383     LocVT = MVT::i32;
9384     if (!Reg) {
9385       unsigned StackOffset = State.AllocateStack(8, Align(8));
9386       State.addLoc(
9387           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9388       return false;
9389     }
9390     if (!State.AllocateReg(ArgGPRs))
9391       State.AllocateStack(4, Align(4));
9392     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9393     return false;
9394   }
9395 
9396   // Fixed-length vectors are located in the corresponding scalable-vector
9397   // container types.
9398   if (ValVT.isFixedLengthVector())
9399     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9400 
9401   // Split arguments might be passed indirectly, so keep track of the pending
9402   // values. Split vectors are passed via a mix of registers and indirectly, so
9403   // treat them as we would any other argument.
9404   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
9405     LocVT = XLenVT;
9406     LocInfo = CCValAssign::Indirect;
9407     PendingLocs.push_back(
9408         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
9409     PendingArgFlags.push_back(ArgFlags);
9410     if (!ArgFlags.isSplitEnd()) {
9411       return false;
9412     }
9413   }
9414 
9415   // If the split argument only had two elements, it should be passed directly
9416   // in registers or on the stack.
9417   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
9418       PendingLocs.size() <= 2) {
9419     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
9420     // Apply the normal calling convention rules to the first half of the
9421     // split argument.
9422     CCValAssign VA = PendingLocs[0];
9423     ISD::ArgFlagsTy AF = PendingArgFlags[0];
9424     PendingLocs.clear();
9425     PendingArgFlags.clear();
9426     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
9427                                ArgFlags);
9428   }
9429 
9430   // Allocate to a register if possible, or else a stack slot.
9431   Register Reg;
9432   unsigned StoreSizeBytes = XLen / 8;
9433   Align StackAlign = Align(XLen / 8);
9434 
9435   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
9436     Reg = State.AllocateReg(ArgFPR16s);
9437   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
9438     Reg = State.AllocateReg(ArgFPR32s);
9439   else if (ValVT == MVT::f64 && !UseGPRForF64)
9440     Reg = State.AllocateReg(ArgFPR64s);
9441   else if (ValVT.isVector()) {
9442     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
9443     if (!Reg) {
9444       // For return values, the vector must be passed fully via registers or
9445       // via the stack.
9446       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
9447       // but we're using all of them.
9448       if (IsRet)
9449         return true;
9450       // Try using a GPR to pass the address
9451       if ((Reg = State.AllocateReg(ArgGPRs))) {
9452         LocVT = XLenVT;
9453         LocInfo = CCValAssign::Indirect;
9454       } else if (ValVT.isScalableVector()) {
9455         LocVT = XLenVT;
9456         LocInfo = CCValAssign::Indirect;
9457       } else {
9458         // Pass fixed-length vectors on the stack.
9459         LocVT = ValVT;
9460         StoreSizeBytes = ValVT.getStoreSize();
9461         // Align vectors to their element sizes, being careful for vXi1
9462         // vectors.
9463         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9464       }
9465     }
9466   } else {
9467     Reg = State.AllocateReg(ArgGPRs);
9468   }
9469 
9470   unsigned StackOffset =
9471       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
9472 
9473   // If we reach this point and PendingLocs is non-empty, we must be at the
9474   // end of a split argument that must be passed indirectly.
9475   if (!PendingLocs.empty()) {
9476     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
9477     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
9478 
9479     for (auto &It : PendingLocs) {
9480       if (Reg)
9481         It.convertToReg(Reg);
9482       else
9483         It.convertToMem(StackOffset);
9484       State.addLoc(It);
9485     }
9486     PendingLocs.clear();
9487     PendingArgFlags.clear();
9488     return false;
9489   }
9490 
9491   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
9492           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
9493          "Expected an XLenVT or vector types at this stage");
9494 
9495   if (Reg) {
9496     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9497     return false;
9498   }
9499 
9500   // When a floating-point value is passed on the stack, no bit-conversion is
9501   // needed.
9502   if (ValVT.isFloatingPoint()) {
9503     LocVT = ValVT;
9504     LocInfo = CCValAssign::Full;
9505   }
9506   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9507   return false;
9508 }
9509 
9510 template <typename ArgTy>
9511 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
9512   for (const auto &ArgIdx : enumerate(Args)) {
9513     MVT ArgVT = ArgIdx.value().VT;
9514     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
9515       return ArgIdx.index();
9516   }
9517   return None;
9518 }
9519 
9520 void RISCVTargetLowering::analyzeInputArgs(
9521     MachineFunction &MF, CCState &CCInfo,
9522     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
9523     RISCVCCAssignFn Fn) const {
9524   unsigned NumArgs = Ins.size();
9525   FunctionType *FType = MF.getFunction().getFunctionType();
9526 
9527   Optional<unsigned> FirstMaskArgument;
9528   if (Subtarget.hasVInstructions())
9529     FirstMaskArgument = preAssignMask(Ins);
9530 
9531   for (unsigned i = 0; i != NumArgs; ++i) {
9532     MVT ArgVT = Ins[i].VT;
9533     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
9534 
9535     Type *ArgTy = nullptr;
9536     if (IsRet)
9537       ArgTy = FType->getReturnType();
9538     else if (Ins[i].isOrigArg())
9539       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
9540 
9541     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9542     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9543            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
9544            FirstMaskArgument)) {
9545       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
9546                         << EVT(ArgVT).getEVTString() << '\n');
9547       llvm_unreachable(nullptr);
9548     }
9549   }
9550 }
9551 
9552 void RISCVTargetLowering::analyzeOutputArgs(
9553     MachineFunction &MF, CCState &CCInfo,
9554     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
9555     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
9556   unsigned NumArgs = Outs.size();
9557 
9558   Optional<unsigned> FirstMaskArgument;
9559   if (Subtarget.hasVInstructions())
9560     FirstMaskArgument = preAssignMask(Outs);
9561 
9562   for (unsigned i = 0; i != NumArgs; i++) {
9563     MVT ArgVT = Outs[i].VT;
9564     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
9565     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
9566 
9567     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9568     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9569            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
9570            FirstMaskArgument)) {
9571       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
9572                         << EVT(ArgVT).getEVTString() << "\n");
9573       llvm_unreachable(nullptr);
9574     }
9575   }
9576 }
9577 
9578 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
9579 // values.
9580 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
9581                                    const CCValAssign &VA, const SDLoc &DL,
9582                                    const RISCVSubtarget &Subtarget) {
9583   switch (VA.getLocInfo()) {
9584   default:
9585     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9586   case CCValAssign::Full:
9587     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
9588       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
9589     break;
9590   case CCValAssign::BCvt:
9591     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9592       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
9593     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9594       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
9595     else
9596       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
9597     break;
9598   }
9599   return Val;
9600 }
9601 
9602 // The caller is responsible for loading the full value if the argument is
9603 // passed with CCValAssign::Indirect.
9604 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
9605                                 const CCValAssign &VA, const SDLoc &DL,
9606                                 const RISCVTargetLowering &TLI) {
9607   MachineFunction &MF = DAG.getMachineFunction();
9608   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9609   EVT LocVT = VA.getLocVT();
9610   SDValue Val;
9611   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
9612   Register VReg = RegInfo.createVirtualRegister(RC);
9613   RegInfo.addLiveIn(VA.getLocReg(), VReg);
9614   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
9615 
9616   if (VA.getLocInfo() == CCValAssign::Indirect)
9617     return Val;
9618 
9619   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
9620 }
9621 
9622 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
9623                                    const CCValAssign &VA, const SDLoc &DL,
9624                                    const RISCVSubtarget &Subtarget) {
9625   EVT LocVT = VA.getLocVT();
9626 
9627   switch (VA.getLocInfo()) {
9628   default:
9629     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9630   case CCValAssign::Full:
9631     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
9632       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
9633     break;
9634   case CCValAssign::BCvt:
9635     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9636       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
9637     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9638       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
9639     else
9640       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
9641     break;
9642   }
9643   return Val;
9644 }
9645 
9646 // The caller is responsible for loading the full value if the argument is
9647 // passed with CCValAssign::Indirect.
9648 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
9649                                 const CCValAssign &VA, const SDLoc &DL) {
9650   MachineFunction &MF = DAG.getMachineFunction();
9651   MachineFrameInfo &MFI = MF.getFrameInfo();
9652   EVT LocVT = VA.getLocVT();
9653   EVT ValVT = VA.getValVT();
9654   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
9655   if (ValVT.isScalableVector()) {
9656     // When the value is a scalable vector, we save the pointer which points to
9657     // the scalable vector value in the stack. The ValVT will be the pointer
9658     // type, instead of the scalable vector type.
9659     ValVT = LocVT;
9660   }
9661   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
9662                                  /*IsImmutable=*/true);
9663   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
9664   SDValue Val;
9665 
9666   ISD::LoadExtType ExtType;
9667   switch (VA.getLocInfo()) {
9668   default:
9669     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9670   case CCValAssign::Full:
9671   case CCValAssign::Indirect:
9672   case CCValAssign::BCvt:
9673     ExtType = ISD::NON_EXTLOAD;
9674     break;
9675   }
9676   Val = DAG.getExtLoad(
9677       ExtType, DL, LocVT, Chain, FIN,
9678       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
9679   return Val;
9680 }
9681 
9682 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
9683                                        const CCValAssign &VA, const SDLoc &DL) {
9684   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
9685          "Unexpected VA");
9686   MachineFunction &MF = DAG.getMachineFunction();
9687   MachineFrameInfo &MFI = MF.getFrameInfo();
9688   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9689 
9690   if (VA.isMemLoc()) {
9691     // f64 is passed on the stack.
9692     int FI =
9693         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
9694     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9695     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
9696                        MachinePointerInfo::getFixedStack(MF, FI));
9697   }
9698 
9699   assert(VA.isRegLoc() && "Expected register VA assignment");
9700 
9701   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9702   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
9703   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
9704   SDValue Hi;
9705   if (VA.getLocReg() == RISCV::X17) {
9706     // Second half of f64 is passed on the stack.
9707     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
9708     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9709     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
9710                      MachinePointerInfo::getFixedStack(MF, FI));
9711   } else {
9712     // Second half of f64 is passed in another GPR.
9713     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9714     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
9715     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
9716   }
9717   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
9718 }
9719 
9720 // FastCC has less than 1% performance improvement for some particular
9721 // benchmark. But theoretically, it may has benenfit for some cases.
9722 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
9723                             unsigned ValNo, MVT ValVT, MVT LocVT,
9724                             CCValAssign::LocInfo LocInfo,
9725                             ISD::ArgFlagsTy ArgFlags, CCState &State,
9726                             bool IsFixed, bool IsRet, Type *OrigTy,
9727                             const RISCVTargetLowering &TLI,
9728                             Optional<unsigned> FirstMaskArgument) {
9729 
9730   // X5 and X6 might be used for save-restore libcall.
9731   static const MCPhysReg GPRList[] = {
9732       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
9733       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
9734       RISCV::X29, RISCV::X30, RISCV::X31};
9735 
9736   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9737     if (unsigned Reg = State.AllocateReg(GPRList)) {
9738       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9739       return false;
9740     }
9741   }
9742 
9743   if (LocVT == MVT::f16) {
9744     static const MCPhysReg FPR16List[] = {
9745         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
9746         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
9747         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
9748         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
9749     if (unsigned Reg = State.AllocateReg(FPR16List)) {
9750       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9751       return false;
9752     }
9753   }
9754 
9755   if (LocVT == MVT::f32) {
9756     static const MCPhysReg FPR32List[] = {
9757         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
9758         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
9759         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
9760         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
9761     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9762       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9763       return false;
9764     }
9765   }
9766 
9767   if (LocVT == MVT::f64) {
9768     static const MCPhysReg FPR64List[] = {
9769         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
9770         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
9771         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
9772         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
9773     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9774       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9775       return false;
9776     }
9777   }
9778 
9779   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
9780     unsigned Offset4 = State.AllocateStack(4, Align(4));
9781     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
9782     return false;
9783   }
9784 
9785   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
9786     unsigned Offset5 = State.AllocateStack(8, Align(8));
9787     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
9788     return false;
9789   }
9790 
9791   if (LocVT.isVector()) {
9792     if (unsigned Reg =
9793             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
9794       // Fixed-length vectors are located in the corresponding scalable-vector
9795       // container types.
9796       if (ValVT.isFixedLengthVector())
9797         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9798       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9799     } else {
9800       // Try and pass the address via a "fast" GPR.
9801       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
9802         LocInfo = CCValAssign::Indirect;
9803         LocVT = TLI.getSubtarget().getXLenVT();
9804         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
9805       } else if (ValVT.isFixedLengthVector()) {
9806         auto StackAlign =
9807             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9808         unsigned StackOffset =
9809             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
9810         State.addLoc(
9811             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9812       } else {
9813         // Can't pass scalable vectors on the stack.
9814         return true;
9815       }
9816     }
9817 
9818     return false;
9819   }
9820 
9821   return true; // CC didn't match.
9822 }
9823 
9824 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
9825                          CCValAssign::LocInfo LocInfo,
9826                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
9827 
9828   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9829     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
9830     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
9831     static const MCPhysReg GPRList[] = {
9832         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
9833         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
9834     if (unsigned Reg = State.AllocateReg(GPRList)) {
9835       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9836       return false;
9837     }
9838   }
9839 
9840   if (LocVT == MVT::f32) {
9841     // Pass in STG registers: F1, ..., F6
9842     //                        fs0 ... fs5
9843     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
9844                                           RISCV::F18_F, RISCV::F19_F,
9845                                           RISCV::F20_F, RISCV::F21_F};
9846     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9847       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9848       return false;
9849     }
9850   }
9851 
9852   if (LocVT == MVT::f64) {
9853     // Pass in STG registers: D1, ..., D6
9854     //                        fs6 ... fs11
9855     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
9856                                           RISCV::F24_D, RISCV::F25_D,
9857                                           RISCV::F26_D, RISCV::F27_D};
9858     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9859       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9860       return false;
9861     }
9862   }
9863 
9864   report_fatal_error("No registers left in GHC calling convention");
9865   return true;
9866 }
9867 
9868 // Transform physical registers into virtual registers.
9869 SDValue RISCVTargetLowering::LowerFormalArguments(
9870     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
9871     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
9872     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
9873 
9874   MachineFunction &MF = DAG.getMachineFunction();
9875 
9876   switch (CallConv) {
9877   default:
9878     report_fatal_error("Unsupported calling convention");
9879   case CallingConv::C:
9880   case CallingConv::Fast:
9881     break;
9882   case CallingConv::GHC:
9883     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
9884         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
9885       report_fatal_error(
9886         "GHC calling convention requires the F and D instruction set extensions");
9887   }
9888 
9889   const Function &Func = MF.getFunction();
9890   if (Func.hasFnAttribute("interrupt")) {
9891     if (!Func.arg_empty())
9892       report_fatal_error(
9893         "Functions with the interrupt attribute cannot have arguments!");
9894 
9895     StringRef Kind =
9896       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
9897 
9898     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
9899       report_fatal_error(
9900         "Function interrupt attribute argument not supported!");
9901   }
9902 
9903   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9904   MVT XLenVT = Subtarget.getXLenVT();
9905   unsigned XLenInBytes = Subtarget.getXLen() / 8;
9906   // Used with vargs to acumulate store chains.
9907   std::vector<SDValue> OutChains;
9908 
9909   // Assign locations to all of the incoming arguments.
9910   SmallVector<CCValAssign, 16> ArgLocs;
9911   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
9912 
9913   if (CallConv == CallingConv::GHC)
9914     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
9915   else
9916     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
9917                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
9918                                                    : CC_RISCV);
9919 
9920   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
9921     CCValAssign &VA = ArgLocs[i];
9922     SDValue ArgValue;
9923     // Passing f64 on RV32D with a soft float ABI must be handled as a special
9924     // case.
9925     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
9926       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
9927     else if (VA.isRegLoc())
9928       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
9929     else
9930       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
9931 
9932     if (VA.getLocInfo() == CCValAssign::Indirect) {
9933       // If the original argument was split and passed by reference (e.g. i128
9934       // on RV32), we need to load all parts of it here (using the same
9935       // address). Vectors may be partly split to registers and partly to the
9936       // stack, in which case the base address is partly offset and subsequent
9937       // stores are relative to that.
9938       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
9939                                    MachinePointerInfo()));
9940       unsigned ArgIndex = Ins[i].OrigArgIndex;
9941       unsigned ArgPartOffset = Ins[i].PartOffset;
9942       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
9943       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
9944         CCValAssign &PartVA = ArgLocs[i + 1];
9945         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
9946         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
9947         if (PartVA.getValVT().isScalableVector())
9948           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
9949         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
9950         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
9951                                      MachinePointerInfo()));
9952         ++i;
9953       }
9954       continue;
9955     }
9956     InVals.push_back(ArgValue);
9957   }
9958 
9959   if (IsVarArg) {
9960     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
9961     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
9962     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
9963     MachineFrameInfo &MFI = MF.getFrameInfo();
9964     MachineRegisterInfo &RegInfo = MF.getRegInfo();
9965     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
9966 
9967     // Offset of the first variable argument from stack pointer, and size of
9968     // the vararg save area. For now, the varargs save area is either zero or
9969     // large enough to hold a0-a7.
9970     int VaArgOffset, VarArgsSaveSize;
9971 
9972     // If all registers are allocated, then all varargs must be passed on the
9973     // stack and we don't need to save any argregs.
9974     if (ArgRegs.size() == Idx) {
9975       VaArgOffset = CCInfo.getNextStackOffset();
9976       VarArgsSaveSize = 0;
9977     } else {
9978       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
9979       VaArgOffset = -VarArgsSaveSize;
9980     }
9981 
9982     // Record the frame index of the first variable argument
9983     // which is a value necessary to VASTART.
9984     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
9985     RVFI->setVarArgsFrameIndex(FI);
9986 
9987     // If saving an odd number of registers then create an extra stack slot to
9988     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
9989     // offsets to even-numbered registered remain 2*XLEN-aligned.
9990     if (Idx % 2) {
9991       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
9992       VarArgsSaveSize += XLenInBytes;
9993     }
9994 
9995     // Copy the integer registers that may have been used for passing varargs
9996     // to the vararg save area.
9997     for (unsigned I = Idx; I < ArgRegs.size();
9998          ++I, VaArgOffset += XLenInBytes) {
9999       const Register Reg = RegInfo.createVirtualRegister(RC);
10000       RegInfo.addLiveIn(ArgRegs[I], Reg);
10001       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
10002       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10003       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10004       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
10005                                    MachinePointerInfo::getFixedStack(MF, FI));
10006       cast<StoreSDNode>(Store.getNode())
10007           ->getMemOperand()
10008           ->setValue((Value *)nullptr);
10009       OutChains.push_back(Store);
10010     }
10011     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
10012   }
10013 
10014   // All stores are grouped in one node to allow the matching between
10015   // the size of Ins and InVals. This only happens for vararg functions.
10016   if (!OutChains.empty()) {
10017     OutChains.push_back(Chain);
10018     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
10019   }
10020 
10021   return Chain;
10022 }
10023 
10024 /// isEligibleForTailCallOptimization - Check whether the call is eligible
10025 /// for tail call optimization.
10026 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
10027 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
10028     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
10029     const SmallVector<CCValAssign, 16> &ArgLocs) const {
10030 
10031   auto &Callee = CLI.Callee;
10032   auto CalleeCC = CLI.CallConv;
10033   auto &Outs = CLI.Outs;
10034   auto &Caller = MF.getFunction();
10035   auto CallerCC = Caller.getCallingConv();
10036 
10037   // Exception-handling functions need a special set of instructions to
10038   // indicate a return to the hardware. Tail-calling another function would
10039   // probably break this.
10040   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
10041   // should be expanded as new function attributes are introduced.
10042   if (Caller.hasFnAttribute("interrupt"))
10043     return false;
10044 
10045   // Do not tail call opt if the stack is used to pass parameters.
10046   if (CCInfo.getNextStackOffset() != 0)
10047     return false;
10048 
10049   // Do not tail call opt if any parameters need to be passed indirectly.
10050   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
10051   // passed indirectly. So the address of the value will be passed in a
10052   // register, or if not available, then the address is put on the stack. In
10053   // order to pass indirectly, space on the stack often needs to be allocated
10054   // in order to store the value. In this case the CCInfo.getNextStackOffset()
10055   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
10056   // are passed CCValAssign::Indirect.
10057   for (auto &VA : ArgLocs)
10058     if (VA.getLocInfo() == CCValAssign::Indirect)
10059       return false;
10060 
10061   // Do not tail call opt if either caller or callee uses struct return
10062   // semantics.
10063   auto IsCallerStructRet = Caller.hasStructRetAttr();
10064   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
10065   if (IsCallerStructRet || IsCalleeStructRet)
10066     return false;
10067 
10068   // Externally-defined functions with weak linkage should not be
10069   // tail-called. The behaviour of branch instructions in this situation (as
10070   // used for tail calls) is implementation-defined, so we cannot rely on the
10071   // linker replacing the tail call with a return.
10072   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
10073     const GlobalValue *GV = G->getGlobal();
10074     if (GV->hasExternalWeakLinkage())
10075       return false;
10076   }
10077 
10078   // The callee has to preserve all registers the caller needs to preserve.
10079   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
10080   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
10081   if (CalleeCC != CallerCC) {
10082     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
10083     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
10084       return false;
10085   }
10086 
10087   // Byval parameters hand the function a pointer directly into the stack area
10088   // we want to reuse during a tail call. Working around this *is* possible
10089   // but less efficient and uglier in LowerCall.
10090   for (auto &Arg : Outs)
10091     if (Arg.Flags.isByVal())
10092       return false;
10093 
10094   return true;
10095 }
10096 
10097 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
10098   return DAG.getDataLayout().getPrefTypeAlign(
10099       VT.getTypeForEVT(*DAG.getContext()));
10100 }
10101 
10102 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
10103 // and output parameter nodes.
10104 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
10105                                        SmallVectorImpl<SDValue> &InVals) const {
10106   SelectionDAG &DAG = CLI.DAG;
10107   SDLoc &DL = CLI.DL;
10108   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
10109   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
10110   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
10111   SDValue Chain = CLI.Chain;
10112   SDValue Callee = CLI.Callee;
10113   bool &IsTailCall = CLI.IsTailCall;
10114   CallingConv::ID CallConv = CLI.CallConv;
10115   bool IsVarArg = CLI.IsVarArg;
10116   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10117   MVT XLenVT = Subtarget.getXLenVT();
10118 
10119   MachineFunction &MF = DAG.getMachineFunction();
10120 
10121   // Analyze the operands of the call, assigning locations to each operand.
10122   SmallVector<CCValAssign, 16> ArgLocs;
10123   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10124 
10125   if (CallConv == CallingConv::GHC)
10126     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
10127   else
10128     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
10129                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10130                                                     : CC_RISCV);
10131 
10132   // Check if it's really possible to do a tail call.
10133   if (IsTailCall)
10134     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
10135 
10136   if (IsTailCall)
10137     ++NumTailCalls;
10138   else if (CLI.CB && CLI.CB->isMustTailCall())
10139     report_fatal_error("failed to perform tail call elimination on a call "
10140                        "site marked musttail");
10141 
10142   // Get a count of how many bytes are to be pushed on the stack.
10143   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
10144 
10145   // Create local copies for byval args
10146   SmallVector<SDValue, 8> ByValArgs;
10147   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10148     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10149     if (!Flags.isByVal())
10150       continue;
10151 
10152     SDValue Arg = OutVals[i];
10153     unsigned Size = Flags.getByValSize();
10154     Align Alignment = Flags.getNonZeroByValAlign();
10155 
10156     int FI =
10157         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
10158     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10159     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
10160 
10161     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
10162                           /*IsVolatile=*/false,
10163                           /*AlwaysInline=*/false, IsTailCall,
10164                           MachinePointerInfo(), MachinePointerInfo());
10165     ByValArgs.push_back(FIPtr);
10166   }
10167 
10168   if (!IsTailCall)
10169     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
10170 
10171   // Copy argument values to their designated locations.
10172   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
10173   SmallVector<SDValue, 8> MemOpChains;
10174   SDValue StackPtr;
10175   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
10176     CCValAssign &VA = ArgLocs[i];
10177     SDValue ArgValue = OutVals[i];
10178     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10179 
10180     // Handle passing f64 on RV32D with a soft float ABI as a special case.
10181     bool IsF64OnRV32DSoftABI =
10182         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
10183     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
10184       SDValue SplitF64 = DAG.getNode(
10185           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
10186       SDValue Lo = SplitF64.getValue(0);
10187       SDValue Hi = SplitF64.getValue(1);
10188 
10189       Register RegLo = VA.getLocReg();
10190       RegsToPass.push_back(std::make_pair(RegLo, Lo));
10191 
10192       if (RegLo == RISCV::X17) {
10193         // Second half of f64 is passed on the stack.
10194         // Work out the address of the stack slot.
10195         if (!StackPtr.getNode())
10196           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10197         // Emit the store.
10198         MemOpChains.push_back(
10199             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
10200       } else {
10201         // Second half of f64 is passed in another GPR.
10202         assert(RegLo < RISCV::X31 && "Invalid register pair");
10203         Register RegHigh = RegLo + 1;
10204         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
10205       }
10206       continue;
10207     }
10208 
10209     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
10210     // as any other MemLoc.
10211 
10212     // Promote the value if needed.
10213     // For now, only handle fully promoted and indirect arguments.
10214     if (VA.getLocInfo() == CCValAssign::Indirect) {
10215       // Store the argument in a stack slot and pass its address.
10216       Align StackAlign =
10217           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
10218                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
10219       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
10220       // If the original argument was split (e.g. i128), we need
10221       // to store the required parts of it here (and pass just one address).
10222       // Vectors may be partly split to registers and partly to the stack, in
10223       // which case the base address is partly offset and subsequent stores are
10224       // relative to that.
10225       unsigned ArgIndex = Outs[i].OrigArgIndex;
10226       unsigned ArgPartOffset = Outs[i].PartOffset;
10227       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10228       // Calculate the total size to store. We don't have access to what we're
10229       // actually storing other than performing the loop and collecting the
10230       // info.
10231       SmallVector<std::pair<SDValue, SDValue>> Parts;
10232       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
10233         SDValue PartValue = OutVals[i + 1];
10234         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
10235         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10236         EVT PartVT = PartValue.getValueType();
10237         if (PartVT.isScalableVector())
10238           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10239         StoredSize += PartVT.getStoreSize();
10240         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
10241         Parts.push_back(std::make_pair(PartValue, Offset));
10242         ++i;
10243       }
10244       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
10245       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
10246       MemOpChains.push_back(
10247           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
10248                        MachinePointerInfo::getFixedStack(MF, FI)));
10249       for (const auto &Part : Parts) {
10250         SDValue PartValue = Part.first;
10251         SDValue PartOffset = Part.second;
10252         SDValue Address =
10253             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
10254         MemOpChains.push_back(
10255             DAG.getStore(Chain, DL, PartValue, Address,
10256                          MachinePointerInfo::getFixedStack(MF, FI)));
10257       }
10258       ArgValue = SpillSlot;
10259     } else {
10260       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
10261     }
10262 
10263     // Use local copy if it is a byval arg.
10264     if (Flags.isByVal())
10265       ArgValue = ByValArgs[j++];
10266 
10267     if (VA.isRegLoc()) {
10268       // Queue up the argument copies and emit them at the end.
10269       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
10270     } else {
10271       assert(VA.isMemLoc() && "Argument not register or memory");
10272       assert(!IsTailCall && "Tail call not allowed if stack is used "
10273                             "for passing parameters");
10274 
10275       // Work out the address of the stack slot.
10276       if (!StackPtr.getNode())
10277         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10278       SDValue Address =
10279           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
10280                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
10281 
10282       // Emit the store.
10283       MemOpChains.push_back(
10284           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
10285     }
10286   }
10287 
10288   // Join the stores, which are independent of one another.
10289   if (!MemOpChains.empty())
10290     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
10291 
10292   SDValue Glue;
10293 
10294   // Build a sequence of copy-to-reg nodes, chained and glued together.
10295   for (auto &Reg : RegsToPass) {
10296     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
10297     Glue = Chain.getValue(1);
10298   }
10299 
10300   // Validate that none of the argument registers have been marked as
10301   // reserved, if so report an error. Do the same for the return address if this
10302   // is not a tailcall.
10303   validateCCReservedRegs(RegsToPass, MF);
10304   if (!IsTailCall &&
10305       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
10306     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10307         MF.getFunction(),
10308         "Return address register required, but has been reserved."});
10309 
10310   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
10311   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
10312   // split it and then direct call can be matched by PseudoCALL.
10313   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
10314     const GlobalValue *GV = S->getGlobal();
10315 
10316     unsigned OpFlags = RISCVII::MO_CALL;
10317     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
10318       OpFlags = RISCVII::MO_PLT;
10319 
10320     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
10321   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
10322     unsigned OpFlags = RISCVII::MO_CALL;
10323 
10324     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
10325                                                  nullptr))
10326       OpFlags = RISCVII::MO_PLT;
10327 
10328     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
10329   }
10330 
10331   // The first call operand is the chain and the second is the target address.
10332   SmallVector<SDValue, 8> Ops;
10333   Ops.push_back(Chain);
10334   Ops.push_back(Callee);
10335 
10336   // Add argument registers to the end of the list so that they are
10337   // known live into the call.
10338   for (auto &Reg : RegsToPass)
10339     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
10340 
10341   if (!IsTailCall) {
10342     // Add a register mask operand representing the call-preserved registers.
10343     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
10344     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
10345     assert(Mask && "Missing call preserved mask for calling convention");
10346     Ops.push_back(DAG.getRegisterMask(Mask));
10347   }
10348 
10349   // Glue the call to the argument copies, if any.
10350   if (Glue.getNode())
10351     Ops.push_back(Glue);
10352 
10353   // Emit the call.
10354   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10355 
10356   if (IsTailCall) {
10357     MF.getFrameInfo().setHasTailCall();
10358     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
10359   }
10360 
10361   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
10362   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
10363   Glue = Chain.getValue(1);
10364 
10365   // Mark the end of the call, which is glued to the call itself.
10366   Chain = DAG.getCALLSEQ_END(Chain,
10367                              DAG.getConstant(NumBytes, DL, PtrVT, true),
10368                              DAG.getConstant(0, DL, PtrVT, true),
10369                              Glue, DL);
10370   Glue = Chain.getValue(1);
10371 
10372   // Assign locations to each value returned by this call.
10373   SmallVector<CCValAssign, 16> RVLocs;
10374   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
10375   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
10376 
10377   // Copy all of the result registers out of their specified physreg.
10378   for (auto &VA : RVLocs) {
10379     // Copy the value out
10380     SDValue RetValue =
10381         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
10382     // Glue the RetValue to the end of the call sequence
10383     Chain = RetValue.getValue(1);
10384     Glue = RetValue.getValue(2);
10385 
10386     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10387       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
10388       SDValue RetValue2 =
10389           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
10390       Chain = RetValue2.getValue(1);
10391       Glue = RetValue2.getValue(2);
10392       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
10393                              RetValue2);
10394     }
10395 
10396     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
10397 
10398     InVals.push_back(RetValue);
10399   }
10400 
10401   return Chain;
10402 }
10403 
10404 bool RISCVTargetLowering::CanLowerReturn(
10405     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
10406     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
10407   SmallVector<CCValAssign, 16> RVLocs;
10408   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
10409 
10410   Optional<unsigned> FirstMaskArgument;
10411   if (Subtarget.hasVInstructions())
10412     FirstMaskArgument = preAssignMask(Outs);
10413 
10414   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10415     MVT VT = Outs[i].VT;
10416     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10417     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10418     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
10419                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
10420                  *this, FirstMaskArgument))
10421       return false;
10422   }
10423   return true;
10424 }
10425 
10426 SDValue
10427 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
10428                                  bool IsVarArg,
10429                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
10430                                  const SmallVectorImpl<SDValue> &OutVals,
10431                                  const SDLoc &DL, SelectionDAG &DAG) const {
10432   const MachineFunction &MF = DAG.getMachineFunction();
10433   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10434 
10435   // Stores the assignment of the return value to a location.
10436   SmallVector<CCValAssign, 16> RVLocs;
10437 
10438   // Info about the registers and stack slot.
10439   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
10440                  *DAG.getContext());
10441 
10442   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
10443                     nullptr, CC_RISCV);
10444 
10445   if (CallConv == CallingConv::GHC && !RVLocs.empty())
10446     report_fatal_error("GHC functions return void only");
10447 
10448   SDValue Glue;
10449   SmallVector<SDValue, 4> RetOps(1, Chain);
10450 
10451   // Copy the result values into the output registers.
10452   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
10453     SDValue Val = OutVals[i];
10454     CCValAssign &VA = RVLocs[i];
10455     assert(VA.isRegLoc() && "Can only return in registers!");
10456 
10457     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10458       // Handle returning f64 on RV32D with a soft float ABI.
10459       assert(VA.isRegLoc() && "Expected return via registers");
10460       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
10461                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
10462       SDValue Lo = SplitF64.getValue(0);
10463       SDValue Hi = SplitF64.getValue(1);
10464       Register RegLo = VA.getLocReg();
10465       assert(RegLo < RISCV::X31 && "Invalid register pair");
10466       Register RegHi = RegLo + 1;
10467 
10468       if (STI.isRegisterReservedByUser(RegLo) ||
10469           STI.isRegisterReservedByUser(RegHi))
10470         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10471             MF.getFunction(),
10472             "Return value register required, but has been reserved."});
10473 
10474       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
10475       Glue = Chain.getValue(1);
10476       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
10477       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
10478       Glue = Chain.getValue(1);
10479       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
10480     } else {
10481       // Handle a 'normal' return.
10482       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
10483       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
10484 
10485       if (STI.isRegisterReservedByUser(VA.getLocReg()))
10486         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10487             MF.getFunction(),
10488             "Return value register required, but has been reserved."});
10489 
10490       // Guarantee that all emitted copies are stuck together.
10491       Glue = Chain.getValue(1);
10492       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
10493     }
10494   }
10495 
10496   RetOps[0] = Chain; // Update chain.
10497 
10498   // Add the glue node if we have it.
10499   if (Glue.getNode()) {
10500     RetOps.push_back(Glue);
10501   }
10502 
10503   unsigned RetOpc = RISCVISD::RET_FLAG;
10504   // Interrupt service routines use different return instructions.
10505   const Function &Func = DAG.getMachineFunction().getFunction();
10506   if (Func.hasFnAttribute("interrupt")) {
10507     if (!Func.getReturnType()->isVoidTy())
10508       report_fatal_error(
10509           "Functions with the interrupt attribute must have void return type!");
10510 
10511     MachineFunction &MF = DAG.getMachineFunction();
10512     StringRef Kind =
10513       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10514 
10515     if (Kind == "user")
10516       RetOpc = RISCVISD::URET_FLAG;
10517     else if (Kind == "supervisor")
10518       RetOpc = RISCVISD::SRET_FLAG;
10519     else
10520       RetOpc = RISCVISD::MRET_FLAG;
10521   }
10522 
10523   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
10524 }
10525 
10526 void RISCVTargetLowering::validateCCReservedRegs(
10527     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
10528     MachineFunction &MF) const {
10529   const Function &F = MF.getFunction();
10530   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10531 
10532   if (llvm::any_of(Regs, [&STI](auto Reg) {
10533         return STI.isRegisterReservedByUser(Reg.first);
10534       }))
10535     F.getContext().diagnose(DiagnosticInfoUnsupported{
10536         F, "Argument register required, but has been reserved."});
10537 }
10538 
10539 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
10540   return CI->isTailCall();
10541 }
10542 
10543 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
10544 #define NODE_NAME_CASE(NODE)                                                   \
10545   case RISCVISD::NODE:                                                         \
10546     return "RISCVISD::" #NODE;
10547   // clang-format off
10548   switch ((RISCVISD::NodeType)Opcode) {
10549   case RISCVISD::FIRST_NUMBER:
10550     break;
10551   NODE_NAME_CASE(RET_FLAG)
10552   NODE_NAME_CASE(URET_FLAG)
10553   NODE_NAME_CASE(SRET_FLAG)
10554   NODE_NAME_CASE(MRET_FLAG)
10555   NODE_NAME_CASE(CALL)
10556   NODE_NAME_CASE(SELECT_CC)
10557   NODE_NAME_CASE(BR_CC)
10558   NODE_NAME_CASE(BuildPairF64)
10559   NODE_NAME_CASE(SplitF64)
10560   NODE_NAME_CASE(TAIL)
10561   NODE_NAME_CASE(MULHSU)
10562   NODE_NAME_CASE(SLLW)
10563   NODE_NAME_CASE(SRAW)
10564   NODE_NAME_CASE(SRLW)
10565   NODE_NAME_CASE(DIVW)
10566   NODE_NAME_CASE(DIVUW)
10567   NODE_NAME_CASE(REMUW)
10568   NODE_NAME_CASE(ROLW)
10569   NODE_NAME_CASE(RORW)
10570   NODE_NAME_CASE(CLZW)
10571   NODE_NAME_CASE(CTZW)
10572   NODE_NAME_CASE(FSLW)
10573   NODE_NAME_CASE(FSRW)
10574   NODE_NAME_CASE(FSL)
10575   NODE_NAME_CASE(FSR)
10576   NODE_NAME_CASE(FMV_H_X)
10577   NODE_NAME_CASE(FMV_X_ANYEXTH)
10578   NODE_NAME_CASE(FMV_W_X_RV64)
10579   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
10580   NODE_NAME_CASE(FCVT_X)
10581   NODE_NAME_CASE(FCVT_XU)
10582   NODE_NAME_CASE(FCVT_W_RV64)
10583   NODE_NAME_CASE(FCVT_WU_RV64)
10584   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
10585   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
10586   NODE_NAME_CASE(READ_CYCLE_WIDE)
10587   NODE_NAME_CASE(GREV)
10588   NODE_NAME_CASE(GREVW)
10589   NODE_NAME_CASE(GORC)
10590   NODE_NAME_CASE(GORCW)
10591   NODE_NAME_CASE(SHFL)
10592   NODE_NAME_CASE(SHFLW)
10593   NODE_NAME_CASE(UNSHFL)
10594   NODE_NAME_CASE(UNSHFLW)
10595   NODE_NAME_CASE(BFP)
10596   NODE_NAME_CASE(BFPW)
10597   NODE_NAME_CASE(BCOMPRESS)
10598   NODE_NAME_CASE(BCOMPRESSW)
10599   NODE_NAME_CASE(BDECOMPRESS)
10600   NODE_NAME_CASE(BDECOMPRESSW)
10601   NODE_NAME_CASE(VMV_V_X_VL)
10602   NODE_NAME_CASE(VFMV_V_F_VL)
10603   NODE_NAME_CASE(VMV_X_S)
10604   NODE_NAME_CASE(VMV_S_X_VL)
10605   NODE_NAME_CASE(VFMV_S_F_VL)
10606   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
10607   NODE_NAME_CASE(READ_VLENB)
10608   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
10609   NODE_NAME_CASE(VSLIDEUP_VL)
10610   NODE_NAME_CASE(VSLIDE1UP_VL)
10611   NODE_NAME_CASE(VSLIDEDOWN_VL)
10612   NODE_NAME_CASE(VSLIDE1DOWN_VL)
10613   NODE_NAME_CASE(VID_VL)
10614   NODE_NAME_CASE(VFNCVT_ROD_VL)
10615   NODE_NAME_CASE(VECREDUCE_ADD_VL)
10616   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
10617   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
10618   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
10619   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
10620   NODE_NAME_CASE(VECREDUCE_AND_VL)
10621   NODE_NAME_CASE(VECREDUCE_OR_VL)
10622   NODE_NAME_CASE(VECREDUCE_XOR_VL)
10623   NODE_NAME_CASE(VECREDUCE_FADD_VL)
10624   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
10625   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
10626   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
10627   NODE_NAME_CASE(ADD_VL)
10628   NODE_NAME_CASE(AND_VL)
10629   NODE_NAME_CASE(MUL_VL)
10630   NODE_NAME_CASE(OR_VL)
10631   NODE_NAME_CASE(SDIV_VL)
10632   NODE_NAME_CASE(SHL_VL)
10633   NODE_NAME_CASE(SREM_VL)
10634   NODE_NAME_CASE(SRA_VL)
10635   NODE_NAME_CASE(SRL_VL)
10636   NODE_NAME_CASE(SUB_VL)
10637   NODE_NAME_CASE(UDIV_VL)
10638   NODE_NAME_CASE(UREM_VL)
10639   NODE_NAME_CASE(XOR_VL)
10640   NODE_NAME_CASE(SADDSAT_VL)
10641   NODE_NAME_CASE(UADDSAT_VL)
10642   NODE_NAME_CASE(SSUBSAT_VL)
10643   NODE_NAME_CASE(USUBSAT_VL)
10644   NODE_NAME_CASE(FADD_VL)
10645   NODE_NAME_CASE(FSUB_VL)
10646   NODE_NAME_CASE(FMUL_VL)
10647   NODE_NAME_CASE(FDIV_VL)
10648   NODE_NAME_CASE(FNEG_VL)
10649   NODE_NAME_CASE(FABS_VL)
10650   NODE_NAME_CASE(FSQRT_VL)
10651   NODE_NAME_CASE(FMA_VL)
10652   NODE_NAME_CASE(FCOPYSIGN_VL)
10653   NODE_NAME_CASE(SMIN_VL)
10654   NODE_NAME_CASE(SMAX_VL)
10655   NODE_NAME_CASE(UMIN_VL)
10656   NODE_NAME_CASE(UMAX_VL)
10657   NODE_NAME_CASE(FMINNUM_VL)
10658   NODE_NAME_CASE(FMAXNUM_VL)
10659   NODE_NAME_CASE(MULHS_VL)
10660   NODE_NAME_CASE(MULHU_VL)
10661   NODE_NAME_CASE(FP_TO_SINT_VL)
10662   NODE_NAME_CASE(FP_TO_UINT_VL)
10663   NODE_NAME_CASE(SINT_TO_FP_VL)
10664   NODE_NAME_CASE(UINT_TO_FP_VL)
10665   NODE_NAME_CASE(FP_EXTEND_VL)
10666   NODE_NAME_CASE(FP_ROUND_VL)
10667   NODE_NAME_CASE(VWMUL_VL)
10668   NODE_NAME_CASE(VWMULU_VL)
10669   NODE_NAME_CASE(VWMULSU_VL)
10670   NODE_NAME_CASE(VWADD_VL)
10671   NODE_NAME_CASE(VWADDU_VL)
10672   NODE_NAME_CASE(VWSUB_VL)
10673   NODE_NAME_CASE(VWSUBU_VL)
10674   NODE_NAME_CASE(VWADD_W_VL)
10675   NODE_NAME_CASE(VWADDU_W_VL)
10676   NODE_NAME_CASE(VWSUB_W_VL)
10677   NODE_NAME_CASE(VWSUBU_W_VL)
10678   NODE_NAME_CASE(SETCC_VL)
10679   NODE_NAME_CASE(VSELECT_VL)
10680   NODE_NAME_CASE(VP_MERGE_VL)
10681   NODE_NAME_CASE(VMAND_VL)
10682   NODE_NAME_CASE(VMOR_VL)
10683   NODE_NAME_CASE(VMXOR_VL)
10684   NODE_NAME_CASE(VMCLR_VL)
10685   NODE_NAME_CASE(VMSET_VL)
10686   NODE_NAME_CASE(VRGATHER_VX_VL)
10687   NODE_NAME_CASE(VRGATHER_VV_VL)
10688   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
10689   NODE_NAME_CASE(VSEXT_VL)
10690   NODE_NAME_CASE(VZEXT_VL)
10691   NODE_NAME_CASE(VCPOP_VL)
10692   NODE_NAME_CASE(VLE_VL)
10693   NODE_NAME_CASE(VSE_VL)
10694   NODE_NAME_CASE(READ_CSR)
10695   NODE_NAME_CASE(WRITE_CSR)
10696   NODE_NAME_CASE(SWAP_CSR)
10697   }
10698   // clang-format on
10699   return nullptr;
10700 #undef NODE_NAME_CASE
10701 }
10702 
10703 /// getConstraintType - Given a constraint letter, return the type of
10704 /// constraint it is for this target.
10705 RISCVTargetLowering::ConstraintType
10706 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
10707   if (Constraint.size() == 1) {
10708     switch (Constraint[0]) {
10709     default:
10710       break;
10711     case 'f':
10712       return C_RegisterClass;
10713     case 'I':
10714     case 'J':
10715     case 'K':
10716       return C_Immediate;
10717     case 'A':
10718       return C_Memory;
10719     case 'S': // A symbolic address
10720       return C_Other;
10721     }
10722   } else {
10723     if (Constraint == "vr" || Constraint == "vm")
10724       return C_RegisterClass;
10725   }
10726   return TargetLowering::getConstraintType(Constraint);
10727 }
10728 
10729 std::pair<unsigned, const TargetRegisterClass *>
10730 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
10731                                                   StringRef Constraint,
10732                                                   MVT VT) const {
10733   // First, see if this is a constraint that directly corresponds to a
10734   // RISCV register class.
10735   if (Constraint.size() == 1) {
10736     switch (Constraint[0]) {
10737     case 'r':
10738       // TODO: Support fixed vectors up to XLen for P extension?
10739       if (VT.isVector())
10740         break;
10741       return std::make_pair(0U, &RISCV::GPRRegClass);
10742     case 'f':
10743       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
10744         return std::make_pair(0U, &RISCV::FPR16RegClass);
10745       if (Subtarget.hasStdExtF() && VT == MVT::f32)
10746         return std::make_pair(0U, &RISCV::FPR32RegClass);
10747       if (Subtarget.hasStdExtD() && VT == MVT::f64)
10748         return std::make_pair(0U, &RISCV::FPR64RegClass);
10749       break;
10750     default:
10751       break;
10752     }
10753   } else if (Constraint == "vr") {
10754     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
10755                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10756       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
10757         return std::make_pair(0U, RC);
10758     }
10759   } else if (Constraint == "vm") {
10760     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
10761       return std::make_pair(0U, &RISCV::VMV0RegClass);
10762   }
10763 
10764   // Clang will correctly decode the usage of register name aliases into their
10765   // official names. However, other frontends like `rustc` do not. This allows
10766   // users of these frontends to use the ABI names for registers in LLVM-style
10767   // register constraints.
10768   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
10769                                .Case("{zero}", RISCV::X0)
10770                                .Case("{ra}", RISCV::X1)
10771                                .Case("{sp}", RISCV::X2)
10772                                .Case("{gp}", RISCV::X3)
10773                                .Case("{tp}", RISCV::X4)
10774                                .Case("{t0}", RISCV::X5)
10775                                .Case("{t1}", RISCV::X6)
10776                                .Case("{t2}", RISCV::X7)
10777                                .Cases("{s0}", "{fp}", RISCV::X8)
10778                                .Case("{s1}", RISCV::X9)
10779                                .Case("{a0}", RISCV::X10)
10780                                .Case("{a1}", RISCV::X11)
10781                                .Case("{a2}", RISCV::X12)
10782                                .Case("{a3}", RISCV::X13)
10783                                .Case("{a4}", RISCV::X14)
10784                                .Case("{a5}", RISCV::X15)
10785                                .Case("{a6}", RISCV::X16)
10786                                .Case("{a7}", RISCV::X17)
10787                                .Case("{s2}", RISCV::X18)
10788                                .Case("{s3}", RISCV::X19)
10789                                .Case("{s4}", RISCV::X20)
10790                                .Case("{s5}", RISCV::X21)
10791                                .Case("{s6}", RISCV::X22)
10792                                .Case("{s7}", RISCV::X23)
10793                                .Case("{s8}", RISCV::X24)
10794                                .Case("{s9}", RISCV::X25)
10795                                .Case("{s10}", RISCV::X26)
10796                                .Case("{s11}", RISCV::X27)
10797                                .Case("{t3}", RISCV::X28)
10798                                .Case("{t4}", RISCV::X29)
10799                                .Case("{t5}", RISCV::X30)
10800                                .Case("{t6}", RISCV::X31)
10801                                .Default(RISCV::NoRegister);
10802   if (XRegFromAlias != RISCV::NoRegister)
10803     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
10804 
10805   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
10806   // TableGen record rather than the AsmName to choose registers for InlineAsm
10807   // constraints, plus we want to match those names to the widest floating point
10808   // register type available, manually select floating point registers here.
10809   //
10810   // The second case is the ABI name of the register, so that frontends can also
10811   // use the ABI names in register constraint lists.
10812   if (Subtarget.hasStdExtF()) {
10813     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
10814                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
10815                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
10816                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
10817                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
10818                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
10819                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
10820                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
10821                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
10822                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
10823                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
10824                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
10825                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
10826                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
10827                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
10828                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
10829                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
10830                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
10831                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
10832                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
10833                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
10834                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
10835                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
10836                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
10837                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
10838                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
10839                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
10840                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
10841                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
10842                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
10843                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
10844                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
10845                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
10846                         .Default(RISCV::NoRegister);
10847     if (FReg != RISCV::NoRegister) {
10848       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
10849       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
10850         unsigned RegNo = FReg - RISCV::F0_F;
10851         unsigned DReg = RISCV::F0_D + RegNo;
10852         return std::make_pair(DReg, &RISCV::FPR64RegClass);
10853       }
10854       if (VT == MVT::f32 || VT == MVT::Other)
10855         return std::make_pair(FReg, &RISCV::FPR32RegClass);
10856       if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
10857         unsigned RegNo = FReg - RISCV::F0_F;
10858         unsigned HReg = RISCV::F0_H + RegNo;
10859         return std::make_pair(HReg, &RISCV::FPR16RegClass);
10860       }
10861     }
10862   }
10863 
10864   if (Subtarget.hasVInstructions()) {
10865     Register VReg = StringSwitch<Register>(Constraint.lower())
10866                         .Case("{v0}", RISCV::V0)
10867                         .Case("{v1}", RISCV::V1)
10868                         .Case("{v2}", RISCV::V2)
10869                         .Case("{v3}", RISCV::V3)
10870                         .Case("{v4}", RISCV::V4)
10871                         .Case("{v5}", RISCV::V5)
10872                         .Case("{v6}", RISCV::V6)
10873                         .Case("{v7}", RISCV::V7)
10874                         .Case("{v8}", RISCV::V8)
10875                         .Case("{v9}", RISCV::V9)
10876                         .Case("{v10}", RISCV::V10)
10877                         .Case("{v11}", RISCV::V11)
10878                         .Case("{v12}", RISCV::V12)
10879                         .Case("{v13}", RISCV::V13)
10880                         .Case("{v14}", RISCV::V14)
10881                         .Case("{v15}", RISCV::V15)
10882                         .Case("{v16}", RISCV::V16)
10883                         .Case("{v17}", RISCV::V17)
10884                         .Case("{v18}", RISCV::V18)
10885                         .Case("{v19}", RISCV::V19)
10886                         .Case("{v20}", RISCV::V20)
10887                         .Case("{v21}", RISCV::V21)
10888                         .Case("{v22}", RISCV::V22)
10889                         .Case("{v23}", RISCV::V23)
10890                         .Case("{v24}", RISCV::V24)
10891                         .Case("{v25}", RISCV::V25)
10892                         .Case("{v26}", RISCV::V26)
10893                         .Case("{v27}", RISCV::V27)
10894                         .Case("{v28}", RISCV::V28)
10895                         .Case("{v29}", RISCV::V29)
10896                         .Case("{v30}", RISCV::V30)
10897                         .Case("{v31}", RISCV::V31)
10898                         .Default(RISCV::NoRegister);
10899     if (VReg != RISCV::NoRegister) {
10900       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
10901         return std::make_pair(VReg, &RISCV::VMRegClass);
10902       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
10903         return std::make_pair(VReg, &RISCV::VRRegClass);
10904       for (const auto *RC :
10905            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10906         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
10907           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
10908           return std::make_pair(VReg, RC);
10909         }
10910       }
10911     }
10912   }
10913 
10914   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10915 }
10916 
10917 unsigned
10918 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
10919   // Currently only support length 1 constraints.
10920   if (ConstraintCode.size() == 1) {
10921     switch (ConstraintCode[0]) {
10922     case 'A':
10923       return InlineAsm::Constraint_A;
10924     default:
10925       break;
10926     }
10927   }
10928 
10929   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
10930 }
10931 
10932 void RISCVTargetLowering::LowerAsmOperandForConstraint(
10933     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
10934     SelectionDAG &DAG) const {
10935   // Currently only support length 1 constraints.
10936   if (Constraint.length() == 1) {
10937     switch (Constraint[0]) {
10938     case 'I':
10939       // Validate & create a 12-bit signed immediate operand.
10940       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
10941         uint64_t CVal = C->getSExtValue();
10942         if (isInt<12>(CVal))
10943           Ops.push_back(
10944               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
10945       }
10946       return;
10947     case 'J':
10948       // Validate & create an integer zero operand.
10949       if (auto *C = dyn_cast<ConstantSDNode>(Op))
10950         if (C->getZExtValue() == 0)
10951           Ops.push_back(
10952               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
10953       return;
10954     case 'K':
10955       // Validate & create a 5-bit unsigned immediate operand.
10956       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
10957         uint64_t CVal = C->getZExtValue();
10958         if (isUInt<5>(CVal))
10959           Ops.push_back(
10960               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
10961       }
10962       return;
10963     case 'S':
10964       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
10965         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
10966                                                  GA->getValueType(0)));
10967       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
10968         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
10969                                                 BA->getValueType(0)));
10970       }
10971       return;
10972     default:
10973       break;
10974     }
10975   }
10976   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
10977 }
10978 
10979 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
10980                                                    Instruction *Inst,
10981                                                    AtomicOrdering Ord) const {
10982   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
10983     return Builder.CreateFence(Ord);
10984   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
10985     return Builder.CreateFence(AtomicOrdering::Release);
10986   return nullptr;
10987 }
10988 
10989 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
10990                                                     Instruction *Inst,
10991                                                     AtomicOrdering Ord) const {
10992   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
10993     return Builder.CreateFence(AtomicOrdering::Acquire);
10994   return nullptr;
10995 }
10996 
10997 TargetLowering::AtomicExpansionKind
10998 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
10999   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
11000   // point operations can't be used in an lr/sc sequence without breaking the
11001   // forward-progress guarantee.
11002   if (AI->isFloatingPointOperation())
11003     return AtomicExpansionKind::CmpXChg;
11004 
11005   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
11006   if (Size == 8 || Size == 16)
11007     return AtomicExpansionKind::MaskedIntrinsic;
11008   return AtomicExpansionKind::None;
11009 }
11010 
11011 static Intrinsic::ID
11012 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
11013   if (XLen == 32) {
11014     switch (BinOp) {
11015     default:
11016       llvm_unreachable("Unexpected AtomicRMW BinOp");
11017     case AtomicRMWInst::Xchg:
11018       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
11019     case AtomicRMWInst::Add:
11020       return Intrinsic::riscv_masked_atomicrmw_add_i32;
11021     case AtomicRMWInst::Sub:
11022       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
11023     case AtomicRMWInst::Nand:
11024       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
11025     case AtomicRMWInst::Max:
11026       return Intrinsic::riscv_masked_atomicrmw_max_i32;
11027     case AtomicRMWInst::Min:
11028       return Intrinsic::riscv_masked_atomicrmw_min_i32;
11029     case AtomicRMWInst::UMax:
11030       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
11031     case AtomicRMWInst::UMin:
11032       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
11033     }
11034   }
11035 
11036   if (XLen == 64) {
11037     switch (BinOp) {
11038     default:
11039       llvm_unreachable("Unexpected AtomicRMW BinOp");
11040     case AtomicRMWInst::Xchg:
11041       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
11042     case AtomicRMWInst::Add:
11043       return Intrinsic::riscv_masked_atomicrmw_add_i64;
11044     case AtomicRMWInst::Sub:
11045       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
11046     case AtomicRMWInst::Nand:
11047       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
11048     case AtomicRMWInst::Max:
11049       return Intrinsic::riscv_masked_atomicrmw_max_i64;
11050     case AtomicRMWInst::Min:
11051       return Intrinsic::riscv_masked_atomicrmw_min_i64;
11052     case AtomicRMWInst::UMax:
11053       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
11054     case AtomicRMWInst::UMin:
11055       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
11056     }
11057   }
11058 
11059   llvm_unreachable("Unexpected XLen\n");
11060 }
11061 
11062 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
11063     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
11064     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
11065   unsigned XLen = Subtarget.getXLen();
11066   Value *Ordering =
11067       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
11068   Type *Tys[] = {AlignedAddr->getType()};
11069   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
11070       AI->getModule(),
11071       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
11072 
11073   if (XLen == 64) {
11074     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
11075     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11076     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
11077   }
11078 
11079   Value *Result;
11080 
11081   // Must pass the shift amount needed to sign extend the loaded value prior
11082   // to performing a signed comparison for min/max. ShiftAmt is the number of
11083   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
11084   // is the number of bits to left+right shift the value in order to
11085   // sign-extend.
11086   if (AI->getOperation() == AtomicRMWInst::Min ||
11087       AI->getOperation() == AtomicRMWInst::Max) {
11088     const DataLayout &DL = AI->getModule()->getDataLayout();
11089     unsigned ValWidth =
11090         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
11091     Value *SextShamt =
11092         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
11093     Result = Builder.CreateCall(LrwOpScwLoop,
11094                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
11095   } else {
11096     Result =
11097         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
11098   }
11099 
11100   if (XLen == 64)
11101     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11102   return Result;
11103 }
11104 
11105 TargetLowering::AtomicExpansionKind
11106 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
11107     AtomicCmpXchgInst *CI) const {
11108   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
11109   if (Size == 8 || Size == 16)
11110     return AtomicExpansionKind::MaskedIntrinsic;
11111   return AtomicExpansionKind::None;
11112 }
11113 
11114 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
11115     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
11116     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
11117   unsigned XLen = Subtarget.getXLen();
11118   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
11119   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
11120   if (XLen == 64) {
11121     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
11122     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
11123     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
11124     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
11125   }
11126   Type *Tys[] = {AlignedAddr->getType()};
11127   Function *MaskedCmpXchg =
11128       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
11129   Value *Result = Builder.CreateCall(
11130       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
11131   if (XLen == 64)
11132     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
11133   return Result;
11134 }
11135 
11136 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
11137   return false;
11138 }
11139 
11140 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
11141                                                EVT VT) const {
11142   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
11143     return false;
11144 
11145   switch (FPVT.getSimpleVT().SimpleTy) {
11146   case MVT::f16:
11147     return Subtarget.hasStdExtZfh();
11148   case MVT::f32:
11149     return Subtarget.hasStdExtF();
11150   case MVT::f64:
11151     return Subtarget.hasStdExtD();
11152   default:
11153     return false;
11154   }
11155 }
11156 
11157 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
11158   // If we are using the small code model, we can reduce size of jump table
11159   // entry to 4 bytes.
11160   if (Subtarget.is64Bit() && !isPositionIndependent() &&
11161       getTargetMachine().getCodeModel() == CodeModel::Small) {
11162     return MachineJumpTableInfo::EK_Custom32;
11163   }
11164   return TargetLowering::getJumpTableEncoding();
11165 }
11166 
11167 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
11168     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
11169     unsigned uid, MCContext &Ctx) const {
11170   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
11171          getTargetMachine().getCodeModel() == CodeModel::Small);
11172   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
11173 }
11174 
11175 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
11176                                                      EVT VT) const {
11177   VT = VT.getScalarType();
11178 
11179   if (!VT.isSimple())
11180     return false;
11181 
11182   switch (VT.getSimpleVT().SimpleTy) {
11183   case MVT::f16:
11184     return Subtarget.hasStdExtZfh();
11185   case MVT::f32:
11186     return Subtarget.hasStdExtF();
11187   case MVT::f64:
11188     return Subtarget.hasStdExtD();
11189   default:
11190     break;
11191   }
11192 
11193   return false;
11194 }
11195 
11196 Register RISCVTargetLowering::getExceptionPointerRegister(
11197     const Constant *PersonalityFn) const {
11198   return RISCV::X10;
11199 }
11200 
11201 Register RISCVTargetLowering::getExceptionSelectorRegister(
11202     const Constant *PersonalityFn) const {
11203   return RISCV::X11;
11204 }
11205 
11206 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
11207   // Return false to suppress the unnecessary extensions if the LibCall
11208   // arguments or return value is f32 type for LP64 ABI.
11209   RISCVABI::ABI ABI = Subtarget.getTargetABI();
11210   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
11211     return false;
11212 
11213   return true;
11214 }
11215 
11216 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
11217   if (Subtarget.is64Bit() && Type == MVT::i32)
11218     return true;
11219 
11220   return IsSigned;
11221 }
11222 
11223 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
11224                                                  SDValue C) const {
11225   // Check integral scalar types.
11226   if (VT.isScalarInteger()) {
11227     // Omit the optimization if the sub target has the M extension and the data
11228     // size exceeds XLen.
11229     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
11230       return false;
11231     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
11232       // Break the MUL to a SLLI and an ADD/SUB.
11233       const APInt &Imm = ConstNode->getAPIntValue();
11234       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
11235           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
11236         return true;
11237       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
11238       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
11239           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
11240            (Imm - 8).isPowerOf2()))
11241         return true;
11242       // Omit the following optimization if the sub target has the M extension
11243       // and the data size >= XLen.
11244       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
11245         return false;
11246       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
11247       // a pair of LUI/ADDI.
11248       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
11249         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
11250         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
11251             (1 - ImmS).isPowerOf2())
11252         return true;
11253       }
11254     }
11255   }
11256 
11257   return false;
11258 }
11259 
11260 bool RISCVTargetLowering::isMulAddWithConstProfitable(
11261     const SDValue &AddNode, const SDValue &ConstNode) const {
11262   // Let the DAGCombiner decide for vectors.
11263   EVT VT = AddNode.getValueType();
11264   if (VT.isVector())
11265     return true;
11266 
11267   // Let the DAGCombiner decide for larger types.
11268   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
11269     return true;
11270 
11271   // It is worse if c1 is simm12 while c1*c2 is not.
11272   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
11273   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
11274   const APInt &C1 = C1Node->getAPIntValue();
11275   const APInt &C2 = C2Node->getAPIntValue();
11276   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
11277     return false;
11278 
11279   // Default to true and let the DAGCombiner decide.
11280   return true;
11281 }
11282 
11283 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
11284     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
11285     bool *Fast) const {
11286   if (!VT.isVector())
11287     return false;
11288 
11289   EVT ElemVT = VT.getVectorElementType();
11290   if (Alignment >= ElemVT.getStoreSize()) {
11291     if (Fast)
11292       *Fast = true;
11293     return true;
11294   }
11295 
11296   return false;
11297 }
11298 
11299 bool RISCVTargetLowering::splitValueIntoRegisterParts(
11300     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
11301     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
11302   bool IsABIRegCopy = CC.hasValue();
11303   EVT ValueVT = Val.getValueType();
11304   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11305     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
11306     // and cast to f32.
11307     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
11308     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
11309     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
11310                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
11311     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
11312     Parts[0] = Val;
11313     return true;
11314   }
11315 
11316   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11317     LLVMContext &Context = *DAG.getContext();
11318     EVT ValueEltVT = ValueVT.getVectorElementType();
11319     EVT PartEltVT = PartVT.getVectorElementType();
11320     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11321     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11322     if (PartVTBitSize % ValueVTBitSize == 0) {
11323       assert(PartVTBitSize >= ValueVTBitSize);
11324       // If the element types are different, bitcast to the same element type of
11325       // PartVT first.
11326       // Give an example here, we want copy a <vscale x 1 x i8> value to
11327       // <vscale x 4 x i16>.
11328       // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
11329       // subvector, then we can bitcast to <vscale x 4 x i16>.
11330       if (ValueEltVT != PartEltVT) {
11331         if (PartVTBitSize > ValueVTBitSize) {
11332           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11333           assert(Count != 0 && "The number of element should not be zero.");
11334           EVT SameEltTypeVT =
11335               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11336           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
11337                             DAG.getUNDEF(SameEltTypeVT), Val,
11338                             DAG.getVectorIdxConstant(0, DL));
11339         }
11340         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
11341       } else {
11342         Val =
11343             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
11344                         Val, DAG.getVectorIdxConstant(0, DL));
11345       }
11346       Parts[0] = Val;
11347       return true;
11348     }
11349   }
11350   return false;
11351 }
11352 
11353 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
11354     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
11355     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
11356   bool IsABIRegCopy = CC.hasValue();
11357   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11358     SDValue Val = Parts[0];
11359 
11360     // Cast the f32 to i32, truncate to i16, and cast back to f16.
11361     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
11362     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
11363     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
11364     return Val;
11365   }
11366 
11367   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11368     LLVMContext &Context = *DAG.getContext();
11369     SDValue Val = Parts[0];
11370     EVT ValueEltVT = ValueVT.getVectorElementType();
11371     EVT PartEltVT = PartVT.getVectorElementType();
11372     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11373     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11374     if (PartVTBitSize % ValueVTBitSize == 0) {
11375       assert(PartVTBitSize >= ValueVTBitSize);
11376       EVT SameEltTypeVT = ValueVT;
11377       // If the element types are different, convert it to the same element type
11378       // of PartVT.
11379       // Give an example here, we want copy a <vscale x 1 x i8> value from
11380       // <vscale x 4 x i16>.
11381       // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
11382       // then we can extract <vscale x 1 x i8>.
11383       if (ValueEltVT != PartEltVT) {
11384         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11385         assert(Count != 0 && "The number of element should not be zero.");
11386         SameEltTypeVT =
11387             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11388         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
11389       }
11390       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
11391                         DAG.getVectorIdxConstant(0, DL));
11392       return Val;
11393     }
11394   }
11395   return SDValue();
11396 }
11397 
11398 SDValue
11399 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
11400                                    SelectionDAG &DAG,
11401                                    SmallVectorImpl<SDNode *> &Created) const {
11402   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
11403   if (isIntDivCheap(N->getValueType(0), Attr))
11404     return SDValue(N, 0); // Lower SDIV as SDIV
11405 
11406   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
11407          "Unexpected divisor!");
11408 
11409   // Conditional move is needed, so do the transformation iff Zbt is enabled.
11410   if (!Subtarget.hasStdExtZbt())
11411     return SDValue();
11412 
11413   // When |Divisor| >= 2 ^ 12, it isn't profitable to do such transformation.
11414   // Besides, more critical path instructions will be generated when dividing
11415   // by 2. So we keep using the original DAGs for these cases.
11416   unsigned Lg2 = Divisor.countTrailingZeros();
11417   if (Lg2 == 1 || Lg2 >= 12)
11418     return SDValue();
11419 
11420   // fold (sdiv X, pow2)
11421   EVT VT = N->getValueType(0);
11422   if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
11423     return SDValue();
11424 
11425   SDLoc DL(N);
11426   SDValue N0 = N->getOperand(0);
11427   SDValue Zero = DAG.getConstant(0, DL, VT);
11428   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
11429 
11430   // Add (N0 < 0) ? Pow2 - 1 : 0;
11431   SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
11432   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
11433   SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
11434 
11435   Created.push_back(Cmp.getNode());
11436   Created.push_back(Add.getNode());
11437   Created.push_back(Sel.getNode());
11438 
11439   // Divide by pow2.
11440   SDValue SRA =
11441       DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
11442 
11443   // If we're dividing by a positive value, we're done.  Otherwise, we must
11444   // negate the result.
11445   if (Divisor.isNonNegative())
11446     return SRA;
11447 
11448   Created.push_back(SRA.getNode());
11449   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
11450 }
11451 
11452 #define GET_REGISTER_MATCHER
11453 #include "RISCVGenAsmMatcher.inc"
11454 
11455 Register
11456 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
11457                                        const MachineFunction &MF) const {
11458   Register Reg = MatchRegisterAltName(RegName);
11459   if (Reg == RISCV::NoRegister)
11460     Reg = MatchRegisterName(RegName);
11461   if (Reg == RISCV::NoRegister)
11462     report_fatal_error(
11463         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
11464   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
11465   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
11466     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
11467                              StringRef(RegName) + "\"."));
11468   return Reg;
11469 }
11470 
11471 namespace llvm {
11472 namespace RISCVVIntrinsicsTable {
11473 
11474 #define GET_RISCVVIntrinsicsTable_IMPL
11475 #include "RISCVGenSearchableTables.inc"
11476 
11477 } // namespace RISCVVIntrinsicsTable
11478 
11479 } // namespace llvm
11480