1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/MemoryLocation.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineJumpTableInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
30 #include "llvm/CodeGen/ValueTypes.h"
31 #include "llvm/IR/DiagnosticInfo.h"
32 #include "llvm/IR/DiagnosticPrinter.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/IntrinsicsRISCV.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/KnownBits.h"
39 #include "llvm/Support/MathExtras.h"
40 #include "llvm/Support/raw_ostream.h"
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "riscv-lower"
45 
46 STATISTIC(NumTailCalls, "Number of tail calls");
47 
48 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
49                                          const RISCVSubtarget &STI)
50     : TargetLowering(TM), Subtarget(STI) {
51 
52   if (Subtarget.isRV32E())
53     report_fatal_error("Codegen not yet implemented for RV32E");
54 
55   RISCVABI::ABI ABI = Subtarget.getTargetABI();
56   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
57 
58   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
59       !Subtarget.hasStdExtF()) {
60     errs() << "Hard-float 'f' ABI can't be used for a target that "
61                 "doesn't support the F instruction set extension (ignoring "
62                           "target-abi)\n";
63     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
64   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
65              !Subtarget.hasStdExtD()) {
66     errs() << "Hard-float 'd' ABI can't be used for a target that "
67               "doesn't support the D instruction set extension (ignoring "
68               "target-abi)\n";
69     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
70   }
71 
72   switch (ABI) {
73   default:
74     report_fatal_error("Don't know how to lower this ABI");
75   case RISCVABI::ABI_ILP32:
76   case RISCVABI::ABI_ILP32F:
77   case RISCVABI::ABI_ILP32D:
78   case RISCVABI::ABI_LP64:
79   case RISCVABI::ABI_LP64F:
80   case RISCVABI::ABI_LP64D:
81     break;
82   }
83 
84   MVT XLenVT = Subtarget.getXLenVT();
85 
86   // Set up the register classes.
87   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
88 
89   if (Subtarget.hasStdExtZfh())
90     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
91   if (Subtarget.hasStdExtF())
92     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
93   if (Subtarget.hasStdExtD())
94     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
95 
96   static const MVT::SimpleValueType BoolVecVTs[] = {
97       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
98       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
99   static const MVT::SimpleValueType IntVecVTs[] = {
100       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
101       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
102       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
103       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
104       MVT::nxv4i64, MVT::nxv8i64};
105   static const MVT::SimpleValueType F16VecVTs[] = {
106       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
107       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
108   static const MVT::SimpleValueType F32VecVTs[] = {
109       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
110   static const MVT::SimpleValueType F64VecVTs[] = {
111       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
112 
113   if (Subtarget.hasVInstructions()) {
114     auto addRegClassForRVV = [this](MVT VT) {
115       unsigned Size = VT.getSizeInBits().getKnownMinValue();
116       assert(Size <= 512 && isPowerOf2_32(Size));
117       const TargetRegisterClass *RC;
118       if (Size <= 64)
119         RC = &RISCV::VRRegClass;
120       else if (Size == 128)
121         RC = &RISCV::VRM2RegClass;
122       else if (Size == 256)
123         RC = &RISCV::VRM4RegClass;
124       else
125         RC = &RISCV::VRM8RegClass;
126 
127       addRegisterClass(VT, RC);
128     };
129 
130     for (MVT VT : BoolVecVTs)
131       addRegClassForRVV(VT);
132     for (MVT VT : IntVecVTs) {
133       if (VT.getVectorElementType() == MVT::i64 &&
134           !Subtarget.hasVInstructionsI64())
135         continue;
136       addRegClassForRVV(VT);
137     }
138 
139     if (Subtarget.hasVInstructionsF16())
140       for (MVT VT : F16VecVTs)
141         addRegClassForRVV(VT);
142 
143     if (Subtarget.hasVInstructionsF32())
144       for (MVT VT : F32VecVTs)
145         addRegClassForRVV(VT);
146 
147     if (Subtarget.hasVInstructionsF64())
148       for (MVT VT : F64VecVTs)
149         addRegClassForRVV(VT);
150 
151     if (Subtarget.useRVVForFixedLengthVectors()) {
152       auto addRegClassForFixedVectors = [this](MVT VT) {
153         MVT ContainerVT = getContainerForFixedLengthVector(VT);
154         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
155         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
156         addRegisterClass(VT, TRI.getRegClass(RCID));
157       };
158       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
159         if (useRVVForFixedLengthVectorVT(VT))
160           addRegClassForFixedVectors(VT);
161 
162       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
163         if (useRVVForFixedLengthVectorVT(VT))
164           addRegClassForFixedVectors(VT);
165     }
166   }
167 
168   // Compute derived properties from the register classes.
169   computeRegisterProperties(STI.getRegisterInfo());
170 
171   setStackPointerRegisterToSaveRestore(RISCV::X2);
172 
173   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
174     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
175 
176   // TODO: add all necessary setOperationAction calls.
177   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
178 
179   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
180   setOperationAction(ISD::BR_CC, XLenVT, Expand);
181   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
182   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
183 
184   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
185   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
186 
187   setOperationAction(ISD::VASTART, MVT::Other, Custom);
188   setOperationAction(ISD::VAARG, MVT::Other, Expand);
189   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
190   setOperationAction(ISD::VAEND, MVT::Other, Expand);
191 
192   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
193   if (!Subtarget.hasStdExtZbb()) {
194     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
195     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
196   }
197 
198   if (Subtarget.is64Bit()) {
199     setOperationAction(ISD::ADD, MVT::i32, Custom);
200     setOperationAction(ISD::SUB, MVT::i32, Custom);
201     setOperationAction(ISD::SHL, MVT::i32, Custom);
202     setOperationAction(ISD::SRA, MVT::i32, Custom);
203     setOperationAction(ISD::SRL, MVT::i32, Custom);
204 
205     setOperationAction(ISD::UADDO, MVT::i32, Custom);
206     setOperationAction(ISD::USUBO, MVT::i32, Custom);
207     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
208     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
209   } else {
210     setLibcallName(RTLIB::SHL_I128, nullptr);
211     setLibcallName(RTLIB::SRL_I128, nullptr);
212     setLibcallName(RTLIB::SRA_I128, nullptr);
213     setLibcallName(RTLIB::MUL_I128, nullptr);
214     setLibcallName(RTLIB::MULO_I64, nullptr);
215   }
216 
217   if (!Subtarget.hasStdExtM()) {
218     setOperationAction(ISD::MUL, XLenVT, Expand);
219     setOperationAction(ISD::MULHS, XLenVT, Expand);
220     setOperationAction(ISD::MULHU, XLenVT, Expand);
221     setOperationAction(ISD::SDIV, XLenVT, Expand);
222     setOperationAction(ISD::UDIV, XLenVT, Expand);
223     setOperationAction(ISD::SREM, XLenVT, Expand);
224     setOperationAction(ISD::UREM, XLenVT, Expand);
225   } else {
226     if (Subtarget.is64Bit()) {
227       setOperationAction(ISD::MUL, MVT::i32, Custom);
228       setOperationAction(ISD::MUL, MVT::i128, Custom);
229 
230       setOperationAction(ISD::SDIV, MVT::i8, Custom);
231       setOperationAction(ISD::UDIV, MVT::i8, Custom);
232       setOperationAction(ISD::UREM, MVT::i8, Custom);
233       setOperationAction(ISD::SDIV, MVT::i16, Custom);
234       setOperationAction(ISD::UDIV, MVT::i16, Custom);
235       setOperationAction(ISD::UREM, MVT::i16, Custom);
236       setOperationAction(ISD::SDIV, MVT::i32, Custom);
237       setOperationAction(ISD::UDIV, MVT::i32, Custom);
238       setOperationAction(ISD::UREM, MVT::i32, Custom);
239     } else {
240       setOperationAction(ISD::MUL, MVT::i64, Custom);
241     }
242   }
243 
244   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
245   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
246   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
247   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
248 
249   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
250   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
251   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
252 
253   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
254       Subtarget.hasStdExtZbkb()) {
255     if (Subtarget.is64Bit()) {
256       setOperationAction(ISD::ROTL, MVT::i32, Custom);
257       setOperationAction(ISD::ROTR, MVT::i32, Custom);
258     }
259   } else {
260     setOperationAction(ISD::ROTL, XLenVT, Expand);
261     setOperationAction(ISD::ROTR, XLenVT, Expand);
262   }
263 
264   if (Subtarget.hasStdExtZbp()) {
265     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
266     // more combining.
267     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
268     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
269     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
270     // BSWAP i8 doesn't exist.
271     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
272     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
273 
274     if (Subtarget.is64Bit()) {
275       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
276       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
277     }
278   } else {
279     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
280     // pattern match it directly in isel.
281     setOperationAction(ISD::BSWAP, XLenVT,
282                        (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
283                            ? Legal
284                            : Expand);
285     // Zbkb can use rev8+brev8 to implement bitreverse.
286     setOperationAction(ISD::BITREVERSE, XLenVT,
287                        Subtarget.hasStdExtZbkb() ? Custom : Expand);
288   }
289 
290   if (Subtarget.hasStdExtZbb()) {
291     setOperationAction(ISD::SMIN, XLenVT, Legal);
292     setOperationAction(ISD::SMAX, XLenVT, Legal);
293     setOperationAction(ISD::UMIN, XLenVT, Legal);
294     setOperationAction(ISD::UMAX, XLenVT, Legal);
295 
296     if (Subtarget.is64Bit()) {
297       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
298       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
299       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
300       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
301     }
302   } else {
303     setOperationAction(ISD::CTTZ, XLenVT, Expand);
304     setOperationAction(ISD::CTLZ, XLenVT, Expand);
305     setOperationAction(ISD::CTPOP, XLenVT, Expand);
306   }
307 
308   if (Subtarget.hasStdExtZbt()) {
309     setOperationAction(ISD::FSHL, XLenVT, Custom);
310     setOperationAction(ISD::FSHR, XLenVT, Custom);
311     setOperationAction(ISD::SELECT, XLenVT, Legal);
312 
313     if (Subtarget.is64Bit()) {
314       setOperationAction(ISD::FSHL, MVT::i32, Custom);
315       setOperationAction(ISD::FSHR, MVT::i32, Custom);
316     }
317   } else {
318     setOperationAction(ISD::SELECT, XLenVT, Custom);
319   }
320 
321   static const ISD::CondCode FPCCToExpand[] = {
322       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
323       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
324       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
325 
326   static const ISD::NodeType FPOpToExpand[] = {
327       ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
328       ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};
329 
330   if (Subtarget.hasStdExtZfh())
331     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
332 
333   if (Subtarget.hasStdExtZfh()) {
334     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
335     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
336     setOperationAction(ISD::LRINT, MVT::f16, Legal);
337     setOperationAction(ISD::LLRINT, MVT::f16, Legal);
338     setOperationAction(ISD::LROUND, MVT::f16, Legal);
339     setOperationAction(ISD::LLROUND, MVT::f16, Legal);
340     setOperationAction(ISD::STRICT_LRINT, MVT::f16, Legal);
341     setOperationAction(ISD::STRICT_LLRINT, MVT::f16, Legal);
342     setOperationAction(ISD::STRICT_LROUND, MVT::f16, Legal);
343     setOperationAction(ISD::STRICT_LLROUND, MVT::f16, Legal);
344     setOperationAction(ISD::STRICT_FADD, MVT::f16, Legal);
345     setOperationAction(ISD::STRICT_FMA, MVT::f16, Legal);
346     setOperationAction(ISD::STRICT_FSUB, MVT::f16, Legal);
347     setOperationAction(ISD::STRICT_FMUL, MVT::f16, Legal);
348     setOperationAction(ISD::STRICT_FDIV, MVT::f16, Legal);
349     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
350     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
351     setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Legal);
352     setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Legal);
353     setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Legal);
354     for (auto CC : FPCCToExpand)
355       setCondCodeAction(CC, MVT::f16, Expand);
356     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
357     setOperationAction(ISD::SELECT, MVT::f16, Custom);
358     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
359 
360     setOperationAction(ISD::FREM,       MVT::f16, Promote);
361     setOperationAction(ISD::FCEIL,      MVT::f16, Promote);
362     setOperationAction(ISD::FFLOOR,     MVT::f16, Promote);
363     setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
364     setOperationAction(ISD::FRINT,      MVT::f16, Promote);
365     setOperationAction(ISD::FROUND,     MVT::f16, Promote);
366     setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote);
367     setOperationAction(ISD::FTRUNC,     MVT::f16, Promote);
368     setOperationAction(ISD::FPOW,       MVT::f16, Promote);
369     setOperationAction(ISD::FPOWI,      MVT::f16, Promote);
370     setOperationAction(ISD::FCOS,       MVT::f16, Promote);
371     setOperationAction(ISD::FSIN,       MVT::f16, Promote);
372     setOperationAction(ISD::FSINCOS,    MVT::f16, Promote);
373     setOperationAction(ISD::FEXP,       MVT::f16, Promote);
374     setOperationAction(ISD::FEXP2,      MVT::f16, Promote);
375     setOperationAction(ISD::FLOG,       MVT::f16, Promote);
376     setOperationAction(ISD::FLOG2,      MVT::f16, Promote);
377     setOperationAction(ISD::FLOG10,     MVT::f16, Promote);
378 
379     // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
380     // complete support for all operations in LegalizeDAG.
381 
382     // We need to custom promote this.
383     if (Subtarget.is64Bit())
384       setOperationAction(ISD::FPOWI, MVT::i32, Custom);
385   }
386 
387   if (Subtarget.hasStdExtF()) {
388     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
389     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
390     setOperationAction(ISD::LRINT, MVT::f32, Legal);
391     setOperationAction(ISD::LLRINT, MVT::f32, Legal);
392     setOperationAction(ISD::LROUND, MVT::f32, Legal);
393     setOperationAction(ISD::LLROUND, MVT::f32, Legal);
394     setOperationAction(ISD::STRICT_LRINT, MVT::f32, Legal);
395     setOperationAction(ISD::STRICT_LLRINT, MVT::f32, Legal);
396     setOperationAction(ISD::STRICT_LROUND, MVT::f32, Legal);
397     setOperationAction(ISD::STRICT_LLROUND, MVT::f32, Legal);
398     setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
399     setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
400     setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
401     setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
402     setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
403     setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
404     setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
405     setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
406     for (auto CC : FPCCToExpand)
407       setCondCodeAction(CC, MVT::f32, Expand);
408     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
409     setOperationAction(ISD::SELECT, MVT::f32, Custom);
410     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
411     for (auto Op : FPOpToExpand)
412       setOperationAction(Op, MVT::f32, Expand);
413     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
414     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
415   }
416 
417   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
418     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
419 
420   if (Subtarget.hasStdExtD()) {
421     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
422     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
423     setOperationAction(ISD::LRINT, MVT::f64, Legal);
424     setOperationAction(ISD::LLRINT, MVT::f64, Legal);
425     setOperationAction(ISD::LROUND, MVT::f64, Legal);
426     setOperationAction(ISD::LLROUND, MVT::f64, Legal);
427     setOperationAction(ISD::STRICT_LRINT, MVT::f64, Legal);
428     setOperationAction(ISD::STRICT_LLRINT, MVT::f64, Legal);
429     setOperationAction(ISD::STRICT_LROUND, MVT::f64, Legal);
430     setOperationAction(ISD::STRICT_LLROUND, MVT::f64, Legal);
431     setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
432     setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
433     setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
434     setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
435     setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
436     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
437     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
438     setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
439     setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
440     setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
441     for (auto CC : FPCCToExpand)
442       setCondCodeAction(CC, MVT::f64, Expand);
443     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
444     setOperationAction(ISD::SELECT, MVT::f64, Custom);
445     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
446     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
447     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
448     for (auto Op : FPOpToExpand)
449       setOperationAction(Op, MVT::f64, Expand);
450     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
451     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
452   }
453 
454   if (Subtarget.is64Bit()) {
455     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
456     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
457     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
458     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
459   }
460 
461   if (Subtarget.hasStdExtF()) {
462     setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
463     setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);
464 
465     setOperationAction(ISD::STRICT_FP_TO_UINT, XLenVT, Legal);
466     setOperationAction(ISD::STRICT_FP_TO_SINT, XLenVT, Legal);
467     setOperationAction(ISD::STRICT_UINT_TO_FP, XLenVT, Legal);
468     setOperationAction(ISD::STRICT_SINT_TO_FP, XLenVT, Legal);
469 
470     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
471     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
472   }
473 
474   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
475   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
476   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
477   setOperationAction(ISD::JumpTable, XLenVT, Custom);
478 
479   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
480 
481   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
482   // Unfortunately this can't be determined just from the ISA naming string.
483   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
484                      Subtarget.is64Bit() ? Legal : Custom);
485 
486   setOperationAction(ISD::TRAP, MVT::Other, Legal);
487   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
488   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
489   if (Subtarget.is64Bit())
490     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
491 
492   if (Subtarget.hasStdExtA()) {
493     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
494     setMinCmpXchgSizeInBits(32);
495   } else {
496     setMaxAtomicSizeInBitsSupported(0);
497   }
498 
499   setBooleanContents(ZeroOrOneBooleanContent);
500 
501   if (Subtarget.hasVInstructions()) {
502     setBooleanVectorContents(ZeroOrOneBooleanContent);
503 
504     setOperationAction(ISD::VSCALE, XLenVT, Custom);
505 
506     // RVV intrinsics may have illegal operands.
507     // We also need to custom legalize vmv.x.s.
508     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
509     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
510     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
511     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
512     if (Subtarget.is64Bit()) {
513       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
514     } else {
515       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
516       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
517     }
518 
519     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
520     setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
521 
522     static const unsigned IntegerVPOps[] = {
523         ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
524         ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
525         ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
526         ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
527         ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
528         ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
529         ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
530         ISD::VP_MERGE,       ISD::VP_SELECT};
531 
532     static const unsigned FloatingPointVPOps[] = {
533         ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
534         ISD::VP_FDIV,        ISD::VP_FNEG,        ISD::VP_FMA,
535         ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD, ISD::VP_REDUCE_FMIN,
536         ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,       ISD::VP_SELECT};
537 
538     if (!Subtarget.is64Bit()) {
539       // We must custom-lower certain vXi64 operations on RV32 due to the vector
540       // element type being illegal.
541       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
542       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
543 
544       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
545       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
546       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
547       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
548       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
549       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
550       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
551       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
552 
553       setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom);
554       setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom);
555       setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom);
556       setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom);
557       setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom);
558       setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom);
559       setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom);
560       setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom);
561     }
562 
563     for (MVT VT : BoolVecVTs) {
564       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
565 
566       // Mask VTs are custom-expanded into a series of standard nodes
567       setOperationAction(ISD::TRUNCATE, VT, Custom);
568       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
569       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
570       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
571 
572       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
573       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
574 
575       setOperationAction(ISD::SELECT, VT, Custom);
576       setOperationAction(ISD::SELECT_CC, VT, Expand);
577       setOperationAction(ISD::VSELECT, VT, Expand);
578       setOperationAction(ISD::VP_MERGE, VT, Expand);
579       setOperationAction(ISD::VP_SELECT, VT, Expand);
580 
581       setOperationAction(ISD::VP_AND, VT, Custom);
582       setOperationAction(ISD::VP_OR, VT, Custom);
583       setOperationAction(ISD::VP_XOR, VT, Custom);
584 
585       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
586       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
587       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
588 
589       setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
590       setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
591       setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
592 
593       // RVV has native int->float & float->int conversions where the
594       // element type sizes are within one power-of-two of each other. Any
595       // wider distances between type sizes have to be lowered as sequences
596       // which progressively narrow the gap in stages.
597       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
598       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
599       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
600       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
601 
602       // Expand all extending loads to types larger than this, and truncating
603       // stores from types larger than this.
604       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
605         setTruncStoreAction(OtherVT, VT, Expand);
606         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
607         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
608         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
609       }
610     }
611 
612     for (MVT VT : IntVecVTs) {
613       if (VT.getVectorElementType() == MVT::i64 &&
614           !Subtarget.hasVInstructionsI64())
615         continue;
616 
617       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
618       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
619 
620       // Vectors implement MULHS/MULHU.
621       setOperationAction(ISD::SMUL_LOHI, VT, Expand);
622       setOperationAction(ISD::UMUL_LOHI, VT, Expand);
623 
624       // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
625       if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV()) {
626         setOperationAction(ISD::MULHU, VT, Expand);
627         setOperationAction(ISD::MULHS, VT, Expand);
628       }
629 
630       setOperationAction(ISD::SMIN, VT, Legal);
631       setOperationAction(ISD::SMAX, VT, Legal);
632       setOperationAction(ISD::UMIN, VT, Legal);
633       setOperationAction(ISD::UMAX, VT, Legal);
634 
635       setOperationAction(ISD::ROTL, VT, Expand);
636       setOperationAction(ISD::ROTR, VT, Expand);
637 
638       setOperationAction(ISD::CTTZ, VT, Expand);
639       setOperationAction(ISD::CTLZ, VT, Expand);
640       setOperationAction(ISD::CTPOP, VT, Expand);
641 
642       setOperationAction(ISD::BSWAP, VT, Expand);
643 
644       // Custom-lower extensions and truncations from/to mask types.
645       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
646       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
647       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
648 
649       // RVV has native int->float & float->int conversions where the
650       // element type sizes are within one power-of-two of each other. Any
651       // wider distances between type sizes have to be lowered as sequences
652       // which progressively narrow the gap in stages.
653       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
654       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
655       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
656       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
657 
658       setOperationAction(ISD::SADDSAT, VT, Legal);
659       setOperationAction(ISD::UADDSAT, VT, Legal);
660       setOperationAction(ISD::SSUBSAT, VT, Legal);
661       setOperationAction(ISD::USUBSAT, VT, Legal);
662 
663       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
664       // nodes which truncate by one power of two at a time.
665       setOperationAction(ISD::TRUNCATE, VT, Custom);
666 
667       // Custom-lower insert/extract operations to simplify patterns.
668       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
669       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
670 
671       // Custom-lower reduction operations to set up the corresponding custom
672       // nodes' operands.
673       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
674       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
675       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
676       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
677       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
678       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
679       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
680       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
681 
682       for (unsigned VPOpc : IntegerVPOps)
683         setOperationAction(VPOpc, VT, Custom);
684 
685       setOperationAction(ISD::LOAD, VT, Custom);
686       setOperationAction(ISD::STORE, VT, Custom);
687 
688       setOperationAction(ISD::MLOAD, VT, Custom);
689       setOperationAction(ISD::MSTORE, VT, Custom);
690       setOperationAction(ISD::MGATHER, VT, Custom);
691       setOperationAction(ISD::MSCATTER, VT, Custom);
692 
693       setOperationAction(ISD::VP_LOAD, VT, Custom);
694       setOperationAction(ISD::VP_STORE, VT, Custom);
695       setOperationAction(ISD::VP_GATHER, VT, Custom);
696       setOperationAction(ISD::VP_SCATTER, VT, Custom);
697 
698       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
699       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
700       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
701 
702       setOperationAction(ISD::SELECT, VT, Custom);
703       setOperationAction(ISD::SELECT_CC, VT, Expand);
704 
705       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
706       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
707 
708       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
709         setTruncStoreAction(VT, OtherVT, Expand);
710         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
711         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
712         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
713       }
714 
715       // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
716       // type that can represent the value exactly.
717       if (VT.getVectorElementType() != MVT::i64) {
718         MVT FloatEltVT =
719             VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
720         EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
721         if (isTypeLegal(FloatVT)) {
722           setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
723           setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
724         }
725       }
726     }
727 
728     // Expand various CCs to best match the RVV ISA, which natively supports UNE
729     // but no other unordered comparisons, and supports all ordered comparisons
730     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
731     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
732     // and we pattern-match those back to the "original", swapping operands once
733     // more. This way we catch both operations and both "vf" and "fv" forms with
734     // fewer patterns.
735     static const ISD::CondCode VFPCCToExpand[] = {
736         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
737         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
738         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
739     };
740 
741     // Sets common operation actions on RVV floating-point vector types.
742     const auto SetCommonVFPActions = [&](MVT VT) {
743       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
744       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
745       // sizes are within one power-of-two of each other. Therefore conversions
746       // between vXf16 and vXf64 must be lowered as sequences which convert via
747       // vXf32.
748       setOperationAction(ISD::FP_ROUND, VT, Custom);
749       setOperationAction(ISD::FP_EXTEND, VT, Custom);
750       // Custom-lower insert/extract operations to simplify patterns.
751       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
752       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
753       // Expand various condition codes (explained above).
754       for (auto CC : VFPCCToExpand)
755         setCondCodeAction(CC, VT, Expand);
756 
757       setOperationAction(ISD::FMINNUM, VT, Legal);
758       setOperationAction(ISD::FMAXNUM, VT, Legal);
759 
760       setOperationAction(ISD::FTRUNC, VT, Custom);
761       setOperationAction(ISD::FCEIL, VT, Custom);
762       setOperationAction(ISD::FFLOOR, VT, Custom);
763       setOperationAction(ISD::FROUND, VT, Custom);
764 
765       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
766       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
767       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
768       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
769 
770       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
771 
772       setOperationAction(ISD::LOAD, VT, Custom);
773       setOperationAction(ISD::STORE, VT, Custom);
774 
775       setOperationAction(ISD::MLOAD, VT, Custom);
776       setOperationAction(ISD::MSTORE, VT, Custom);
777       setOperationAction(ISD::MGATHER, VT, Custom);
778       setOperationAction(ISD::MSCATTER, VT, Custom);
779 
780       setOperationAction(ISD::VP_LOAD, VT, Custom);
781       setOperationAction(ISD::VP_STORE, VT, Custom);
782       setOperationAction(ISD::VP_GATHER, VT, Custom);
783       setOperationAction(ISD::VP_SCATTER, VT, Custom);
784 
785       setOperationAction(ISD::SELECT, VT, Custom);
786       setOperationAction(ISD::SELECT_CC, VT, Expand);
787 
788       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
789       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
790       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
791 
792       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
793 
794       for (unsigned VPOpc : FloatingPointVPOps)
795         setOperationAction(VPOpc, VT, Custom);
796     };
797 
798     // Sets common extload/truncstore actions on RVV floating-point vector
799     // types.
800     const auto SetCommonVFPExtLoadTruncStoreActions =
801         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
802           for (auto SmallVT : SmallerVTs) {
803             setTruncStoreAction(VT, SmallVT, Expand);
804             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
805           }
806         };
807 
808     if (Subtarget.hasVInstructionsF16())
809       for (MVT VT : F16VecVTs)
810         SetCommonVFPActions(VT);
811 
812     for (MVT VT : F32VecVTs) {
813       if (Subtarget.hasVInstructionsF32())
814         SetCommonVFPActions(VT);
815       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
816     }
817 
818     for (MVT VT : F64VecVTs) {
819       if (Subtarget.hasVInstructionsF64())
820         SetCommonVFPActions(VT);
821       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
822       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
823     }
824 
825     if (Subtarget.useRVVForFixedLengthVectors()) {
826       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
827         if (!useRVVForFixedLengthVectorVT(VT))
828           continue;
829 
830         // By default everything must be expanded.
831         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
832           setOperationAction(Op, VT, Expand);
833         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
834           setTruncStoreAction(VT, OtherVT, Expand);
835           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
836           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
837           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
838         }
839 
840         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
841         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
842         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
843 
844         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
845         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
846 
847         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
848         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
849 
850         setOperationAction(ISD::LOAD, VT, Custom);
851         setOperationAction(ISD::STORE, VT, Custom);
852 
853         setOperationAction(ISD::SETCC, VT, Custom);
854 
855         setOperationAction(ISD::SELECT, VT, Custom);
856 
857         setOperationAction(ISD::TRUNCATE, VT, Custom);
858 
859         setOperationAction(ISD::BITCAST, VT, Custom);
860 
861         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
862         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
863         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
864 
865         setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
866         setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
867         setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);
868 
869         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
870         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
871         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
872         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
873 
874         // Operations below are different for between masks and other vectors.
875         if (VT.getVectorElementType() == MVT::i1) {
876           setOperationAction(ISD::VP_AND, VT, Custom);
877           setOperationAction(ISD::VP_OR, VT, Custom);
878           setOperationAction(ISD::VP_XOR, VT, Custom);
879           setOperationAction(ISD::AND, VT, Custom);
880           setOperationAction(ISD::OR, VT, Custom);
881           setOperationAction(ISD::XOR, VT, Custom);
882           continue;
883         }
884 
885         // Use SPLAT_VECTOR to prevent type legalization from destroying the
886         // splats when type legalizing i64 scalar on RV32.
887         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
888         // improvements first.
889         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
890           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
891           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
892         }
893 
894         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
895         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
896 
897         setOperationAction(ISD::MLOAD, VT, Custom);
898         setOperationAction(ISD::MSTORE, VT, Custom);
899         setOperationAction(ISD::MGATHER, VT, Custom);
900         setOperationAction(ISD::MSCATTER, VT, Custom);
901 
902         setOperationAction(ISD::VP_LOAD, VT, Custom);
903         setOperationAction(ISD::VP_STORE, VT, Custom);
904         setOperationAction(ISD::VP_GATHER, VT, Custom);
905         setOperationAction(ISD::VP_SCATTER, VT, Custom);
906 
907         setOperationAction(ISD::ADD, VT, Custom);
908         setOperationAction(ISD::MUL, VT, Custom);
909         setOperationAction(ISD::SUB, VT, Custom);
910         setOperationAction(ISD::AND, VT, Custom);
911         setOperationAction(ISD::OR, VT, Custom);
912         setOperationAction(ISD::XOR, VT, Custom);
913         setOperationAction(ISD::SDIV, VT, Custom);
914         setOperationAction(ISD::SREM, VT, Custom);
915         setOperationAction(ISD::UDIV, VT, Custom);
916         setOperationAction(ISD::UREM, VT, Custom);
917         setOperationAction(ISD::SHL, VT, Custom);
918         setOperationAction(ISD::SRA, VT, Custom);
919         setOperationAction(ISD::SRL, VT, Custom);
920 
921         setOperationAction(ISD::SMIN, VT, Custom);
922         setOperationAction(ISD::SMAX, VT, Custom);
923         setOperationAction(ISD::UMIN, VT, Custom);
924         setOperationAction(ISD::UMAX, VT, Custom);
925         setOperationAction(ISD::ABS,  VT, Custom);
926 
927         // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
928         if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV()) {
929           setOperationAction(ISD::MULHS, VT, Custom);
930           setOperationAction(ISD::MULHU, VT, Custom);
931         }
932 
933         setOperationAction(ISD::SADDSAT, VT, Custom);
934         setOperationAction(ISD::UADDSAT, VT, Custom);
935         setOperationAction(ISD::SSUBSAT, VT, Custom);
936         setOperationAction(ISD::USUBSAT, VT, Custom);
937 
938         setOperationAction(ISD::VSELECT, VT, Custom);
939         setOperationAction(ISD::SELECT_CC, VT, Expand);
940 
941         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
942         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
943         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
944 
945         // Custom-lower reduction operations to set up the corresponding custom
946         // nodes' operands.
947         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
948         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
949         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
950         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
951         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
952 
953         for (unsigned VPOpc : IntegerVPOps)
954           setOperationAction(VPOpc, VT, Custom);
955 
956         // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
957         // type that can represent the value exactly.
958         if (VT.getVectorElementType() != MVT::i64) {
959           MVT FloatEltVT =
960               VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
961           EVT FloatVT =
962               MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
963           if (isTypeLegal(FloatVT)) {
964             setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
965             setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
966           }
967         }
968       }
969 
970       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
971         if (!useRVVForFixedLengthVectorVT(VT))
972           continue;
973 
974         // By default everything must be expanded.
975         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
976           setOperationAction(Op, VT, Expand);
977         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
978           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
979           setTruncStoreAction(VT, OtherVT, Expand);
980         }
981 
982         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
983         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
984         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
985 
986         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
987         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
988         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
989         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
990         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
991 
992         setOperationAction(ISD::LOAD, VT, Custom);
993         setOperationAction(ISD::STORE, VT, Custom);
994         setOperationAction(ISD::MLOAD, VT, Custom);
995         setOperationAction(ISD::MSTORE, VT, Custom);
996         setOperationAction(ISD::MGATHER, VT, Custom);
997         setOperationAction(ISD::MSCATTER, VT, Custom);
998 
999         setOperationAction(ISD::VP_LOAD, VT, Custom);
1000         setOperationAction(ISD::VP_STORE, VT, Custom);
1001         setOperationAction(ISD::VP_GATHER, VT, Custom);
1002         setOperationAction(ISD::VP_SCATTER, VT, Custom);
1003 
1004         setOperationAction(ISD::FADD, VT, Custom);
1005         setOperationAction(ISD::FSUB, VT, Custom);
1006         setOperationAction(ISD::FMUL, VT, Custom);
1007         setOperationAction(ISD::FDIV, VT, Custom);
1008         setOperationAction(ISD::FNEG, VT, Custom);
1009         setOperationAction(ISD::FABS, VT, Custom);
1010         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1011         setOperationAction(ISD::FSQRT, VT, Custom);
1012         setOperationAction(ISD::FMA, VT, Custom);
1013         setOperationAction(ISD::FMINNUM, VT, Custom);
1014         setOperationAction(ISD::FMAXNUM, VT, Custom);
1015 
1016         setOperationAction(ISD::FP_ROUND, VT, Custom);
1017         setOperationAction(ISD::FP_EXTEND, VT, Custom);
1018 
1019         setOperationAction(ISD::FTRUNC, VT, Custom);
1020         setOperationAction(ISD::FCEIL, VT, Custom);
1021         setOperationAction(ISD::FFLOOR, VT, Custom);
1022         setOperationAction(ISD::FROUND, VT, Custom);
1023 
1024         for (auto CC : VFPCCToExpand)
1025           setCondCodeAction(CC, VT, Expand);
1026 
1027         setOperationAction(ISD::VSELECT, VT, Custom);
1028         setOperationAction(ISD::SELECT, VT, Custom);
1029         setOperationAction(ISD::SELECT_CC, VT, Expand);
1030 
1031         setOperationAction(ISD::BITCAST, VT, Custom);
1032 
1033         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
1034         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
1035         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
1036         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
1037 
1038         for (unsigned VPOpc : FloatingPointVPOps)
1039           setOperationAction(VPOpc, VT, Custom);
1040       }
1041 
1042       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
1043       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
1044       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
1045       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
1046       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1047       if (Subtarget.hasStdExtZfh())
1048         setOperationAction(ISD::BITCAST, MVT::f16, Custom);
1049       if (Subtarget.hasStdExtF())
1050         setOperationAction(ISD::BITCAST, MVT::f32, Custom);
1051       if (Subtarget.hasStdExtD())
1052         setOperationAction(ISD::BITCAST, MVT::f64, Custom);
1053     }
1054   }
1055 
1056   // Function alignments.
1057   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
1058   setMinFunctionAlignment(FunctionAlignment);
1059   setPrefFunctionAlignment(FunctionAlignment);
1060 
1061   setMinimumJumpTableEntries(5);
1062 
1063   // Jumps are expensive, compared to logic
1064   setJumpIsExpensive();
1065 
1066   setTargetDAGCombine(ISD::ADD);
1067   setTargetDAGCombine(ISD::SUB);
1068   setTargetDAGCombine(ISD::AND);
1069   setTargetDAGCombine(ISD::OR);
1070   setTargetDAGCombine(ISD::XOR);
1071   setTargetDAGCombine(ISD::ANY_EXTEND);
1072   if (Subtarget.hasStdExtF()) {
1073     setTargetDAGCombine(ISD::ZERO_EXTEND);
1074     setTargetDAGCombine(ISD::FP_TO_SINT);
1075     setTargetDAGCombine(ISD::FP_TO_UINT);
1076     setTargetDAGCombine(ISD::FP_TO_SINT_SAT);
1077     setTargetDAGCombine(ISD::FP_TO_UINT_SAT);
1078   }
1079   if (Subtarget.hasVInstructions()) {
1080     setTargetDAGCombine(ISD::FCOPYSIGN);
1081     setTargetDAGCombine(ISD::MGATHER);
1082     setTargetDAGCombine(ISD::MSCATTER);
1083     setTargetDAGCombine(ISD::VP_GATHER);
1084     setTargetDAGCombine(ISD::VP_SCATTER);
1085     setTargetDAGCombine(ISD::SRA);
1086     setTargetDAGCombine(ISD::SRL);
1087     setTargetDAGCombine(ISD::SHL);
1088     setTargetDAGCombine(ISD::STORE);
1089     setTargetDAGCombine(ISD::SPLAT_VECTOR);
1090   }
1091 
1092   setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
1093   setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
1094 }
1095 
1096 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
1097                                             LLVMContext &Context,
1098                                             EVT VT) const {
1099   if (!VT.isVector())
1100     return getPointerTy(DL);
1101   if (Subtarget.hasVInstructions() &&
1102       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
1103     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
1104   return VT.changeVectorElementTypeToInteger();
1105 }
1106 
1107 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
1108   return Subtarget.getXLenVT();
1109 }
1110 
1111 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
1112                                              const CallInst &I,
1113                                              MachineFunction &MF,
1114                                              unsigned Intrinsic) const {
1115   auto &DL = I.getModule()->getDataLayout();
1116   switch (Intrinsic) {
1117   default:
1118     return false;
1119   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
1120   case Intrinsic::riscv_masked_atomicrmw_add_i32:
1121   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
1122   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
1123   case Intrinsic::riscv_masked_atomicrmw_max_i32:
1124   case Intrinsic::riscv_masked_atomicrmw_min_i32:
1125   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
1126   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
1127   case Intrinsic::riscv_masked_cmpxchg_i32:
1128     Info.opc = ISD::INTRINSIC_W_CHAIN;
1129     Info.memVT = MVT::i32;
1130     Info.ptrVal = I.getArgOperand(0);
1131     Info.offset = 0;
1132     Info.align = Align(4);
1133     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
1134                  MachineMemOperand::MOVolatile;
1135     return true;
1136   case Intrinsic::riscv_masked_strided_load:
1137     Info.opc = ISD::INTRINSIC_W_CHAIN;
1138     Info.ptrVal = I.getArgOperand(1);
1139     Info.memVT = getValueType(DL, I.getType()->getScalarType());
1140     Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
1141     Info.size = MemoryLocation::UnknownSize;
1142     Info.flags |= MachineMemOperand::MOLoad;
1143     return true;
1144   case Intrinsic::riscv_masked_strided_store:
1145     Info.opc = ISD::INTRINSIC_VOID;
1146     Info.ptrVal = I.getArgOperand(1);
1147     Info.memVT =
1148         getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
1149     Info.align = Align(
1150         DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
1151         8);
1152     Info.size = MemoryLocation::UnknownSize;
1153     Info.flags |= MachineMemOperand::MOStore;
1154     return true;
1155   }
1156 }
1157 
1158 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
1159                                                 const AddrMode &AM, Type *Ty,
1160                                                 unsigned AS,
1161                                                 Instruction *I) const {
1162   // No global is ever allowed as a base.
1163   if (AM.BaseGV)
1164     return false;
1165 
1166   // Require a 12-bit signed offset.
1167   if (!isInt<12>(AM.BaseOffs))
1168     return false;
1169 
1170   switch (AM.Scale) {
1171   case 0: // "r+i" or just "i", depending on HasBaseReg.
1172     break;
1173   case 1:
1174     if (!AM.HasBaseReg) // allow "r+i".
1175       break;
1176     return false; // disallow "r+r" or "r+r+i".
1177   default:
1178     return false;
1179   }
1180 
1181   return true;
1182 }
1183 
1184 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1185   return isInt<12>(Imm);
1186 }
1187 
1188 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
1189   return isInt<12>(Imm);
1190 }
1191 
1192 // On RV32, 64-bit integers are split into their high and low parts and held
1193 // in two different registers, so the trunc is free since the low register can
1194 // just be used.
1195 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
1196   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
1197     return false;
1198   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
1199   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
1200   return (SrcBits == 64 && DestBits == 32);
1201 }
1202 
1203 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
1204   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
1205       !SrcVT.isInteger() || !DstVT.isInteger())
1206     return false;
1207   unsigned SrcBits = SrcVT.getSizeInBits();
1208   unsigned DestBits = DstVT.getSizeInBits();
1209   return (SrcBits == 64 && DestBits == 32);
1210 }
1211 
1212 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
1213   // Zexts are free if they can be combined with a load.
1214   // Don't advertise i32->i64 zextload as being free for RV64. It interacts
1215   // poorly with type legalization of compares preferring sext.
1216   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
1217     EVT MemVT = LD->getMemoryVT();
1218     if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
1219         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
1220          LD->getExtensionType() == ISD::ZEXTLOAD))
1221       return true;
1222   }
1223 
1224   return TargetLowering::isZExtFree(Val, VT2);
1225 }
1226 
1227 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
1228   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
1229 }
1230 
1231 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
1232   return Subtarget.hasStdExtZbb();
1233 }
1234 
1235 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
1236   return Subtarget.hasStdExtZbb();
1237 }
1238 
1239 bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
1240   EVT VT = Y.getValueType();
1241 
1242   // FIXME: Support vectors once we have tests.
1243   if (VT.isVector())
1244     return false;
1245 
1246   return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
1247           Subtarget.hasStdExtZbkb()) &&
1248          !isa<ConstantSDNode>(Y);
1249 }
1250 
1251 /// Check if sinking \p I's operands to I's basic block is profitable, because
1252 /// the operands can be folded into a target instruction, e.g.
1253 /// splats of scalars can fold into vector instructions.
1254 bool RISCVTargetLowering::shouldSinkOperands(
1255     Instruction *I, SmallVectorImpl<Use *> &Ops) const {
1256   using namespace llvm::PatternMatch;
1257 
1258   if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
1259     return false;
1260 
1261   auto IsSinker = [&](Instruction *I, int Operand) {
1262     switch (I->getOpcode()) {
1263     case Instruction::Add:
1264     case Instruction::Sub:
1265     case Instruction::Mul:
1266     case Instruction::And:
1267     case Instruction::Or:
1268     case Instruction::Xor:
1269     case Instruction::FAdd:
1270     case Instruction::FSub:
1271     case Instruction::FMul:
1272     case Instruction::FDiv:
1273     case Instruction::ICmp:
1274     case Instruction::FCmp:
1275       return true;
1276     case Instruction::Shl:
1277     case Instruction::LShr:
1278     case Instruction::AShr:
1279     case Instruction::UDiv:
1280     case Instruction::SDiv:
1281     case Instruction::URem:
1282     case Instruction::SRem:
1283       return Operand == 1;
1284     case Instruction::Call:
1285       if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1286         switch (II->getIntrinsicID()) {
1287         case Intrinsic::fma:
1288           return Operand == 0 || Operand == 1;
1289         // FIXME: Our patterns can only match vx/vf instructions when the splat
1290         // it on the RHS, because TableGen doesn't recognize our VP operations
1291         // as commutative.
1292         case Intrinsic::vp_add:
1293         case Intrinsic::vp_mul:
1294         case Intrinsic::vp_and:
1295         case Intrinsic::vp_or:
1296         case Intrinsic::vp_xor:
1297         case Intrinsic::vp_fadd:
1298         case Intrinsic::vp_fmul:
1299         case Intrinsic::vp_shl:
1300         case Intrinsic::vp_lshr:
1301         case Intrinsic::vp_ashr:
1302         case Intrinsic::vp_udiv:
1303         case Intrinsic::vp_sdiv:
1304         case Intrinsic::vp_urem:
1305         case Intrinsic::vp_srem:
1306           return Operand == 1;
1307         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1308         // explicit patterns for both LHS and RHS (as 'vr' versions).
1309         case Intrinsic::vp_sub:
1310         case Intrinsic::vp_fsub:
1311         case Intrinsic::vp_fdiv:
1312           return Operand == 0 || Operand == 1;
1313         default:
1314           return false;
1315         }
1316       }
1317       return false;
1318     default:
1319       return false;
1320     }
1321   };
1322 
1323   for (auto OpIdx : enumerate(I->operands())) {
1324     if (!IsSinker(I, OpIdx.index()))
1325       continue;
1326 
1327     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
1328     // Make sure we are not already sinking this operand
1329     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1330       continue;
1331 
1332     // We are looking for a splat that can be sunk.
1333     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1334                              m_Undef(), m_ZeroMask())))
1335       continue;
1336 
1337     // All uses of the shuffle should be sunk to avoid duplicating it across gpr
1338     // and vector registers
1339     for (Use &U : Op->uses()) {
1340       Instruction *Insn = cast<Instruction>(U.getUser());
1341       if (!IsSinker(Insn, U.getOperandNo()))
1342         return false;
1343     }
1344 
1345     Ops.push_back(&Op->getOperandUse(0));
1346     Ops.push_back(&OpIdx.value());
1347   }
1348   return true;
1349 }
1350 
1351 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1352                                        bool ForCodeSize) const {
1353   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1354   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1355     return false;
1356   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1357     return false;
1358   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1359     return false;
1360   return Imm.isZero();
1361 }
1362 
1363 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1364   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1365          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1366          (VT == MVT::f64 && Subtarget.hasStdExtD());
1367 }
1368 
1369 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1370                                                       CallingConv::ID CC,
1371                                                       EVT VT) const {
1372   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1373   // We might still end up using a GPR but that will be decided based on ABI.
1374   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1375   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1376     return MVT::f32;
1377 
1378   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1379 }
1380 
1381 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1382                                                            CallingConv::ID CC,
1383                                                            EVT VT) const {
1384   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1385   // We might still end up using a GPR but that will be decided based on ABI.
1386   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1387   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1388     return 1;
1389 
1390   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1391 }
1392 
1393 // Changes the condition code and swaps operands if necessary, so the SetCC
1394 // operation matches one of the comparisons supported directly by branches
1395 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1396 // with 1/-1.
1397 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1398                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1399   // Convert X > -1 to X >= 0.
1400   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1401     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1402     CC = ISD::SETGE;
1403     return;
1404   }
1405   // Convert X < 1 to 0 >= X.
1406   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1407     RHS = LHS;
1408     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1409     CC = ISD::SETGE;
1410     return;
1411   }
1412 
1413   switch (CC) {
1414   default:
1415     break;
1416   case ISD::SETGT:
1417   case ISD::SETLE:
1418   case ISD::SETUGT:
1419   case ISD::SETULE:
1420     CC = ISD::getSetCCSwappedOperands(CC);
1421     std::swap(LHS, RHS);
1422     break;
1423   }
1424 }
1425 
1426 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1427   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1428   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1429   if (VT.getVectorElementType() == MVT::i1)
1430     KnownSize *= 8;
1431 
1432   switch (KnownSize) {
1433   default:
1434     llvm_unreachable("Invalid LMUL.");
1435   case 8:
1436     return RISCVII::VLMUL::LMUL_F8;
1437   case 16:
1438     return RISCVII::VLMUL::LMUL_F4;
1439   case 32:
1440     return RISCVII::VLMUL::LMUL_F2;
1441   case 64:
1442     return RISCVII::VLMUL::LMUL_1;
1443   case 128:
1444     return RISCVII::VLMUL::LMUL_2;
1445   case 256:
1446     return RISCVII::VLMUL::LMUL_4;
1447   case 512:
1448     return RISCVII::VLMUL::LMUL_8;
1449   }
1450 }
1451 
1452 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1453   switch (LMul) {
1454   default:
1455     llvm_unreachable("Invalid LMUL.");
1456   case RISCVII::VLMUL::LMUL_F8:
1457   case RISCVII::VLMUL::LMUL_F4:
1458   case RISCVII::VLMUL::LMUL_F2:
1459   case RISCVII::VLMUL::LMUL_1:
1460     return RISCV::VRRegClassID;
1461   case RISCVII::VLMUL::LMUL_2:
1462     return RISCV::VRM2RegClassID;
1463   case RISCVII::VLMUL::LMUL_4:
1464     return RISCV::VRM4RegClassID;
1465   case RISCVII::VLMUL::LMUL_8:
1466     return RISCV::VRM8RegClassID;
1467   }
1468 }
1469 
1470 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1471   RISCVII::VLMUL LMUL = getLMUL(VT);
1472   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1473       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1474       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1475       LMUL == RISCVII::VLMUL::LMUL_1) {
1476     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1477                   "Unexpected subreg numbering");
1478     return RISCV::sub_vrm1_0 + Index;
1479   }
1480   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1481     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1482                   "Unexpected subreg numbering");
1483     return RISCV::sub_vrm2_0 + Index;
1484   }
1485   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1486     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1487                   "Unexpected subreg numbering");
1488     return RISCV::sub_vrm4_0 + Index;
1489   }
1490   llvm_unreachable("Invalid vector type.");
1491 }
1492 
1493 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1494   if (VT.getVectorElementType() == MVT::i1)
1495     return RISCV::VRRegClassID;
1496   return getRegClassIDForLMUL(getLMUL(VT));
1497 }
1498 
1499 // Attempt to decompose a subvector insert/extract between VecVT and
1500 // SubVecVT via subregister indices. Returns the subregister index that
1501 // can perform the subvector insert/extract with the given element index, as
1502 // well as the index corresponding to any leftover subvectors that must be
1503 // further inserted/extracted within the register class for SubVecVT.
1504 std::pair<unsigned, unsigned>
1505 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1506     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1507     const RISCVRegisterInfo *TRI) {
1508   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1509                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1510                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1511                 "Register classes not ordered");
1512   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1513   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1514   // Try to compose a subregister index that takes us from the incoming
1515   // LMUL>1 register class down to the outgoing one. At each step we half
1516   // the LMUL:
1517   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1518   // Note that this is not guaranteed to find a subregister index, such as
1519   // when we are extracting from one VR type to another.
1520   unsigned SubRegIdx = RISCV::NoSubRegister;
1521   for (const unsigned RCID :
1522        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1523     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1524       VecVT = VecVT.getHalfNumVectorElementsVT();
1525       bool IsHi =
1526           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1527       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1528                                             getSubregIndexByMVT(VecVT, IsHi));
1529       if (IsHi)
1530         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1531     }
1532   return {SubRegIdx, InsertExtractIdx};
1533 }
1534 
1535 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1536 // stores for those types.
1537 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1538   return !Subtarget.useRVVForFixedLengthVectors() ||
1539          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1540 }
1541 
1542 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1543   if (ScalarTy->isPointerTy())
1544     return true;
1545 
1546   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1547       ScalarTy->isIntegerTy(32))
1548     return true;
1549 
1550   if (ScalarTy->isIntegerTy(64))
1551     return Subtarget.hasVInstructionsI64();
1552 
1553   if (ScalarTy->isHalfTy())
1554     return Subtarget.hasVInstructionsF16();
1555   if (ScalarTy->isFloatTy())
1556     return Subtarget.hasVInstructionsF32();
1557   if (ScalarTy->isDoubleTy())
1558     return Subtarget.hasVInstructionsF64();
1559 
1560   return false;
1561 }
1562 
1563 static SDValue getVLOperand(SDValue Op) {
1564   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1565           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
1566          "Unexpected opcode");
1567   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
1568   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
1569   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1570       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
1571   if (!II)
1572     return SDValue();
1573   return Op.getOperand(II->VLOperand + 1 + HasChain);
1574 }
1575 
1576 static bool useRVVForFixedLengthVectorVT(MVT VT,
1577                                          const RISCVSubtarget &Subtarget) {
1578   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1579   if (!Subtarget.useRVVForFixedLengthVectors())
1580     return false;
1581 
1582   // We only support a set of vector types with a consistent maximum fixed size
1583   // across all supported vector element types to avoid legalization issues.
1584   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1585   // fixed-length vector type we support is 1024 bytes.
1586   if (VT.getFixedSizeInBits() > 1024 * 8)
1587     return false;
1588 
1589   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1590 
1591   MVT EltVT = VT.getVectorElementType();
1592 
1593   // Don't use RVV for vectors we cannot scalarize if required.
1594   switch (EltVT.SimpleTy) {
1595   // i1 is supported but has different rules.
1596   default:
1597     return false;
1598   case MVT::i1:
1599     // Masks can only use a single register.
1600     if (VT.getVectorNumElements() > MinVLen)
1601       return false;
1602     MinVLen /= 8;
1603     break;
1604   case MVT::i8:
1605   case MVT::i16:
1606   case MVT::i32:
1607     break;
1608   case MVT::i64:
1609     if (!Subtarget.hasVInstructionsI64())
1610       return false;
1611     break;
1612   case MVT::f16:
1613     if (!Subtarget.hasVInstructionsF16())
1614       return false;
1615     break;
1616   case MVT::f32:
1617     if (!Subtarget.hasVInstructionsF32())
1618       return false;
1619     break;
1620   case MVT::f64:
1621     if (!Subtarget.hasVInstructionsF64())
1622       return false;
1623     break;
1624   }
1625 
1626   // Reject elements larger than ELEN.
1627   if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
1628     return false;
1629 
1630   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1631   // Don't use RVV for types that don't fit.
1632   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1633     return false;
1634 
1635   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1636   // the base fixed length RVV support in place.
1637   if (!VT.isPow2VectorType())
1638     return false;
1639 
1640   return true;
1641 }
1642 
1643 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1644   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1645 }
1646 
1647 // Return the largest legal scalable vector type that matches VT's element type.
1648 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1649                                             const RISCVSubtarget &Subtarget) {
1650   // This may be called before legal types are setup.
1651   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1652           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1653          "Expected legal fixed length vector!");
1654 
1655   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1656   unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();
1657 
1658   MVT EltVT = VT.getVectorElementType();
1659   switch (EltVT.SimpleTy) {
1660   default:
1661     llvm_unreachable("unexpected element type for RVV container");
1662   case MVT::i1:
1663   case MVT::i8:
1664   case MVT::i16:
1665   case MVT::i32:
1666   case MVT::i64:
1667   case MVT::f16:
1668   case MVT::f32:
1669   case MVT::f64: {
1670     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1671     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1672     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1673     unsigned NumElts =
1674         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1675     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1676     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1677     return MVT::getScalableVectorVT(EltVT, NumElts);
1678   }
1679   }
1680 }
1681 
1682 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1683                                             const RISCVSubtarget &Subtarget) {
1684   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1685                                           Subtarget);
1686 }
1687 
1688 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1689   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1690 }
1691 
1692 // Grow V to consume an entire RVV register.
1693 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1694                                        const RISCVSubtarget &Subtarget) {
1695   assert(VT.isScalableVector() &&
1696          "Expected to convert into a scalable vector!");
1697   assert(V.getValueType().isFixedLengthVector() &&
1698          "Expected a fixed length vector operand!");
1699   SDLoc DL(V);
1700   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1701   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1702 }
1703 
1704 // Shrink V so it's just big enough to maintain a VT's worth of data.
1705 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1706                                          const RISCVSubtarget &Subtarget) {
1707   assert(VT.isFixedLengthVector() &&
1708          "Expected to convert into a fixed length vector!");
1709   assert(V.getValueType().isScalableVector() &&
1710          "Expected a scalable vector operand!");
1711   SDLoc DL(V);
1712   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1713   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1714 }
1715 
1716 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1717 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1718 // the vector type that it is contained in.
1719 static std::pair<SDValue, SDValue>
1720 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1721                 const RISCVSubtarget &Subtarget) {
1722   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1723   MVT XLenVT = Subtarget.getXLenVT();
1724   SDValue VL = VecVT.isFixedLengthVector()
1725                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1726                    : DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
1727   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1728   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1729   return {Mask, VL};
1730 }
1731 
1732 // As above but assuming the given type is a scalable vector type.
1733 static std::pair<SDValue, SDValue>
1734 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1735                         const RISCVSubtarget &Subtarget) {
1736   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1737   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1738 }
1739 
1740 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1741 // of either is (currently) supported. This can get us into an infinite loop
1742 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1743 // as a ..., etc.
1744 // Until either (or both) of these can reliably lower any node, reporting that
1745 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1746 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1747 // which is not desirable.
1748 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1749     EVT VT, unsigned DefinedValues) const {
1750   return false;
1751 }
1752 
1753 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1754                                   const RISCVSubtarget &Subtarget) {
1755   // RISCV FP-to-int conversions saturate to the destination register size, but
1756   // don't produce 0 for nan. We can use a conversion instruction and fix the
1757   // nan case with a compare and a select.
1758   SDValue Src = Op.getOperand(0);
1759 
1760   EVT DstVT = Op.getValueType();
1761   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1762 
1763   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1764   unsigned Opc;
1765   if (SatVT == DstVT)
1766     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1767   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1768     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1769   else
1770     return SDValue();
1771   // FIXME: Support other SatVTs by clamping before or after the conversion.
1772 
1773   SDLoc DL(Op);
1774   SDValue FpToInt = DAG.getNode(
1775       Opc, DL, DstVT, Src,
1776       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1777 
1778   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1779   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1780 }
1781 
1782 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1783 // and back. Taking care to avoid converting values that are nan or already
1784 // correct.
1785 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1786 // have FRM dependencies modeled yet.
1787 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1788   MVT VT = Op.getSimpleValueType();
1789   assert(VT.isVector() && "Unexpected type");
1790 
1791   SDLoc DL(Op);
1792 
1793   // Freeze the source since we are increasing the number of uses.
1794   SDValue Src = DAG.getNode(ISD::FREEZE, DL, VT, Op.getOperand(0));
1795 
1796   // Truncate to integer and convert back to FP.
1797   MVT IntVT = VT.changeVectorElementTypeToInteger();
1798   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1799   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1800 
1801   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1802 
1803   if (Op.getOpcode() == ISD::FCEIL) {
1804     // If the truncated value is the greater than or equal to the original
1805     // value, we've computed the ceil. Otherwise, we went the wrong way and
1806     // need to increase by 1.
1807     // FIXME: This should use a masked operation. Handle here or in isel?
1808     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1809                                  DAG.getConstantFP(1.0, DL, VT));
1810     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1811     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1812   } else if (Op.getOpcode() == ISD::FFLOOR) {
1813     // If the truncated value is the less than or equal to the original value,
1814     // we've computed the floor. Otherwise, we went the wrong way and need to
1815     // decrease by 1.
1816     // FIXME: This should use a masked operation. Handle here or in isel?
1817     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1818                                  DAG.getConstantFP(1.0, DL, VT));
1819     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1820     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1821   }
1822 
1823   // Restore the original sign so that -0.0 is preserved.
1824   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1825 
1826   // Determine the largest integer that can be represented exactly. This and
1827   // values larger than it don't have any fractional bits so don't need to
1828   // be converted.
1829   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1830   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1831   APFloat MaxVal = APFloat(FltSem);
1832   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1833                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1834   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1835 
1836   // If abs(Src) was larger than MaxVal or nan, keep it.
1837   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1838   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1839   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1840 }
1841 
1842 // ISD::FROUND is defined to round to nearest with ties rounding away from 0.
1843 // This mode isn't supported in vector hardware on RISCV. But as long as we
1844 // aren't compiling with trapping math, we can emulate this with
1845 // floor(X + copysign(nextafter(0.5, 0.0), X)).
1846 // FIXME: Could be shorter by changing rounding mode, but we don't have FRM
1847 // dependencies modeled yet.
1848 // FIXME: Use masked operations to avoid final merge.
1849 static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
1850   MVT VT = Op.getSimpleValueType();
1851   assert(VT.isVector() && "Unexpected type");
1852 
1853   SDLoc DL(Op);
1854 
1855   // Freeze the source since we are increasing the number of uses.
1856   SDValue Src = DAG.getNode(ISD::FREEZE, DL, VT, Op.getOperand(0));
1857 
1858   // We do the conversion on the absolute value and fix the sign at the end.
1859   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1860 
1861   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1862   bool Ignored;
1863   APFloat Point5Pred = APFloat(0.5f);
1864   Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
1865   Point5Pred.next(/*nextDown*/ true);
1866 
1867   // Add the adjustment.
1868   SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
1869                                DAG.getConstantFP(Point5Pred, DL, VT));
1870 
1871   // Truncate to integer and convert back to fp.
1872   MVT IntVT = VT.changeVectorElementTypeToInteger();
1873   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
1874   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1875 
1876   // Restore the original sign.
1877   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1878 
1879   // Determine the largest integer that can be represented exactly. This and
1880   // values larger than it don't have any fractional bits so don't need to
1881   // be converted.
1882   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1883   APFloat MaxVal = APFloat(FltSem);
1884   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1885                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1886   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1887 
1888   // If abs(Src) was larger than MaxVal or nan, keep it.
1889   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1890   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1891   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1892 }
1893 
1894 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1895                                  const RISCVSubtarget &Subtarget) {
1896   MVT VT = Op.getSimpleValueType();
1897   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1898 
1899   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1900 
1901   SDLoc DL(Op);
1902   SDValue Mask, VL;
1903   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1904 
1905   unsigned Opc =
1906       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1907   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1908   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1909 }
1910 
1911 struct VIDSequence {
1912   int64_t StepNumerator;
1913   unsigned StepDenominator;
1914   int64_t Addend;
1915 };
1916 
1917 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1918 // to the (non-zero) step S and start value X. This can be then lowered as the
1919 // RVV sequence (VID * S) + X, for example.
1920 // The step S is represented as an integer numerator divided by a positive
1921 // denominator. Note that the implementation currently only identifies
1922 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1923 // cannot detect 2/3, for example.
1924 // Note that this method will also match potentially unappealing index
1925 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1926 // determine whether this is worth generating code for.
1927 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1928   unsigned NumElts = Op.getNumOperands();
1929   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1930   if (!Op.getValueType().isInteger())
1931     return None;
1932 
1933   Optional<unsigned> SeqStepDenom;
1934   Optional<int64_t> SeqStepNum, SeqAddend;
1935   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1936   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1937   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1938     // Assume undef elements match the sequence; we just have to be careful
1939     // when interpolating across them.
1940     if (Op.getOperand(Idx).isUndef())
1941       continue;
1942     // The BUILD_VECTOR must be all constants.
1943     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1944       return None;
1945 
1946     uint64_t Val = Op.getConstantOperandVal(Idx) &
1947                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1948 
1949     if (PrevElt) {
1950       // Calculate the step since the last non-undef element, and ensure
1951       // it's consistent across the entire sequence.
1952       unsigned IdxDiff = Idx - PrevElt->second;
1953       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1954 
1955       // A zero-value value difference means that we're somewhere in the middle
1956       // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
1957       // step change before evaluating the sequence.
1958       if (ValDiff != 0) {
1959         int64_t Remainder = ValDiff % IdxDiff;
1960         // Normalize the step if it's greater than 1.
1961         if (Remainder != ValDiff) {
1962           // The difference must cleanly divide the element span.
1963           if (Remainder != 0)
1964             return None;
1965           ValDiff /= IdxDiff;
1966           IdxDiff = 1;
1967         }
1968 
1969         if (!SeqStepNum)
1970           SeqStepNum = ValDiff;
1971         else if (ValDiff != SeqStepNum)
1972           return None;
1973 
1974         if (!SeqStepDenom)
1975           SeqStepDenom = IdxDiff;
1976         else if (IdxDiff != *SeqStepDenom)
1977           return None;
1978       }
1979     }
1980 
1981     // Record and/or check any addend.
1982     if (SeqStepNum && SeqStepDenom) {
1983       uint64_t ExpectedVal =
1984           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1985       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1986       if (!SeqAddend)
1987         SeqAddend = Addend;
1988       else if (SeqAddend != Addend)
1989         return None;
1990     }
1991 
1992     // Record this non-undef element for later.
1993     if (!PrevElt || PrevElt->first != Val)
1994       PrevElt = std::make_pair(Val, Idx);
1995   }
1996   // We need to have logged both a step and an addend for this to count as
1997   // a legal index sequence.
1998   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
1999     return None;
2000 
2001   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
2002 }
2003 
2004 // Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
2005 // and lower it as a VRGATHER_VX_VL from the source vector.
2006 static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
2007                                   SelectionDAG &DAG,
2008                                   const RISCVSubtarget &Subtarget) {
2009   if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
2010     return SDValue();
2011   SDValue Vec = SplatVal.getOperand(0);
2012   // Only perform this optimization on vectors of the same size for simplicity.
2013   if (Vec.getValueType() != VT)
2014     return SDValue();
2015   SDValue Idx = SplatVal.getOperand(1);
2016   // The index must be a legal type.
2017   if (Idx.getValueType() != Subtarget.getXLenVT())
2018     return SDValue();
2019 
2020   MVT ContainerVT = VT;
2021   if (VT.isFixedLengthVector()) {
2022     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2023     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2024   }
2025 
2026   SDValue Mask, VL;
2027   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2028 
2029   SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
2030                                Idx, Mask, VL);
2031 
2032   if (!VT.isFixedLengthVector())
2033     return Gather;
2034 
2035   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2036 }
2037 
2038 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
2039                                  const RISCVSubtarget &Subtarget) {
2040   MVT VT = Op.getSimpleValueType();
2041   assert(VT.isFixedLengthVector() && "Unexpected vector!");
2042 
2043   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2044 
2045   SDLoc DL(Op);
2046   SDValue Mask, VL;
2047   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2048 
2049   MVT XLenVT = Subtarget.getXLenVT();
2050   unsigned NumElts = Op.getNumOperands();
2051 
2052   if (VT.getVectorElementType() == MVT::i1) {
2053     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
2054       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
2055       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
2056     }
2057 
2058     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
2059       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
2060       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
2061     }
2062 
2063     // Lower constant mask BUILD_VECTORs via an integer vector type, in
2064     // scalar integer chunks whose bit-width depends on the number of mask
2065     // bits and XLEN.
2066     // First, determine the most appropriate scalar integer type to use. This
2067     // is at most XLenVT, but may be shrunk to a smaller vector element type
2068     // according to the size of the final vector - use i8 chunks rather than
2069     // XLenVT if we're producing a v8i1. This results in more consistent
2070     // codegen across RV32 and RV64.
2071     unsigned NumViaIntegerBits =
2072         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
2073     NumViaIntegerBits = std::min(NumViaIntegerBits,
2074                                  Subtarget.getMaxELENForFixedLengthVectors());
2075     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
2076       // If we have to use more than one INSERT_VECTOR_ELT then this
2077       // optimization is likely to increase code size; avoid peforming it in
2078       // such a case. We can use a load from a constant pool in this case.
2079       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
2080         return SDValue();
2081       // Now we can create our integer vector type. Note that it may be larger
2082       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
2083       MVT IntegerViaVecVT =
2084           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
2085                            divideCeil(NumElts, NumViaIntegerBits));
2086 
2087       uint64_t Bits = 0;
2088       unsigned BitPos = 0, IntegerEltIdx = 0;
2089       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
2090 
2091       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
2092         // Once we accumulate enough bits to fill our scalar type, insert into
2093         // our vector and clear our accumulated data.
2094         if (I != 0 && I % NumViaIntegerBits == 0) {
2095           if (NumViaIntegerBits <= 32)
2096             Bits = SignExtend64(Bits, 32);
2097           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2098           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
2099                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2100           Bits = 0;
2101           BitPos = 0;
2102           IntegerEltIdx++;
2103         }
2104         SDValue V = Op.getOperand(I);
2105         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
2106         Bits |= ((uint64_t)BitValue << BitPos);
2107       }
2108 
2109       // Insert the (remaining) scalar value into position in our integer
2110       // vector type.
2111       if (NumViaIntegerBits <= 32)
2112         Bits = SignExtend64(Bits, 32);
2113       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2114       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
2115                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2116 
2117       if (NumElts < NumViaIntegerBits) {
2118         // If we're producing a smaller vector than our minimum legal integer
2119         // type, bitcast to the equivalent (known-legal) mask type, and extract
2120         // our final mask.
2121         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
2122         Vec = DAG.getBitcast(MVT::v8i1, Vec);
2123         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
2124                           DAG.getConstant(0, DL, XLenVT));
2125       } else {
2126         // Else we must have produced an integer type with the same size as the
2127         // mask type; bitcast for the final result.
2128         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
2129         Vec = DAG.getBitcast(VT, Vec);
2130       }
2131 
2132       return Vec;
2133     }
2134 
2135     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
2136     // vector type, we have a legal equivalently-sized i8 type, so we can use
2137     // that.
2138     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
2139     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
2140 
2141     SDValue WideVec;
2142     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2143       // For a splat, perform a scalar truncate before creating the wider
2144       // vector.
2145       assert(Splat.getValueType() == XLenVT &&
2146              "Unexpected type for i1 splat value");
2147       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2148                           DAG.getConstant(1, DL, XLenVT));
2149       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2150     } else {
2151       SmallVector<SDValue, 8> Ops(Op->op_values());
2152       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2153       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2154       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2155     }
2156 
2157     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2158   }
2159 
2160   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2161     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
2162       return Gather;
2163     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2164                                         : RISCVISD::VMV_V_X_VL;
2165     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
2166     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2167   }
2168 
2169   // Try and match index sequences, which we can lower to the vid instruction
2170   // with optional modifications. An all-undef vector is matched by
2171   // getSplatValue, above.
2172   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2173     int64_t StepNumerator = SimpleVID->StepNumerator;
2174     unsigned StepDenominator = SimpleVID->StepDenominator;
2175     int64_t Addend = SimpleVID->Addend;
2176 
2177     assert(StepNumerator != 0 && "Invalid step");
2178     bool Negate = false;
2179     int64_t SplatStepVal = StepNumerator;
2180     unsigned StepOpcode = ISD::MUL;
2181     if (StepNumerator != 1) {
2182       if (isPowerOf2_64(std::abs(StepNumerator))) {
2183         Negate = StepNumerator < 0;
2184         StepOpcode = ISD::SHL;
2185         SplatStepVal = Log2_64(std::abs(StepNumerator));
2186       }
2187     }
2188 
2189     // Only emit VIDs with suitably-small steps/addends. We use imm5 is a
2190     // threshold since it's the immediate value many RVV instructions accept.
2191     // There is no vmul.vi instruction so ensure multiply constant can fit in
2192     // a single addi instruction.
2193     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2194          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2195         isPowerOf2_32(StepDenominator) && isInt<5>(Addend)) {
2196       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2197       // Convert right out of the scalable type so we can use standard ISD
2198       // nodes for the rest of the computation. If we used scalable types with
2199       // these, we'd lose the fixed-length vector info and generate worse
2200       // vsetvli code.
2201       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2202       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2203           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2204         SDValue SplatStep = DAG.getSplatVector(
2205             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2206         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2207       }
2208       if (StepDenominator != 1) {
2209         SDValue SplatStep = DAG.getSplatVector(
2210             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2211         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2212       }
2213       if (Addend != 0 || Negate) {
2214         SDValue SplatAddend =
2215             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
2216         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
2217       }
2218       return VID;
2219     }
2220   }
2221 
2222   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2223   // when re-interpreted as a vector with a larger element type. For example,
2224   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2225   // could be instead splat as
2226   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2227   // TODO: This optimization could also work on non-constant splats, but it
2228   // would require bit-manipulation instructions to construct the splat value.
2229   SmallVector<SDValue> Sequence;
2230   unsigned EltBitSize = VT.getScalarSizeInBits();
2231   const auto *BV = cast<BuildVectorSDNode>(Op);
2232   if (VT.isInteger() && EltBitSize < 64 &&
2233       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2234       BV->getRepeatedSequence(Sequence) &&
2235       (Sequence.size() * EltBitSize) <= 64) {
2236     unsigned SeqLen = Sequence.size();
2237     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2238     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2239     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2240             ViaIntVT == MVT::i64) &&
2241            "Unexpected sequence type");
2242 
2243     unsigned EltIdx = 0;
2244     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2245     uint64_t SplatValue = 0;
2246     // Construct the amalgamated value which can be splatted as this larger
2247     // vector type.
2248     for (const auto &SeqV : Sequence) {
2249       if (!SeqV.isUndef())
2250         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2251                        << (EltIdx * EltBitSize));
2252       EltIdx++;
2253     }
2254 
2255     // On RV64, sign-extend from 32 to 64 bits where possible in order to
2256     // achieve better constant materializion.
2257     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2258       SplatValue = SignExtend64(SplatValue, 32);
2259 
2260     // Since we can't introduce illegal i64 types at this stage, we can only
2261     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2262     // way we can use RVV instructions to splat.
2263     assert((ViaIntVT.bitsLE(XLenVT) ||
2264             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2265            "Unexpected bitcast sequence");
2266     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2267       SDValue ViaVL =
2268           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2269       MVT ViaContainerVT =
2270           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2271       SDValue Splat =
2272           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2273                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2274       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2275       return DAG.getBitcast(VT, Splat);
2276     }
2277   }
2278 
2279   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2280   // which constitute a large proportion of the elements. In such cases we can
2281   // splat a vector with the dominant element and make up the shortfall with
2282   // INSERT_VECTOR_ELTs.
2283   // Note that this includes vectors of 2 elements by association. The
2284   // upper-most element is the "dominant" one, allowing us to use a splat to
2285   // "insert" the upper element, and an insert of the lower element at position
2286   // 0, which improves codegen.
2287   SDValue DominantValue;
2288   unsigned MostCommonCount = 0;
2289   DenseMap<SDValue, unsigned> ValueCounts;
2290   unsigned NumUndefElts =
2291       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2292 
2293   // Track the number of scalar loads we know we'd be inserting, estimated as
2294   // any non-zero floating-point constant. Other kinds of element are either
2295   // already in registers or are materialized on demand. The threshold at which
2296   // a vector load is more desirable than several scalar materializion and
2297   // vector-insertion instructions is not known.
2298   unsigned NumScalarLoads = 0;
2299 
2300   for (SDValue V : Op->op_values()) {
2301     if (V.isUndef())
2302       continue;
2303 
2304     ValueCounts.insert(std::make_pair(V, 0));
2305     unsigned &Count = ValueCounts[V];
2306 
2307     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2308       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2309 
2310     // Is this value dominant? In case of a tie, prefer the highest element as
2311     // it's cheaper to insert near the beginning of a vector than it is at the
2312     // end.
2313     if (++Count >= MostCommonCount) {
2314       DominantValue = V;
2315       MostCommonCount = Count;
2316     }
2317   }
2318 
2319   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2320   unsigned NumDefElts = NumElts - NumUndefElts;
2321   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2322 
2323   // Don't perform this optimization when optimizing for size, since
2324   // materializing elements and inserting them tends to cause code bloat.
2325   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2326       ((MostCommonCount > DominantValueCountThreshold) ||
2327        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2328     // Start by splatting the most common element.
2329     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2330 
2331     DenseSet<SDValue> Processed{DominantValue};
2332     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2333     for (const auto &OpIdx : enumerate(Op->ops())) {
2334       const SDValue &V = OpIdx.value();
2335       if (V.isUndef() || !Processed.insert(V).second)
2336         continue;
2337       if (ValueCounts[V] == 1) {
2338         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2339                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2340       } else {
2341         // Blend in all instances of this value using a VSELECT, using a
2342         // mask where each bit signals whether that element is the one
2343         // we're after.
2344         SmallVector<SDValue> Ops;
2345         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2346           return DAG.getConstant(V == V1, DL, XLenVT);
2347         });
2348         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2349                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2350                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2351       }
2352     }
2353 
2354     return Vec;
2355   }
2356 
2357   return SDValue();
2358 }
2359 
2360 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
2361                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
2362   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2363     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2364     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2365     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2366     // node in order to try and match RVV vector/scalar instructions.
2367     if ((LoC >> 31) == HiC)
2368       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
2369 
2370     // If vl is equal to XLEN_MAX and Hi constant is equal to Lo, we could use
2371     // vmv.v.x whose EEW = 32 to lower it.
2372     auto *Const = dyn_cast<ConstantSDNode>(VL);
2373     if (LoC == HiC && Const && Const->isAllOnesValue() &&
2374         Const->getOpcode() != ISD::TargetConstant) {
2375       MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
2376       // TODO: if vl <= min(VLMAX), we can also do this. But we could not
2377       // access the subtarget here now.
2378       auto InterVec = DAG.getNode(
2379           RISCVISD::VMV_V_X_VL, DL, InterVT, Lo,
2380           DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i32));
2381       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2382     }
2383   }
2384 
2385   // Fall back to a stack store and stride x0 vector load.
2386   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
2387 }
2388 
2389 // Called by type legalization to handle splat of i64 on RV32.
2390 // FIXME: We can optimize this when the type has sign or zero bits in one
2391 // of the halves.
2392 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
2393                                    SDValue VL, SelectionDAG &DAG) {
2394   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2395   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2396                            DAG.getConstant(0, DL, MVT::i32));
2397   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2398                            DAG.getConstant(1, DL, MVT::i32));
2399   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
2400 }
2401 
2402 // This function lowers a splat of a scalar operand Splat with the vector
2403 // length VL. It ensures the final sequence is type legal, which is useful when
2404 // lowering a splat after type legalization.
2405 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
2406                                 SelectionDAG &DAG,
2407                                 const RISCVSubtarget &Subtarget) {
2408   if (VT.isFloatingPoint()) {
2409     // If VL is 1, we could use vfmv.s.f.
2410     if (isOneConstant(VL))
2411       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, DAG.getUNDEF(VT),
2412                          Scalar, VL);
2413     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
2414   }
2415 
2416   MVT XLenVT = Subtarget.getXLenVT();
2417 
2418   // Simplest case is that the operand needs to be promoted to XLenVT.
2419   if (Scalar.getValueType().bitsLE(XLenVT)) {
2420     // If the operand is a constant, sign extend to increase our chances
2421     // of being able to use a .vi instruction. ANY_EXTEND would become a
2422     // a zero extend and the simm5 check in isel would fail.
2423     // FIXME: Should we ignore the upper bits in isel instead?
2424     unsigned ExtOpc =
2425         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2426     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2427     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2428     // If VL is 1 and the scalar value won't benefit from immediate, we could
2429     // use vmv.s.x.
2430     if (isOneConstant(VL) &&
2431         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2432       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, DAG.getUNDEF(VT), Scalar,
2433                          VL);
2434     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
2435   }
2436 
2437   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2438          "Unexpected scalar for splat lowering!");
2439 
2440   if (isOneConstant(VL) && isNullConstant(Scalar))
2441     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, DAG.getUNDEF(VT),
2442                        DAG.getConstant(0, DL, XLenVT), VL);
2443 
2444   // Otherwise use the more complicated splatting algorithm.
2445   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
2446 }
2447 
2448 // Is the mask a slidedown that shifts in undefs.
2449 static int matchShuffleAsSlideDown(ArrayRef<int> Mask) {
2450   int Size = Mask.size();
2451 
2452   // Elements shifted in should be undef.
2453   auto CheckUndefs = [&](int Shift) {
2454     for (int i = Size - Shift; i != Size; ++i)
2455       if (Mask[i] >= 0)
2456         return false;
2457     return true;
2458   };
2459 
2460   // Elements should be shifted or undef.
2461   auto MatchShift = [&](int Shift) {
2462     for (int i = 0; i != Size - Shift; ++i)
2463        if (Mask[i] >= 0 && Mask[i] != Shift + i)
2464          return false;
2465     return true;
2466   };
2467 
2468   // Try all possible shifts.
2469   for (int Shift = 1; Shift != Size; ++Shift)
2470     if (CheckUndefs(Shift) && MatchShift(Shift))
2471       return Shift;
2472 
2473   // No match.
2474   return -1;
2475 }
2476 
2477 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
2478                                 const RISCVSubtarget &Subtarget) {
2479   // We need to be able to widen elements to the next larger integer type.
2480   if (VT.getScalarSizeInBits() >= Subtarget.getMaxELENForFixedLengthVectors())
2481     return false;
2482 
2483   int Size = Mask.size();
2484   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
2485 
2486   int Srcs[] = {-1, -1};
2487   for (int i = 0; i != Size; ++i) {
2488     // Ignore undef elements.
2489     if (Mask[i] < 0)
2490       continue;
2491 
2492     // Is this an even or odd element.
2493     int Pol = i % 2;
2494 
2495     // Ensure we consistently use the same source for this element polarity.
2496     int Src = Mask[i] / Size;
2497     if (Srcs[Pol] < 0)
2498       Srcs[Pol] = Src;
2499     if (Srcs[Pol] != Src)
2500       return false;
2501 
2502     // Make sure the element within the source is appropriate for this element
2503     // in the destination.
2504     int Elt = Mask[i] % Size;
2505     if (Elt != i / 2)
2506       return false;
2507   }
2508 
2509   // We need to find a source for each polarity and they can't be the same.
2510   if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
2511     return false;
2512 
2513   // Swap the sources if the second source was in the even polarity.
2514   SwapSources = Srcs[0] > Srcs[1];
2515 
2516   return true;
2517 }
2518 
2519 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2520                                    const RISCVSubtarget &Subtarget) {
2521   SDValue V1 = Op.getOperand(0);
2522   SDValue V2 = Op.getOperand(1);
2523   SDLoc DL(Op);
2524   MVT XLenVT = Subtarget.getXLenVT();
2525   MVT VT = Op.getSimpleValueType();
2526   unsigned NumElts = VT.getVectorNumElements();
2527   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2528 
2529   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2530 
2531   SDValue TrueMask, VL;
2532   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2533 
2534   if (SVN->isSplat()) {
2535     const int Lane = SVN->getSplatIndex();
2536     if (Lane >= 0) {
2537       MVT SVT = VT.getVectorElementType();
2538 
2539       // Turn splatted vector load into a strided load with an X0 stride.
2540       SDValue V = V1;
2541       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2542       // with undef.
2543       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2544       int Offset = Lane;
2545       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2546         int OpElements =
2547             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2548         V = V.getOperand(Offset / OpElements);
2549         Offset %= OpElements;
2550       }
2551 
2552       // We need to ensure the load isn't atomic or volatile.
2553       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2554         auto *Ld = cast<LoadSDNode>(V);
2555         Offset *= SVT.getStoreSize();
2556         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2557                                                    TypeSize::Fixed(Offset), DL);
2558 
2559         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2560         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2561           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2562           SDValue IntID =
2563               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2564           SDValue Ops[] = {Ld->getChain(),
2565                            IntID,
2566                            DAG.getUNDEF(ContainerVT),
2567                            NewAddr,
2568                            DAG.getRegister(RISCV::X0, XLenVT),
2569                            VL};
2570           SDValue NewLoad = DAG.getMemIntrinsicNode(
2571               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2572               DAG.getMachineFunction().getMachineMemOperand(
2573                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2574           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2575           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2576         }
2577 
2578         // Otherwise use a scalar load and splat. This will give the best
2579         // opportunity to fold a splat into the operation. ISel can turn it into
2580         // the x0 strided load if we aren't able to fold away the select.
2581         if (SVT.isFloatingPoint())
2582           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2583                           Ld->getPointerInfo().getWithOffset(Offset),
2584                           Ld->getOriginalAlign(),
2585                           Ld->getMemOperand()->getFlags());
2586         else
2587           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2588                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2589                              Ld->getOriginalAlign(),
2590                              Ld->getMemOperand()->getFlags());
2591         DAG.makeEquivalentMemoryOrdering(Ld, V);
2592 
2593         unsigned Opc =
2594             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2595         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
2596         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2597       }
2598 
2599       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2600       assert(Lane < (int)NumElts && "Unexpected lane!");
2601       SDValue Gather =
2602           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2603                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2604       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2605     }
2606   }
2607 
2608   ArrayRef<int> Mask = SVN->getMask();
2609 
2610   // Try to match as a slidedown.
2611   int SlideAmt = matchShuffleAsSlideDown(Mask);
2612   if (SlideAmt >= 0) {
2613     // TODO: Should we reduce the VL to account for the upper undef elements?
2614     // Requires additional vsetvlis, but might be faster to execute.
2615     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2616     SDValue SlideDown =
2617         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
2618                     DAG.getUNDEF(ContainerVT), V1,
2619                     DAG.getConstant(SlideAmt, DL, XLenVT),
2620                     TrueMask, VL);
2621     return convertFromScalableVector(VT, SlideDown, DAG, Subtarget);
2622   }
2623 
2624   // Detect an interleave shuffle and lower to
2625   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
2626   bool SwapSources;
2627   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2628     // Swap sources if needed.
2629     if (SwapSources)
2630       std::swap(V1, V2);
2631 
2632     // Extract the lower half of the vectors.
2633     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2634     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2635                      DAG.getConstant(0, DL, XLenVT));
2636     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2637                      DAG.getConstant(0, DL, XLenVT));
2638 
2639     // Double the element width and halve the number of elements in an int type.
2640     unsigned EltBits = VT.getScalarSizeInBits();
2641     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2642     MVT WideIntVT =
2643         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2644     // Convert this to a scalable vector. We need to base this on the
2645     // destination size to ensure there's always a type with a smaller LMUL.
2646     MVT WideIntContainerVT =
2647         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2648 
2649     // Convert sources to scalable vectors with the same element count as the
2650     // larger type.
2651     MVT HalfContainerVT = MVT::getVectorVT(
2652         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2653     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2654     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2655 
2656     // Cast sources to integer.
2657     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2658     MVT IntHalfVT =
2659         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2660     V1 = DAG.getBitcast(IntHalfVT, V1);
2661     V2 = DAG.getBitcast(IntHalfVT, V2);
2662 
2663     // Freeze V2 since we use it twice and we need to be sure that the add and
2664     // multiply see the same value.
2665     V2 = DAG.getNode(ISD::FREEZE, DL, IntHalfVT, V2);
2666 
2667     // Recreate TrueMask using the widened type's element count.
2668     MVT MaskVT =
2669         MVT::getVectorVT(MVT::i1, HalfContainerVT.getVectorElementCount());
2670     TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2671 
2672     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2673     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2674                               V2, TrueMask, VL);
2675     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2676     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2677                                      DAG.getAllOnesConstant(DL, XLenVT));
2678     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2679                                    V2, Multiplier, TrueMask, VL);
2680     // Add the new copies to our previous addition giving us 2^eltbits copies of
2681     // V2. This is equivalent to shifting V2 left by eltbits. This should
2682     // combine with the vwmulu.vv above to form vwmaccu.vv.
2683     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2684                       TrueMask, VL);
2685     // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
2686     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2687     // vector VT.
2688     ContainerVT =
2689         MVT::getVectorVT(VT.getVectorElementType(),
2690                          WideIntContainerVT.getVectorElementCount() * 2);
2691     Add = DAG.getBitcast(ContainerVT, Add);
2692     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2693   }
2694 
2695   // Detect shuffles which can be re-expressed as vector selects; these are
2696   // shuffles in which each element in the destination is taken from an element
2697   // at the corresponding index in either source vectors.
2698   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2699     int MaskIndex = MaskIdx.value();
2700     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2701   });
2702 
2703   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2704 
2705   SmallVector<SDValue> MaskVals;
2706   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2707   // merged with a second vrgather.
2708   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2709 
2710   // By default we preserve the original operand order, and use a mask to
2711   // select LHS as true and RHS as false. However, since RVV vector selects may
2712   // feature splats but only on the LHS, we may choose to invert our mask and
2713   // instead select between RHS and LHS.
2714   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2715   bool InvertMask = IsSelect == SwapOps;
2716 
2717   // Keep a track of which non-undef indices are used by each LHS/RHS shuffle
2718   // half.
2719   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2720 
2721   // Now construct the mask that will be used by the vselect or blended
2722   // vrgather operation. For vrgathers, construct the appropriate indices into
2723   // each vector.
2724   for (int MaskIndex : Mask) {
2725     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2726     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2727     if (!IsSelect) {
2728       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2729       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2730                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2731                                      : DAG.getUNDEF(XLenVT));
2732       GatherIndicesRHS.push_back(
2733           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2734                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2735       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2736         ++LHSIndexCounts[MaskIndex];
2737       if (!IsLHSOrUndefIndex)
2738         ++RHSIndexCounts[MaskIndex - NumElts];
2739     }
2740   }
2741 
2742   if (SwapOps) {
2743     std::swap(V1, V2);
2744     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2745   }
2746 
2747   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2748   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2749   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2750 
2751   if (IsSelect)
2752     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2753 
2754   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2755     // On such a large vector we're unable to use i8 as the index type.
2756     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2757     // may involve vector splitting if we're already at LMUL=8, or our
2758     // user-supplied maximum fixed-length LMUL.
2759     return SDValue();
2760   }
2761 
2762   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2763   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2764   MVT IndexVT = VT.changeTypeToInteger();
2765   // Since we can't introduce illegal index types at this stage, use i16 and
2766   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2767   // than XLenVT.
2768   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2769     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2770     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2771   }
2772 
2773   MVT IndexContainerVT =
2774       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2775 
2776   SDValue Gather;
2777   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2778   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2779   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2780     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
2781   } else {
2782     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2783     // If only one index is used, we can use a "splat" vrgather.
2784     // TODO: We can splat the most-common index and fix-up any stragglers, if
2785     // that's beneficial.
2786     if (LHSIndexCounts.size() == 1) {
2787       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2788       Gather =
2789           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2790                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2791     } else {
2792       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2793       LHSIndices =
2794           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2795 
2796       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2797                            TrueMask, VL);
2798     }
2799   }
2800 
2801   // If a second vector operand is used by this shuffle, blend it in with an
2802   // additional vrgather.
2803   if (!V2.isUndef()) {
2804     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2805     // If only one index is used, we can use a "splat" vrgather.
2806     // TODO: We can splat the most-common index and fix-up any stragglers, if
2807     // that's beneficial.
2808     if (RHSIndexCounts.size() == 1) {
2809       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2810       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2811                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2812     } else {
2813       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2814       RHSIndices =
2815           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2816       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2817                        VL);
2818     }
2819 
2820     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2821     SelectMask =
2822         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2823 
2824     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2825                          Gather, VL);
2826   }
2827 
2828   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2829 }
2830 
2831 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
2832   // Support splats for any type. These should type legalize well.
2833   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
2834     return true;
2835 
2836   // Only support legal VTs for other shuffles for now.
2837   if (!isTypeLegal(VT))
2838     return false;
2839 
2840   MVT SVT = VT.getSimpleVT();
2841 
2842   bool SwapSources;
2843   return (matchShuffleAsSlideDown(M) >= 0) ||
2844          isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
2845 }
2846 
2847 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2848                                      SDLoc DL, SelectionDAG &DAG,
2849                                      const RISCVSubtarget &Subtarget) {
2850   if (VT.isScalableVector())
2851     return DAG.getFPExtendOrRound(Op, DL, VT);
2852   assert(VT.isFixedLengthVector() &&
2853          "Unexpected value type for RVV FP extend/round lowering");
2854   SDValue Mask, VL;
2855   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2856   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2857                         ? RISCVISD::FP_EXTEND_VL
2858                         : RISCVISD::FP_ROUND_VL;
2859   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2860 }
2861 
2862 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2863 // the exponent.
2864 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2865   MVT VT = Op.getSimpleValueType();
2866   unsigned EltSize = VT.getScalarSizeInBits();
2867   SDValue Src = Op.getOperand(0);
2868   SDLoc DL(Op);
2869 
2870   // We need a FP type that can represent the value.
2871   // TODO: Use f16 for i8 when possible?
2872   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2873   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2874 
2875   // Legal types should have been checked in the RISCVTargetLowering
2876   // constructor.
2877   // TODO: Splitting may make sense in some cases.
2878   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2879          "Expected legal float type!");
2880 
2881   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2882   // The trailing zero count is equal to log2 of this single bit value.
2883   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2884     SDValue Neg =
2885         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2886     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2887   }
2888 
2889   // We have a legal FP type, convert to it.
2890   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2891   // Bitcast to integer and shift the exponent to the LSB.
2892   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2893   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2894   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2895   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2896                               DAG.getConstant(ShiftAmt, DL, IntVT));
2897   // Truncate back to original type to allow vnsrl.
2898   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2899   // The exponent contains log2 of the value in biased form.
2900   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2901 
2902   // For trailing zeros, we just need to subtract the bias.
2903   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2904     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2905                        DAG.getConstant(ExponentBias, DL, VT));
2906 
2907   // For leading zeros, we need to remove the bias and convert from log2 to
2908   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2909   unsigned Adjust = ExponentBias + (EltSize - 1);
2910   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2911 }
2912 
2913 // While RVV has alignment restrictions, we should always be able to load as a
2914 // legal equivalently-sized byte-typed vector instead. This method is
2915 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
2916 // the load is already correctly-aligned, it returns SDValue().
2917 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2918                                                     SelectionDAG &DAG) const {
2919   auto *Load = cast<LoadSDNode>(Op);
2920   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2921 
2922   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2923                                      Load->getMemoryVT(),
2924                                      *Load->getMemOperand()))
2925     return SDValue();
2926 
2927   SDLoc DL(Op);
2928   MVT VT = Op.getSimpleValueType();
2929   unsigned EltSizeBits = VT.getScalarSizeInBits();
2930   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2931          "Unexpected unaligned RVV load type");
2932   MVT NewVT =
2933       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2934   assert(NewVT.isValid() &&
2935          "Expecting equally-sized RVV vector types to be legal");
2936   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2937                           Load->getPointerInfo(), Load->getOriginalAlign(),
2938                           Load->getMemOperand()->getFlags());
2939   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2940 }
2941 
2942 // While RVV has alignment restrictions, we should always be able to store as a
2943 // legal equivalently-sized byte-typed vector instead. This method is
2944 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
2945 // returns SDValue() if the store is already correctly aligned.
2946 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2947                                                      SelectionDAG &DAG) const {
2948   auto *Store = cast<StoreSDNode>(Op);
2949   assert(Store && Store->getValue().getValueType().isVector() &&
2950          "Expected vector store");
2951 
2952   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2953                                      Store->getMemoryVT(),
2954                                      *Store->getMemOperand()))
2955     return SDValue();
2956 
2957   SDLoc DL(Op);
2958   SDValue StoredVal = Store->getValue();
2959   MVT VT = StoredVal.getSimpleValueType();
2960   unsigned EltSizeBits = VT.getScalarSizeInBits();
2961   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2962          "Unexpected unaligned RVV store type");
2963   MVT NewVT =
2964       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2965   assert(NewVT.isValid() &&
2966          "Expecting equally-sized RVV vector types to be legal");
2967   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2968   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2969                       Store->getPointerInfo(), Store->getOriginalAlign(),
2970                       Store->getMemOperand()->getFlags());
2971 }
2972 
2973 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2974                                             SelectionDAG &DAG) const {
2975   switch (Op.getOpcode()) {
2976   default:
2977     report_fatal_error("unimplemented operand");
2978   case ISD::GlobalAddress:
2979     return lowerGlobalAddress(Op, DAG);
2980   case ISD::BlockAddress:
2981     return lowerBlockAddress(Op, DAG);
2982   case ISD::ConstantPool:
2983     return lowerConstantPool(Op, DAG);
2984   case ISD::JumpTable:
2985     return lowerJumpTable(Op, DAG);
2986   case ISD::GlobalTLSAddress:
2987     return lowerGlobalTLSAddress(Op, DAG);
2988   case ISD::SELECT:
2989     return lowerSELECT(Op, DAG);
2990   case ISD::BRCOND:
2991     return lowerBRCOND(Op, DAG);
2992   case ISD::VASTART:
2993     return lowerVASTART(Op, DAG);
2994   case ISD::FRAMEADDR:
2995     return lowerFRAMEADDR(Op, DAG);
2996   case ISD::RETURNADDR:
2997     return lowerRETURNADDR(Op, DAG);
2998   case ISD::SHL_PARTS:
2999     return lowerShiftLeftParts(Op, DAG);
3000   case ISD::SRA_PARTS:
3001     return lowerShiftRightParts(Op, DAG, true);
3002   case ISD::SRL_PARTS:
3003     return lowerShiftRightParts(Op, DAG, false);
3004   case ISD::BITCAST: {
3005     SDLoc DL(Op);
3006     EVT VT = Op.getValueType();
3007     SDValue Op0 = Op.getOperand(0);
3008     EVT Op0VT = Op0.getValueType();
3009     MVT XLenVT = Subtarget.getXLenVT();
3010     if (VT.isFixedLengthVector()) {
3011       // We can handle fixed length vector bitcasts with a simple replacement
3012       // in isel.
3013       if (Op0VT.isFixedLengthVector())
3014         return Op;
3015       // When bitcasting from scalar to fixed-length vector, insert the scalar
3016       // into a one-element vector of the result type, and perform a vector
3017       // bitcast.
3018       if (!Op0VT.isVector()) {
3019         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
3020         if (!isTypeLegal(BVT))
3021           return SDValue();
3022         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
3023                                               DAG.getUNDEF(BVT), Op0,
3024                                               DAG.getConstant(0, DL, XLenVT)));
3025       }
3026       return SDValue();
3027     }
3028     // Custom-legalize bitcasts from fixed-length vector types to scalar types
3029     // thus: bitcast the vector to a one-element vector type whose element type
3030     // is the same as the result type, and extract the first element.
3031     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
3032       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
3033       if (!isTypeLegal(BVT))
3034         return SDValue();
3035       SDValue BVec = DAG.getBitcast(BVT, Op0);
3036       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
3037                          DAG.getConstant(0, DL, XLenVT));
3038     }
3039     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
3040       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
3041       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
3042       return FPConv;
3043     }
3044     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
3045         Subtarget.hasStdExtF()) {
3046       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
3047       SDValue FPConv =
3048           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
3049       return FPConv;
3050     }
3051     return SDValue();
3052   }
3053   case ISD::INTRINSIC_WO_CHAIN:
3054     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3055   case ISD::INTRINSIC_W_CHAIN:
3056     return LowerINTRINSIC_W_CHAIN(Op, DAG);
3057   case ISD::INTRINSIC_VOID:
3058     return LowerINTRINSIC_VOID(Op, DAG);
3059   case ISD::BSWAP:
3060   case ISD::BITREVERSE: {
3061     MVT VT = Op.getSimpleValueType();
3062     SDLoc DL(Op);
3063     if (Subtarget.hasStdExtZbp()) {
3064       // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
3065       // Start with the maximum immediate value which is the bitwidth - 1.
3066       unsigned Imm = VT.getSizeInBits() - 1;
3067       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
3068       if (Op.getOpcode() == ISD::BSWAP)
3069         Imm &= ~0x7U;
3070       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
3071                          DAG.getConstant(Imm, DL, VT));
3072     }
3073     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
3074     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
3075     // Expand bitreverse to a bswap(rev8) followed by brev8.
3076     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
3077     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
3078     // as brev8 by an isel pattern.
3079     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
3080                        DAG.getConstant(7, DL, VT));
3081   }
3082   case ISD::FSHL:
3083   case ISD::FSHR: {
3084     MVT VT = Op.getSimpleValueType();
3085     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
3086     SDLoc DL(Op);
3087     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
3088     // use log(XLen) bits. Mask the shift amount accordingly to prevent
3089     // accidentally setting the extra bit.
3090     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
3091     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
3092                                 DAG.getConstant(ShAmtWidth, DL, VT));
3093     // fshl and fshr concatenate their operands in the same order. fsr and fsl
3094     // instruction use different orders. fshl will return its first operand for
3095     // shift of zero, fshr will return its second operand. fsl and fsr both
3096     // return rs1 so the ISD nodes need to have different operand orders.
3097     // Shift amount is in rs2.
3098     SDValue Op0 = Op.getOperand(0);
3099     SDValue Op1 = Op.getOperand(1);
3100     unsigned Opc = RISCVISD::FSL;
3101     if (Op.getOpcode() == ISD::FSHR) {
3102       std::swap(Op0, Op1);
3103       Opc = RISCVISD::FSR;
3104     }
3105     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3106   }
3107   case ISD::TRUNCATE: {
3108     SDLoc DL(Op);
3109     MVT VT = Op.getSimpleValueType();
3110     // Only custom-lower vector truncates
3111     if (!VT.isVector())
3112       return Op;
3113 
3114     // Truncates to mask types are handled differently
3115     if (VT.getVectorElementType() == MVT::i1)
3116       return lowerVectorMaskTrunc(Op, DAG);
3117 
3118     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
3119     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
3120     // truncate by one power of two at a time.
3121     MVT DstEltVT = VT.getVectorElementType();
3122 
3123     SDValue Src = Op.getOperand(0);
3124     MVT SrcVT = Src.getSimpleValueType();
3125     MVT SrcEltVT = SrcVT.getVectorElementType();
3126 
3127     assert(DstEltVT.bitsLT(SrcEltVT) &&
3128            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
3129            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
3130            "Unexpected vector truncate lowering");
3131 
3132     MVT ContainerVT = SrcVT;
3133     if (SrcVT.isFixedLengthVector()) {
3134       ContainerVT = getContainerForFixedLengthVector(SrcVT);
3135       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3136     }
3137 
3138     SDValue Result = Src;
3139     SDValue Mask, VL;
3140     std::tie(Mask, VL) =
3141         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
3142     LLVMContext &Context = *DAG.getContext();
3143     const ElementCount Count = ContainerVT.getVectorElementCount();
3144     do {
3145       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
3146       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
3147       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
3148                            Mask, VL);
3149     } while (SrcEltVT != DstEltVT);
3150 
3151     if (SrcVT.isFixedLengthVector())
3152       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
3153 
3154     return Result;
3155   }
3156   case ISD::ANY_EXTEND:
3157   case ISD::ZERO_EXTEND:
3158     if (Op.getOperand(0).getValueType().isVector() &&
3159         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3160       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3161     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3162   case ISD::SIGN_EXTEND:
3163     if (Op.getOperand(0).getValueType().isVector() &&
3164         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3165       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3166     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3167   case ISD::SPLAT_VECTOR_PARTS:
3168     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3169   case ISD::INSERT_VECTOR_ELT:
3170     return lowerINSERT_VECTOR_ELT(Op, DAG);
3171   case ISD::EXTRACT_VECTOR_ELT:
3172     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3173   case ISD::VSCALE: {
3174     MVT VT = Op.getSimpleValueType();
3175     SDLoc DL(Op);
3176     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
3177     // We define our scalable vector types for lmul=1 to use a 64 bit known
3178     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
3179     // vscale as VLENB / 8.
3180     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3181     if (Subtarget.getMinVLen() < RISCV::RVVBitsPerBlock)
3182       report_fatal_error("Support for VLEN==32 is incomplete.");
3183     if (isa<ConstantSDNode>(Op.getOperand(0))) {
3184       // We assume VLENB is a multiple of 8. We manually choose the best shift
3185       // here because SimplifyDemandedBits isn't always able to simplify it.
3186       uint64_t Val = Op.getConstantOperandVal(0);
3187       if (isPowerOf2_64(Val)) {
3188         uint64_t Log2 = Log2_64(Val);
3189         if (Log2 < 3)
3190           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3191                              DAG.getConstant(3 - Log2, DL, VT));
3192         if (Log2 > 3)
3193           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3194                              DAG.getConstant(Log2 - 3, DL, VT));
3195         return VLENB;
3196       }
3197       // If the multiplier is a multiple of 8, scale it down to avoid needing
3198       // to shift the VLENB value.
3199       if ((Val % 8) == 0)
3200         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3201                            DAG.getConstant(Val / 8, DL, VT));
3202     }
3203 
3204     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3205                                  DAG.getConstant(3, DL, VT));
3206     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3207   }
3208   case ISD::FPOWI: {
3209     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
3210     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
3211     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3212         Op.getOperand(1).getValueType() == MVT::i32) {
3213       SDLoc DL(Op);
3214       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3215       SDValue Powi =
3216           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3217       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3218                          DAG.getIntPtrConstant(0, DL));
3219     }
3220     return SDValue();
3221   }
3222   case ISD::FP_EXTEND: {
3223     // RVV can only do fp_extend to types double the size as the source. We
3224     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
3225     // via f32.
3226     SDLoc DL(Op);
3227     MVT VT = Op.getSimpleValueType();
3228     SDValue Src = Op.getOperand(0);
3229     MVT SrcVT = Src.getSimpleValueType();
3230 
3231     // Prepare any fixed-length vector operands.
3232     MVT ContainerVT = VT;
3233     if (SrcVT.isFixedLengthVector()) {
3234       ContainerVT = getContainerForFixedLengthVector(VT);
3235       MVT SrcContainerVT =
3236           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
3237       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3238     }
3239 
3240     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
3241         SrcVT.getVectorElementType() != MVT::f16) {
3242       // For scalable vectors, we only need to close the gap between
3243       // vXf16->vXf64.
3244       if (!VT.isFixedLengthVector())
3245         return Op;
3246       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
3247       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3248       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3249     }
3250 
3251     MVT InterVT = VT.changeVectorElementType(MVT::f32);
3252     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
3253     SDValue IntermediateExtend = getRVVFPExtendOrRound(
3254         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
3255 
3256     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
3257                                            DL, DAG, Subtarget);
3258     if (VT.isFixedLengthVector())
3259       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
3260     return Extend;
3261   }
3262   case ISD::FP_ROUND: {
3263     // RVV can only do fp_round to types half the size as the source. We
3264     // custom-lower f64->f16 rounds via RVV's round-to-odd float
3265     // conversion instruction.
3266     SDLoc DL(Op);
3267     MVT VT = Op.getSimpleValueType();
3268     SDValue Src = Op.getOperand(0);
3269     MVT SrcVT = Src.getSimpleValueType();
3270 
3271     // Prepare any fixed-length vector operands.
3272     MVT ContainerVT = VT;
3273     if (VT.isFixedLengthVector()) {
3274       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3275       ContainerVT =
3276           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3277       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3278     }
3279 
3280     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
3281         SrcVT.getVectorElementType() != MVT::f64) {
3282       // For scalable vectors, we only need to close the gap between
3283       // vXf64<->vXf16.
3284       if (!VT.isFixedLengthVector())
3285         return Op;
3286       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
3287       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3288       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3289     }
3290 
3291     SDValue Mask, VL;
3292     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3293 
3294     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
3295     SDValue IntermediateRound =
3296         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
3297     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
3298                                           DL, DAG, Subtarget);
3299 
3300     if (VT.isFixedLengthVector())
3301       return convertFromScalableVector(VT, Round, DAG, Subtarget);
3302     return Round;
3303   }
3304   case ISD::FP_TO_SINT:
3305   case ISD::FP_TO_UINT:
3306   case ISD::SINT_TO_FP:
3307   case ISD::UINT_TO_FP: {
3308     // RVV can only do fp<->int conversions to types half/double the size as
3309     // the source. We custom-lower any conversions that do two hops into
3310     // sequences.
3311     MVT VT = Op.getSimpleValueType();
3312     if (!VT.isVector())
3313       return Op;
3314     SDLoc DL(Op);
3315     SDValue Src = Op.getOperand(0);
3316     MVT EltVT = VT.getVectorElementType();
3317     MVT SrcVT = Src.getSimpleValueType();
3318     MVT SrcEltVT = SrcVT.getVectorElementType();
3319     unsigned EltSize = EltVT.getSizeInBits();
3320     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3321     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3322            "Unexpected vector element types");
3323 
3324     bool IsInt2FP = SrcEltVT.isInteger();
3325     // Widening conversions
3326     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
3327       if (IsInt2FP) {
3328         // Do a regular integer sign/zero extension then convert to float.
3329         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
3330                                       VT.getVectorElementCount());
3331         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3332                                  ? ISD::ZERO_EXTEND
3333                                  : ISD::SIGN_EXTEND;
3334         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3335         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3336       }
3337       // FP2Int
3338       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3339       // Do one doubling fp_extend then complete the operation by converting
3340       // to int.
3341       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3342       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3343       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3344     }
3345 
3346     // Narrowing conversions
3347     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
3348       if (IsInt2FP) {
3349         // One narrowing int_to_fp, then an fp_round.
3350         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3351         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3352         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3353         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3354       }
3355       // FP2Int
3356       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3357       // representable by the integer, the result is poison.
3358       MVT IVecVT =
3359           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
3360                            VT.getVectorElementCount());
3361       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3362       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3363     }
3364 
3365     // Scalable vectors can exit here. Patterns will handle equally-sized
3366     // conversions halving/doubling ones.
3367     if (!VT.isFixedLengthVector())
3368       return Op;
3369 
3370     // For fixed-length vectors we lower to a custom "VL" node.
3371     unsigned RVVOpc = 0;
3372     switch (Op.getOpcode()) {
3373     default:
3374       llvm_unreachable("Impossible opcode");
3375     case ISD::FP_TO_SINT:
3376       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3377       break;
3378     case ISD::FP_TO_UINT:
3379       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3380       break;
3381     case ISD::SINT_TO_FP:
3382       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3383       break;
3384     case ISD::UINT_TO_FP:
3385       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3386       break;
3387     }
3388 
3389     MVT ContainerVT, SrcContainerVT;
3390     // Derive the reference container type from the larger vector type.
3391     if (SrcEltSize > EltSize) {
3392       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3393       ContainerVT =
3394           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3395     } else {
3396       ContainerVT = getContainerForFixedLengthVector(VT);
3397       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3398     }
3399 
3400     SDValue Mask, VL;
3401     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3402 
3403     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3404     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3405     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3406   }
3407   case ISD::FP_TO_SINT_SAT:
3408   case ISD::FP_TO_UINT_SAT:
3409     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3410   case ISD::FTRUNC:
3411   case ISD::FCEIL:
3412   case ISD::FFLOOR:
3413     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3414   case ISD::FROUND:
3415     return lowerFROUND(Op, DAG);
3416   case ISD::VECREDUCE_ADD:
3417   case ISD::VECREDUCE_UMAX:
3418   case ISD::VECREDUCE_SMAX:
3419   case ISD::VECREDUCE_UMIN:
3420   case ISD::VECREDUCE_SMIN:
3421     return lowerVECREDUCE(Op, DAG);
3422   case ISD::VECREDUCE_AND:
3423   case ISD::VECREDUCE_OR:
3424   case ISD::VECREDUCE_XOR:
3425     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3426       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3427     return lowerVECREDUCE(Op, DAG);
3428   case ISD::VECREDUCE_FADD:
3429   case ISD::VECREDUCE_SEQ_FADD:
3430   case ISD::VECREDUCE_FMIN:
3431   case ISD::VECREDUCE_FMAX:
3432     return lowerFPVECREDUCE(Op, DAG);
3433   case ISD::VP_REDUCE_ADD:
3434   case ISD::VP_REDUCE_UMAX:
3435   case ISD::VP_REDUCE_SMAX:
3436   case ISD::VP_REDUCE_UMIN:
3437   case ISD::VP_REDUCE_SMIN:
3438   case ISD::VP_REDUCE_FADD:
3439   case ISD::VP_REDUCE_SEQ_FADD:
3440   case ISD::VP_REDUCE_FMIN:
3441   case ISD::VP_REDUCE_FMAX:
3442     return lowerVPREDUCE(Op, DAG);
3443   case ISD::VP_REDUCE_AND:
3444   case ISD::VP_REDUCE_OR:
3445   case ISD::VP_REDUCE_XOR:
3446     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3447       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3448     return lowerVPREDUCE(Op, DAG);
3449   case ISD::INSERT_SUBVECTOR:
3450     return lowerINSERT_SUBVECTOR(Op, DAG);
3451   case ISD::EXTRACT_SUBVECTOR:
3452     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3453   case ISD::STEP_VECTOR:
3454     return lowerSTEP_VECTOR(Op, DAG);
3455   case ISD::VECTOR_REVERSE:
3456     return lowerVECTOR_REVERSE(Op, DAG);
3457   case ISD::BUILD_VECTOR:
3458     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3459   case ISD::SPLAT_VECTOR:
3460     if (Op.getValueType().getVectorElementType() == MVT::i1)
3461       return lowerVectorMaskSplat(Op, DAG);
3462     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
3463   case ISD::VECTOR_SHUFFLE:
3464     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3465   case ISD::CONCAT_VECTORS: {
3466     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3467     // better than going through the stack, as the default expansion does.
3468     SDLoc DL(Op);
3469     MVT VT = Op.getSimpleValueType();
3470     unsigned NumOpElts =
3471         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3472     SDValue Vec = DAG.getUNDEF(VT);
3473     for (const auto &OpIdx : enumerate(Op->ops())) {
3474       SDValue SubVec = OpIdx.value();
3475       // Don't insert undef subvectors.
3476       if (SubVec.isUndef())
3477         continue;
3478       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3479                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3480     }
3481     return Vec;
3482   }
3483   case ISD::LOAD:
3484     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3485       return V;
3486     if (Op.getValueType().isFixedLengthVector())
3487       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3488     return Op;
3489   case ISD::STORE:
3490     if (auto V = expandUnalignedRVVStore(Op, DAG))
3491       return V;
3492     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3493       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3494     return Op;
3495   case ISD::MLOAD:
3496   case ISD::VP_LOAD:
3497     return lowerMaskedLoad(Op, DAG);
3498   case ISD::MSTORE:
3499   case ISD::VP_STORE:
3500     return lowerMaskedStore(Op, DAG);
3501   case ISD::SETCC:
3502     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3503   case ISD::ADD:
3504     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3505   case ISD::SUB:
3506     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3507   case ISD::MUL:
3508     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3509   case ISD::MULHS:
3510     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3511   case ISD::MULHU:
3512     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3513   case ISD::AND:
3514     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3515                                               RISCVISD::AND_VL);
3516   case ISD::OR:
3517     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3518                                               RISCVISD::OR_VL);
3519   case ISD::XOR:
3520     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3521                                               RISCVISD::XOR_VL);
3522   case ISD::SDIV:
3523     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3524   case ISD::SREM:
3525     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3526   case ISD::UDIV:
3527     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3528   case ISD::UREM:
3529     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3530   case ISD::SHL:
3531   case ISD::SRA:
3532   case ISD::SRL:
3533     if (Op.getSimpleValueType().isFixedLengthVector())
3534       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3535     // This can be called for an i32 shift amount that needs to be promoted.
3536     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3537            "Unexpected custom legalisation");
3538     return SDValue();
3539   case ISD::SADDSAT:
3540     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3541   case ISD::UADDSAT:
3542     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3543   case ISD::SSUBSAT:
3544     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3545   case ISD::USUBSAT:
3546     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3547   case ISD::FADD:
3548     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3549   case ISD::FSUB:
3550     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3551   case ISD::FMUL:
3552     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3553   case ISD::FDIV:
3554     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3555   case ISD::FNEG:
3556     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3557   case ISD::FABS:
3558     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3559   case ISD::FSQRT:
3560     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3561   case ISD::FMA:
3562     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3563   case ISD::SMIN:
3564     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3565   case ISD::SMAX:
3566     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3567   case ISD::UMIN:
3568     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3569   case ISD::UMAX:
3570     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3571   case ISD::FMINNUM:
3572     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3573   case ISD::FMAXNUM:
3574     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3575   case ISD::ABS:
3576     return lowerABS(Op, DAG);
3577   case ISD::CTLZ_ZERO_UNDEF:
3578   case ISD::CTTZ_ZERO_UNDEF:
3579     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3580   case ISD::VSELECT:
3581     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3582   case ISD::FCOPYSIGN:
3583     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3584   case ISD::MGATHER:
3585   case ISD::VP_GATHER:
3586     return lowerMaskedGather(Op, DAG);
3587   case ISD::MSCATTER:
3588   case ISD::VP_SCATTER:
3589     return lowerMaskedScatter(Op, DAG);
3590   case ISD::FLT_ROUNDS_:
3591     return lowerGET_ROUNDING(Op, DAG);
3592   case ISD::SET_ROUNDING:
3593     return lowerSET_ROUNDING(Op, DAG);
3594   case ISD::VP_SELECT:
3595     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3596   case ISD::VP_MERGE:
3597     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3598   case ISD::VP_ADD:
3599     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3600   case ISD::VP_SUB:
3601     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3602   case ISD::VP_MUL:
3603     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3604   case ISD::VP_SDIV:
3605     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3606   case ISD::VP_UDIV:
3607     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3608   case ISD::VP_SREM:
3609     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3610   case ISD::VP_UREM:
3611     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3612   case ISD::VP_AND:
3613     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3614   case ISD::VP_OR:
3615     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3616   case ISD::VP_XOR:
3617     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3618   case ISD::VP_ASHR:
3619     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3620   case ISD::VP_LSHR:
3621     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3622   case ISD::VP_SHL:
3623     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3624   case ISD::VP_FADD:
3625     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3626   case ISD::VP_FSUB:
3627     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3628   case ISD::VP_FMUL:
3629     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3630   case ISD::VP_FDIV:
3631     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3632   case ISD::VP_FNEG:
3633     return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
3634   case ISD::VP_FMA:
3635     return lowerVPOp(Op, DAG, RISCVISD::FMA_VL);
3636   }
3637 }
3638 
3639 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3640                              SelectionDAG &DAG, unsigned Flags) {
3641   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3642 }
3643 
3644 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3645                              SelectionDAG &DAG, unsigned Flags) {
3646   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3647                                    Flags);
3648 }
3649 
3650 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3651                              SelectionDAG &DAG, unsigned Flags) {
3652   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3653                                    N->getOffset(), Flags);
3654 }
3655 
3656 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3657                              SelectionDAG &DAG, unsigned Flags) {
3658   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3659 }
3660 
3661 template <class NodeTy>
3662 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3663                                      bool IsLocal) const {
3664   SDLoc DL(N);
3665   EVT Ty = getPointerTy(DAG.getDataLayout());
3666 
3667   if (isPositionIndependent()) {
3668     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3669     if (IsLocal)
3670       // Use PC-relative addressing to access the symbol. This generates the
3671       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3672       // %pcrel_lo(auipc)).
3673       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3674 
3675     // Use PC-relative addressing to access the GOT for this symbol, then load
3676     // the address from the GOT. This generates the pattern (PseudoLA sym),
3677     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3678     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
3679   }
3680 
3681   switch (getTargetMachine().getCodeModel()) {
3682   default:
3683     report_fatal_error("Unsupported code model for lowering");
3684   case CodeModel::Small: {
3685     // Generate a sequence for accessing addresses within the first 2 GiB of
3686     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3687     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3688     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3689     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3690     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3691   }
3692   case CodeModel::Medium: {
3693     // Generate a sequence for accessing addresses within any 2GiB range within
3694     // the address space. This generates the pattern (PseudoLLA sym), which
3695     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3696     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3697     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3698   }
3699   }
3700 }
3701 
3702 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3703                                                 SelectionDAG &DAG) const {
3704   SDLoc DL(Op);
3705   EVT Ty = Op.getValueType();
3706   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3707   int64_t Offset = N->getOffset();
3708   MVT XLenVT = Subtarget.getXLenVT();
3709 
3710   const GlobalValue *GV = N->getGlobal();
3711   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3712   SDValue Addr = getAddr(N, DAG, IsLocal);
3713 
3714   // In order to maximise the opportunity for common subexpression elimination,
3715   // emit a separate ADD node for the global address offset instead of folding
3716   // it in the global address node. Later peephole optimisations may choose to
3717   // fold it back in when profitable.
3718   if (Offset != 0)
3719     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3720                        DAG.getConstant(Offset, DL, XLenVT));
3721   return Addr;
3722 }
3723 
3724 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3725                                                SelectionDAG &DAG) const {
3726   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3727 
3728   return getAddr(N, DAG);
3729 }
3730 
3731 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3732                                                SelectionDAG &DAG) const {
3733   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3734 
3735   return getAddr(N, DAG);
3736 }
3737 
3738 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3739                                             SelectionDAG &DAG) const {
3740   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3741 
3742   return getAddr(N, DAG);
3743 }
3744 
3745 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3746                                               SelectionDAG &DAG,
3747                                               bool UseGOT) const {
3748   SDLoc DL(N);
3749   EVT Ty = getPointerTy(DAG.getDataLayout());
3750   const GlobalValue *GV = N->getGlobal();
3751   MVT XLenVT = Subtarget.getXLenVT();
3752 
3753   if (UseGOT) {
3754     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3755     // load the address from the GOT and add the thread pointer. This generates
3756     // the pattern (PseudoLA_TLS_IE sym), which expands to
3757     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3758     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3759     SDValue Load =
3760         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3761 
3762     // Add the thread pointer.
3763     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3764     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3765   }
3766 
3767   // Generate a sequence for accessing the address relative to the thread
3768   // pointer, with the appropriate adjustment for the thread pointer offset.
3769   // This generates the pattern
3770   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
3771   SDValue AddrHi =
3772       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3773   SDValue AddrAdd =
3774       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3775   SDValue AddrLo =
3776       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3777 
3778   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3779   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3780   SDValue MNAdd = SDValue(
3781       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3782       0);
3783   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3784 }
3785 
3786 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3787                                                SelectionDAG &DAG) const {
3788   SDLoc DL(N);
3789   EVT Ty = getPointerTy(DAG.getDataLayout());
3790   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3791   const GlobalValue *GV = N->getGlobal();
3792 
3793   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3794   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3795   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3796   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3797   SDValue Load =
3798       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3799 
3800   // Prepare argument list to generate call.
3801   ArgListTy Args;
3802   ArgListEntry Entry;
3803   Entry.Node = Load;
3804   Entry.Ty = CallTy;
3805   Args.push_back(Entry);
3806 
3807   // Setup call to __tls_get_addr.
3808   TargetLowering::CallLoweringInfo CLI(DAG);
3809   CLI.setDebugLoc(DL)
3810       .setChain(DAG.getEntryNode())
3811       .setLibCallee(CallingConv::C, CallTy,
3812                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3813                     std::move(Args));
3814 
3815   return LowerCallTo(CLI).first;
3816 }
3817 
3818 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3819                                                    SelectionDAG &DAG) const {
3820   SDLoc DL(Op);
3821   EVT Ty = Op.getValueType();
3822   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3823   int64_t Offset = N->getOffset();
3824   MVT XLenVT = Subtarget.getXLenVT();
3825 
3826   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3827 
3828   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3829       CallingConv::GHC)
3830     report_fatal_error("In GHC calling convention TLS is not supported");
3831 
3832   SDValue Addr;
3833   switch (Model) {
3834   case TLSModel::LocalExec:
3835     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3836     break;
3837   case TLSModel::InitialExec:
3838     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3839     break;
3840   case TLSModel::LocalDynamic:
3841   case TLSModel::GeneralDynamic:
3842     Addr = getDynamicTLSAddr(N, DAG);
3843     break;
3844   }
3845 
3846   // In order to maximise the opportunity for common subexpression elimination,
3847   // emit a separate ADD node for the global address offset instead of folding
3848   // it in the global address node. Later peephole optimisations may choose to
3849   // fold it back in when profitable.
3850   if (Offset != 0)
3851     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3852                        DAG.getConstant(Offset, DL, XLenVT));
3853   return Addr;
3854 }
3855 
3856 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3857   SDValue CondV = Op.getOperand(0);
3858   SDValue TrueV = Op.getOperand(1);
3859   SDValue FalseV = Op.getOperand(2);
3860   SDLoc DL(Op);
3861   MVT VT = Op.getSimpleValueType();
3862   MVT XLenVT = Subtarget.getXLenVT();
3863 
3864   // Lower vector SELECTs to VSELECTs by splatting the condition.
3865   if (VT.isVector()) {
3866     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3867     SDValue CondSplat = VT.isScalableVector()
3868                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3869                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3870     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3871   }
3872 
3873   // If the result type is XLenVT and CondV is the output of a SETCC node
3874   // which also operated on XLenVT inputs, then merge the SETCC node into the
3875   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3876   // compare+branch instructions. i.e.:
3877   // (select (setcc lhs, rhs, cc), truev, falsev)
3878   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3879   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3880       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3881     SDValue LHS = CondV.getOperand(0);
3882     SDValue RHS = CondV.getOperand(1);
3883     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3884     ISD::CondCode CCVal = CC->get();
3885 
3886     // Special case for a select of 2 constants that have a diffence of 1.
3887     // Normally this is done by DAGCombine, but if the select is introduced by
3888     // type legalization or op legalization, we miss it. Restricting to SETLT
3889     // case for now because that is what signed saturating add/sub need.
3890     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3891     // but we would probably want to swap the true/false values if the condition
3892     // is SETGE/SETLE to avoid an XORI.
3893     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3894         CCVal == ISD::SETLT) {
3895       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3896       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3897       if (TrueVal - 1 == FalseVal)
3898         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3899       if (TrueVal + 1 == FalseVal)
3900         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3901     }
3902 
3903     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3904 
3905     SDValue TargetCC = DAG.getCondCode(CCVal);
3906     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3907     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3908   }
3909 
3910   // Otherwise:
3911   // (select condv, truev, falsev)
3912   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3913   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3914   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3915 
3916   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3917 
3918   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3919 }
3920 
3921 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3922   SDValue CondV = Op.getOperand(1);
3923   SDLoc DL(Op);
3924   MVT XLenVT = Subtarget.getXLenVT();
3925 
3926   if (CondV.getOpcode() == ISD::SETCC &&
3927       CondV.getOperand(0).getValueType() == XLenVT) {
3928     SDValue LHS = CondV.getOperand(0);
3929     SDValue RHS = CondV.getOperand(1);
3930     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3931 
3932     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3933 
3934     SDValue TargetCC = DAG.getCondCode(CCVal);
3935     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3936                        LHS, RHS, TargetCC, Op.getOperand(2));
3937   }
3938 
3939   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3940                      CondV, DAG.getConstant(0, DL, XLenVT),
3941                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3942 }
3943 
3944 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3945   MachineFunction &MF = DAG.getMachineFunction();
3946   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3947 
3948   SDLoc DL(Op);
3949   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3950                                  getPointerTy(MF.getDataLayout()));
3951 
3952   // vastart just stores the address of the VarArgsFrameIndex slot into the
3953   // memory location argument.
3954   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3955   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3956                       MachinePointerInfo(SV));
3957 }
3958 
3959 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3960                                             SelectionDAG &DAG) const {
3961   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3962   MachineFunction &MF = DAG.getMachineFunction();
3963   MachineFrameInfo &MFI = MF.getFrameInfo();
3964   MFI.setFrameAddressIsTaken(true);
3965   Register FrameReg = RI.getFrameRegister(MF);
3966   int XLenInBytes = Subtarget.getXLen() / 8;
3967 
3968   EVT VT = Op.getValueType();
3969   SDLoc DL(Op);
3970   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3971   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3972   while (Depth--) {
3973     int Offset = -(XLenInBytes * 2);
3974     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3975                               DAG.getIntPtrConstant(Offset, DL));
3976     FrameAddr =
3977         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3978   }
3979   return FrameAddr;
3980 }
3981 
3982 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3983                                              SelectionDAG &DAG) const {
3984   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3985   MachineFunction &MF = DAG.getMachineFunction();
3986   MachineFrameInfo &MFI = MF.getFrameInfo();
3987   MFI.setReturnAddressIsTaken(true);
3988   MVT XLenVT = Subtarget.getXLenVT();
3989   int XLenInBytes = Subtarget.getXLen() / 8;
3990 
3991   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3992     return SDValue();
3993 
3994   EVT VT = Op.getValueType();
3995   SDLoc DL(Op);
3996   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3997   if (Depth) {
3998     int Off = -XLenInBytes;
3999     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
4000     SDValue Offset = DAG.getConstant(Off, DL, VT);
4001     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
4002                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
4003                        MachinePointerInfo());
4004   }
4005 
4006   // Return the value of the return address register, marking it an implicit
4007   // live-in.
4008   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
4009   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
4010 }
4011 
4012 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
4013                                                  SelectionDAG &DAG) const {
4014   SDLoc DL(Op);
4015   SDValue Lo = Op.getOperand(0);
4016   SDValue Hi = Op.getOperand(1);
4017   SDValue Shamt = Op.getOperand(2);
4018   EVT VT = Lo.getValueType();
4019 
4020   // if Shamt-XLEN < 0: // Shamt < XLEN
4021   //   Lo = Lo << Shamt
4022   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
4023   // else:
4024   //   Lo = 0
4025   //   Hi = Lo << (Shamt-XLEN)
4026 
4027   SDValue Zero = DAG.getConstant(0, DL, VT);
4028   SDValue One = DAG.getConstant(1, DL, VT);
4029   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4030   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4031   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4032   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
4033 
4034   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
4035   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
4036   SDValue ShiftRightLo =
4037       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
4038   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
4039   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
4040   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
4041 
4042   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4043 
4044   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
4045   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4046 
4047   SDValue Parts[2] = {Lo, Hi};
4048   return DAG.getMergeValues(Parts, DL);
4049 }
4050 
4051 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
4052                                                   bool IsSRA) const {
4053   SDLoc DL(Op);
4054   SDValue Lo = Op.getOperand(0);
4055   SDValue Hi = Op.getOperand(1);
4056   SDValue Shamt = Op.getOperand(2);
4057   EVT VT = Lo.getValueType();
4058 
4059   // SRA expansion:
4060   //   if Shamt-XLEN < 0: // Shamt < XLEN
4061   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
4062   //     Hi = Hi >>s Shamt
4063   //   else:
4064   //     Lo = Hi >>s (Shamt-XLEN);
4065   //     Hi = Hi >>s (XLEN-1)
4066   //
4067   // SRL expansion:
4068   //   if Shamt-XLEN < 0: // Shamt < XLEN
4069   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
4070   //     Hi = Hi >>u Shamt
4071   //   else:
4072   //     Lo = Hi >>u (Shamt-XLEN);
4073   //     Hi = 0;
4074 
4075   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
4076 
4077   SDValue Zero = DAG.getConstant(0, DL, VT);
4078   SDValue One = DAG.getConstant(1, DL, VT);
4079   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4080   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4081   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4082   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
4083 
4084   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
4085   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
4086   SDValue ShiftLeftHi =
4087       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
4088   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
4089   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
4090   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
4091   SDValue HiFalse =
4092       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
4093 
4094   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4095 
4096   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
4097   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4098 
4099   SDValue Parts[2] = {Lo, Hi};
4100   return DAG.getMergeValues(Parts, DL);
4101 }
4102 
4103 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
4104 // legal equivalently-sized i8 type, so we can use that as a go-between.
4105 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
4106                                                   SelectionDAG &DAG) const {
4107   SDLoc DL(Op);
4108   MVT VT = Op.getSimpleValueType();
4109   SDValue SplatVal = Op.getOperand(0);
4110   // All-zeros or all-ones splats are handled specially.
4111   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
4112     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4113     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
4114   }
4115   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
4116     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4117     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
4118   }
4119   MVT XLenVT = Subtarget.getXLenVT();
4120   assert(SplatVal.getValueType() == XLenVT &&
4121          "Unexpected type for i1 splat value");
4122   MVT InterVT = VT.changeVectorElementType(MVT::i8);
4123   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
4124                          DAG.getConstant(1, DL, XLenVT));
4125   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
4126   SDValue Zero = DAG.getConstant(0, DL, InterVT);
4127   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
4128 }
4129 
4130 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
4131 // illegal (currently only vXi64 RV32).
4132 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4133 // them to VMV_V_X_VL.
4134 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4135                                                      SelectionDAG &DAG) const {
4136   SDLoc DL(Op);
4137   MVT VecVT = Op.getSimpleValueType();
4138   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4139          "Unexpected SPLAT_VECTOR_PARTS lowering");
4140 
4141   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4142   SDValue Lo = Op.getOperand(0);
4143   SDValue Hi = Op.getOperand(1);
4144 
4145   if (VecVT.isFixedLengthVector()) {
4146     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4147     SDLoc DL(Op);
4148     SDValue Mask, VL;
4149     std::tie(Mask, VL) =
4150         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4151 
4152     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
4153     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4154   }
4155 
4156   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4157     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4158     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
4159     // If Hi constant is all the same sign bit as Lo, lower this as a custom
4160     // node in order to try and match RVV vector/scalar instructions.
4161     if ((LoC >> 31) == HiC)
4162       return DAG.getNode(
4163           RISCVISD::VMV_V_X_VL, DL, VecVT, Lo,
4164           DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i32));
4165   }
4166 
4167   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
4168   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4169       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4170       Hi.getConstantOperandVal(1) == 31)
4171     return DAG.getNode(
4172         RISCVISD::VMV_V_X_VL, DL, VecVT, Lo,
4173         DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i32));
4174 
4175   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
4176   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
4177                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i32));
4178 }
4179 
4180 // Custom-lower extensions from mask vectors by using a vselect either with 1
4181 // for zero/any-extension or -1 for sign-extension:
4182 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4183 // Note that any-extension is lowered identically to zero-extension.
4184 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4185                                                 int64_t ExtTrueVal) const {
4186   SDLoc DL(Op);
4187   MVT VecVT = Op.getSimpleValueType();
4188   SDValue Src = Op.getOperand(0);
4189   // Only custom-lower extensions from mask types
4190   assert(Src.getValueType().isVector() &&
4191          Src.getValueType().getVectorElementType() == MVT::i1);
4192 
4193   MVT XLenVT = Subtarget.getXLenVT();
4194   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4195   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4196 
4197   if (VecVT.isScalableVector()) {
4198     // Be careful not to introduce illegal scalar types at this stage, and be
4199     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
4200     // illegal and must be expanded. Since we know that the constants are
4201     // sign-extended 32-bit values, we use VMV_V_X_VL directly.
4202     bool IsRV32E64 =
4203         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
4204 
4205     if (!IsRV32E64) {
4206       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
4207       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
4208     } else {
4209       SplatZero =
4210           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, SplatZero,
4211                       DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
4212       SplatTrueVal =
4213           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, SplatTrueVal,
4214                       DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
4215     }
4216 
4217     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4218   }
4219 
4220   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4221   MVT I1ContainerVT =
4222       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4223 
4224   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4225 
4226   SDValue Mask, VL;
4227   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4228 
4229   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
4230   SplatTrueVal =
4231       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
4232   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4233                                SplatTrueVal, SplatZero, VL);
4234 
4235   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4236 }
4237 
4238 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4239     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4240   MVT ExtVT = Op.getSimpleValueType();
4241   // Only custom-lower extensions from fixed-length vector types.
4242   if (!ExtVT.isFixedLengthVector())
4243     return Op;
4244   MVT VT = Op.getOperand(0).getSimpleValueType();
4245   // Grab the canonical container type for the extended type. Infer the smaller
4246   // type from that to ensure the same number of vector elements, as we know
4247   // the LMUL will be sufficient to hold the smaller type.
4248   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
4249   // Get the extended container type manually to ensure the same number of
4250   // vector elements between source and dest.
4251   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4252                                      ContainerExtVT.getVectorElementCount());
4253 
4254   SDValue Op1 =
4255       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4256 
4257   SDLoc DL(Op);
4258   SDValue Mask, VL;
4259   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4260 
4261   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4262 
4263   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4264 }
4265 
4266 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4267 // setcc operation:
4268 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
4269 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
4270                                                   SelectionDAG &DAG) const {
4271   SDLoc DL(Op);
4272   EVT MaskVT = Op.getValueType();
4273   // Only expect to custom-lower truncations to mask types
4274   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4275          "Unexpected type for vector mask lowering");
4276   SDValue Src = Op.getOperand(0);
4277   MVT VecVT = Src.getSimpleValueType();
4278 
4279   // If this is a fixed vector, we need to convert it to a scalable vector.
4280   MVT ContainerVT = VecVT;
4281   if (VecVT.isFixedLengthVector()) {
4282     ContainerVT = getContainerForFixedLengthVector(VecVT);
4283     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4284   }
4285 
4286   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4287   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4288 
4289   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
4290   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
4291 
4292   if (VecVT.isScalableVector()) {
4293     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
4294     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
4295   }
4296 
4297   SDValue Mask, VL;
4298   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4299 
4300   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4301   SDValue Trunc =
4302       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4303   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4304                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4305   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4306 }
4307 
4308 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4309 // first position of a vector, and that vector is slid up to the insert index.
4310 // By limiting the active vector length to index+1 and merging with the
4311 // original vector (with an undisturbed tail policy for elements >= VL), we
4312 // achieve the desired result of leaving all elements untouched except the one
4313 // at VL-1, which is replaced with the desired value.
4314 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4315                                                     SelectionDAG &DAG) const {
4316   SDLoc DL(Op);
4317   MVT VecVT = Op.getSimpleValueType();
4318   SDValue Vec = Op.getOperand(0);
4319   SDValue Val = Op.getOperand(1);
4320   SDValue Idx = Op.getOperand(2);
4321 
4322   if (VecVT.getVectorElementType() == MVT::i1) {
4323     // FIXME: For now we just promote to an i8 vector and insert into that,
4324     // but this is probably not optimal.
4325     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4326     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4327     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4328     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4329   }
4330 
4331   MVT ContainerVT = VecVT;
4332   // If the operand is a fixed-length vector, convert to a scalable one.
4333   if (VecVT.isFixedLengthVector()) {
4334     ContainerVT = getContainerForFixedLengthVector(VecVT);
4335     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4336   }
4337 
4338   MVT XLenVT = Subtarget.getXLenVT();
4339 
4340   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4341   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
4342   // Even i64-element vectors on RV32 can be lowered without scalar
4343   // legalization if the most-significant 32 bits of the value are not affected
4344   // by the sign-extension of the lower 32 bits.
4345   // TODO: We could also catch sign extensions of a 32-bit value.
4346   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4347     const auto *CVal = cast<ConstantSDNode>(Val);
4348     if (isInt<32>(CVal->getSExtValue())) {
4349       IsLegalInsert = true;
4350       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4351     }
4352   }
4353 
4354   SDValue Mask, VL;
4355   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4356 
4357   SDValue ValInVec;
4358 
4359   if (IsLegalInsert) {
4360     unsigned Opc =
4361         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4362     if (isNullConstant(Idx)) {
4363       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4364       if (!VecVT.isFixedLengthVector())
4365         return Vec;
4366       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4367     }
4368     ValInVec =
4369         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4370   } else {
4371     // On RV32, i64-element vectors must be specially handled to place the
4372     // value at element 0, by using two vslide1up instructions in sequence on
4373     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4374     // this.
4375     SDValue One = DAG.getConstant(1, DL, XLenVT);
4376     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4377     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4378     MVT I32ContainerVT =
4379         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4380     SDValue I32Mask =
4381         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4382     // Limit the active VL to two.
4383     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
4384     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
4385     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4386     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
4387                            InsertI64VL);
4388     // First slide in the hi value, then the lo in underneath it.
4389     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
4390                            ValHi, I32Mask, InsertI64VL);
4391     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
4392                            ValLo, I32Mask, InsertI64VL);
4393     // Bitcast back to the right container type.
4394     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4395   }
4396 
4397   // Now that the value is in a vector, slide it into position.
4398   SDValue InsertVL =
4399       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4400   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4401                                 ValInVec, Idx, Mask, InsertVL);
4402   if (!VecVT.isFixedLengthVector())
4403     return Slideup;
4404   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4405 }
4406 
4407 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4408 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4409 // types this is done using VMV_X_S to allow us to glean information about the
4410 // sign bits of the result.
4411 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4412                                                      SelectionDAG &DAG) const {
4413   SDLoc DL(Op);
4414   SDValue Idx = Op.getOperand(1);
4415   SDValue Vec = Op.getOperand(0);
4416   EVT EltVT = Op.getValueType();
4417   MVT VecVT = Vec.getSimpleValueType();
4418   MVT XLenVT = Subtarget.getXLenVT();
4419 
4420   if (VecVT.getVectorElementType() == MVT::i1) {
4421     if (VecVT.isFixedLengthVector()) {
4422       unsigned NumElts = VecVT.getVectorNumElements();
4423       if (NumElts >= 8) {
4424         MVT WideEltVT;
4425         unsigned WidenVecLen;
4426         SDValue ExtractElementIdx;
4427         SDValue ExtractBitIdx;
4428         unsigned MaxEEW = Subtarget.getMaxELENForFixedLengthVectors();
4429         MVT LargestEltVT = MVT::getIntegerVT(
4430             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4431         if (NumElts <= LargestEltVT.getSizeInBits()) {
4432           assert(isPowerOf2_32(NumElts) &&
4433                  "the number of elements should be power of 2");
4434           WideEltVT = MVT::getIntegerVT(NumElts);
4435           WidenVecLen = 1;
4436           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4437           ExtractBitIdx = Idx;
4438         } else {
4439           WideEltVT = LargestEltVT;
4440           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4441           // extract element index = index / element width
4442           ExtractElementIdx = DAG.getNode(
4443               ISD::SRL, DL, XLenVT, Idx,
4444               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4445           // mask bit index = index % element width
4446           ExtractBitIdx = DAG.getNode(
4447               ISD::AND, DL, XLenVT, Idx,
4448               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4449         }
4450         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4451         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4452         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4453                                          Vec, ExtractElementIdx);
4454         // Extract the bit from GPR.
4455         SDValue ShiftRight =
4456             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4457         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4458                            DAG.getConstant(1, DL, XLenVT));
4459       }
4460     }
4461     // Otherwise, promote to an i8 vector and extract from that.
4462     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4463     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4464     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4465   }
4466 
4467   // If this is a fixed vector, we need to convert it to a scalable vector.
4468   MVT ContainerVT = VecVT;
4469   if (VecVT.isFixedLengthVector()) {
4470     ContainerVT = getContainerForFixedLengthVector(VecVT);
4471     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4472   }
4473 
4474   // If the index is 0, the vector is already in the right position.
4475   if (!isNullConstant(Idx)) {
4476     // Use a VL of 1 to avoid processing more elements than we need.
4477     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4478     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4479     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4480     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4481                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4482   }
4483 
4484   if (!EltVT.isInteger()) {
4485     // Floating-point extracts are handled in TableGen.
4486     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4487                        DAG.getConstant(0, DL, XLenVT));
4488   }
4489 
4490   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4491   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4492 }
4493 
4494 // Some RVV intrinsics may claim that they want an integer operand to be
4495 // promoted or expanded.
4496 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
4497                                           const RISCVSubtarget &Subtarget) {
4498   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4499           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4500          "Unexpected opcode");
4501 
4502   if (!Subtarget.hasVInstructions())
4503     return SDValue();
4504 
4505   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4506   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4507   SDLoc DL(Op);
4508 
4509   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4510       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4511   if (!II || !II->hasSplatOperand())
4512     return SDValue();
4513 
4514   unsigned SplatOp = II->SplatOperand + 1 + HasChain;
4515   assert(SplatOp < Op.getNumOperands());
4516 
4517   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4518   SDValue &ScalarOp = Operands[SplatOp];
4519   MVT OpVT = ScalarOp.getSimpleValueType();
4520   MVT XLenVT = Subtarget.getXLenVT();
4521 
4522   // If this isn't a scalar, or its type is XLenVT we're done.
4523   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4524     return SDValue();
4525 
4526   // Simplest case is that the operand needs to be promoted to XLenVT.
4527   if (OpVT.bitsLT(XLenVT)) {
4528     // If the operand is a constant, sign extend to increase our chances
4529     // of being able to use a .vi instruction. ANY_EXTEND would become a
4530     // a zero extend and the simm5 check in isel would fail.
4531     // FIXME: Should we ignore the upper bits in isel instead?
4532     unsigned ExtOpc =
4533         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4534     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4535     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4536   }
4537 
4538   // Use the previous operand to get the vXi64 VT. The result might be a mask
4539   // VT for compares. Using the previous operand assumes that the previous
4540   // operand will never have a smaller element size than a scalar operand and
4541   // that a widening operation never uses SEW=64.
4542   // NOTE: If this fails the below assert, we can probably just find the
4543   // element count from any operand or result and use it to construct the VT.
4544   assert(II->SplatOperand > 0 && "Unexpected splat operand!");
4545   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4546 
4547   // The more complex case is when the scalar is larger than XLenVT.
4548   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4549          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4550 
4551   // If this is a sign-extended 32-bit constant, we can truncate it and rely
4552   // on the instruction to sign-extend since SEW>XLEN.
4553   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
4554     if (isInt<32>(CVal->getSExtValue())) {
4555       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4556       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4557     }
4558   }
4559 
4560   // We need to convert the scalar to a splat vector.
4561   // FIXME: Can we implicitly truncate the scalar if it is known to
4562   // be sign extended?
4563   SDValue VL = getVLOperand(Op);
4564   assert(VL.getValueType() == XLenVT);
4565   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
4566   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4567 }
4568 
4569 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4570                                                      SelectionDAG &DAG) const {
4571   unsigned IntNo = Op.getConstantOperandVal(0);
4572   SDLoc DL(Op);
4573   MVT XLenVT = Subtarget.getXLenVT();
4574 
4575   switch (IntNo) {
4576   default:
4577     break; // Don't custom lower most intrinsics.
4578   case Intrinsic::thread_pointer: {
4579     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4580     return DAG.getRegister(RISCV::X4, PtrVT);
4581   }
4582   case Intrinsic::riscv_orc_b:
4583   case Intrinsic::riscv_brev8: {
4584     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
4585     unsigned Opc =
4586         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4587     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4588                        DAG.getConstant(7, DL, XLenVT));
4589   }
4590   case Intrinsic::riscv_grev:
4591   case Intrinsic::riscv_gorc: {
4592     unsigned Opc =
4593         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4594     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4595   }
4596   case Intrinsic::riscv_zip:
4597   case Intrinsic::riscv_unzip: {
4598     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4599     // For i32 the immdiate is 15. For i64 the immediate is 31.
4600     unsigned Opc =
4601         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4602     unsigned BitWidth = Op.getValueSizeInBits();
4603     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4604     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4605                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4606   }
4607   case Intrinsic::riscv_shfl:
4608   case Intrinsic::riscv_unshfl: {
4609     unsigned Opc =
4610         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4611     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4612   }
4613   case Intrinsic::riscv_bcompress:
4614   case Intrinsic::riscv_bdecompress: {
4615     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4616                                                        : RISCVISD::BDECOMPRESS;
4617     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4618   }
4619   case Intrinsic::riscv_bfp:
4620     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4621                        Op.getOperand(2));
4622   case Intrinsic::riscv_fsl:
4623     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4624                        Op.getOperand(2), Op.getOperand(3));
4625   case Intrinsic::riscv_fsr:
4626     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4627                        Op.getOperand(2), Op.getOperand(3));
4628   case Intrinsic::riscv_vmv_x_s:
4629     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4630     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4631                        Op.getOperand(1));
4632   case Intrinsic::riscv_vmv_v_x:
4633     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4634                             Op.getSimpleValueType(), DL, DAG, Subtarget);
4635   case Intrinsic::riscv_vfmv_v_f:
4636     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4637                        Op.getOperand(1), Op.getOperand(2));
4638   case Intrinsic::riscv_vmv_s_x: {
4639     SDValue Scalar = Op.getOperand(2);
4640 
4641     if (Scalar.getValueType().bitsLE(XLenVT)) {
4642       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4643       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4644                          Op.getOperand(1), Scalar, Op.getOperand(3));
4645     }
4646 
4647     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4648 
4649     // This is an i64 value that lives in two scalar registers. We have to
4650     // insert this in a convoluted way. First we build vXi64 splat containing
4651     // the/ two values that we assemble using some bit math. Next we'll use
4652     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4653     // to merge element 0 from our splat into the source vector.
4654     // FIXME: This is probably not the best way to do this, but it is
4655     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4656     // point.
4657     //   sw lo, (a0)
4658     //   sw hi, 4(a0)
4659     //   vlse vX, (a0)
4660     //
4661     //   vid.v      vVid
4662     //   vmseq.vx   mMask, vVid, 0
4663     //   vmerge.vvm vDest, vSrc, vVal, mMask
4664     MVT VT = Op.getSimpleValueType();
4665     SDValue Vec = Op.getOperand(1);
4666     SDValue VL = getVLOperand(Op);
4667 
4668     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
4669     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4670                                       DAG.getConstant(0, DL, MVT::i32), VL);
4671 
4672     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
4673     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4674     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4675     SDValue SelectCond =
4676         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4677                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4678     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4679                        Vec, VL);
4680   }
4681   case Intrinsic::riscv_vslide1up:
4682   case Intrinsic::riscv_vslide1down:
4683   case Intrinsic::riscv_vslide1up_mask:
4684   case Intrinsic::riscv_vslide1down_mask: {
4685     // We need to special case these when the scalar is larger than XLen.
4686     unsigned NumOps = Op.getNumOperands();
4687     bool IsMasked = NumOps == 7;
4688     unsigned OpOffset = IsMasked ? 1 : 0;
4689     SDValue Scalar = Op.getOperand(2 + OpOffset);
4690     if (Scalar.getValueType().bitsLE(XLenVT))
4691       break;
4692 
4693     // Splatting a sign extended constant is fine.
4694     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
4695       if (isInt<32>(CVal->getSExtValue()))
4696         break;
4697 
4698     MVT VT = Op.getSimpleValueType();
4699     assert(VT.getVectorElementType() == MVT::i64 &&
4700            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
4701 
4702     // Convert the vector source to the equivalent nxvXi32 vector.
4703     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4704     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
4705 
4706     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4707                                    DAG.getConstant(0, DL, XLenVT));
4708     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
4709                                    DAG.getConstant(1, DL, XLenVT));
4710 
4711     // Double the VL since we halved SEW.
4712     SDValue VL = getVLOperand(Op);
4713     SDValue I32VL =
4714         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4715 
4716     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
4717     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
4718 
4719     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4720     // instructions.
4721     if (IntNo == Intrinsic::riscv_vslide1up ||
4722         IntNo == Intrinsic::riscv_vslide1up_mask) {
4723       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
4724                         I32Mask, I32VL);
4725       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
4726                         I32Mask, I32VL);
4727     } else {
4728       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
4729                         I32Mask, I32VL);
4730       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
4731                         I32Mask, I32VL);
4732     }
4733 
4734     // Convert back to nxvXi64.
4735     Vec = DAG.getBitcast(VT, Vec);
4736 
4737     if (!IsMasked)
4738       return Vec;
4739 
4740     // Apply mask after the operation.
4741     SDValue Mask = Op.getOperand(NumOps - 3);
4742     SDValue MaskedOff = Op.getOperand(1);
4743     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
4744   }
4745   }
4746 
4747   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4748 }
4749 
4750 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4751                                                     SelectionDAG &DAG) const {
4752   unsigned IntNo = Op.getConstantOperandVal(1);
4753   switch (IntNo) {
4754   default:
4755     break;
4756   case Intrinsic::riscv_masked_strided_load: {
4757     SDLoc DL(Op);
4758     MVT XLenVT = Subtarget.getXLenVT();
4759 
4760     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4761     // the selection of the masked intrinsics doesn't do this for us.
4762     SDValue Mask = Op.getOperand(5);
4763     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4764 
4765     MVT VT = Op->getSimpleValueType(0);
4766     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4767 
4768     SDValue PassThru = Op.getOperand(2);
4769     if (!IsUnmasked) {
4770       MVT MaskVT =
4771           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4772       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4773       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4774     }
4775 
4776     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4777 
4778     SDValue IntID = DAG.getTargetConstant(
4779         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4780         XLenVT);
4781 
4782     auto *Load = cast<MemIntrinsicSDNode>(Op);
4783     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4784     if (IsUnmasked)
4785       Ops.push_back(DAG.getUNDEF(ContainerVT));
4786     else
4787       Ops.push_back(PassThru);
4788     Ops.push_back(Op.getOperand(3)); // Ptr
4789     Ops.push_back(Op.getOperand(4)); // Stride
4790     if (!IsUnmasked)
4791       Ops.push_back(Mask);
4792     Ops.push_back(VL);
4793     if (!IsUnmasked) {
4794       SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4795       Ops.push_back(Policy);
4796     }
4797 
4798     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4799     SDValue Result =
4800         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4801                                 Load->getMemoryVT(), Load->getMemOperand());
4802     SDValue Chain = Result.getValue(1);
4803     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4804     return DAG.getMergeValues({Result, Chain}, DL);
4805   }
4806   }
4807 
4808   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4809 }
4810 
4811 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4812                                                  SelectionDAG &DAG) const {
4813   unsigned IntNo = Op.getConstantOperandVal(1);
4814   switch (IntNo) {
4815   default:
4816     break;
4817   case Intrinsic::riscv_masked_strided_store: {
4818     SDLoc DL(Op);
4819     MVT XLenVT = Subtarget.getXLenVT();
4820 
4821     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4822     // the selection of the masked intrinsics doesn't do this for us.
4823     SDValue Mask = Op.getOperand(5);
4824     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4825 
4826     SDValue Val = Op.getOperand(2);
4827     MVT VT = Val.getSimpleValueType();
4828     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4829 
4830     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4831     if (!IsUnmasked) {
4832       MVT MaskVT =
4833           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4834       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4835     }
4836 
4837     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4838 
4839     SDValue IntID = DAG.getTargetConstant(
4840         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4841         XLenVT);
4842 
4843     auto *Store = cast<MemIntrinsicSDNode>(Op);
4844     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4845     Ops.push_back(Val);
4846     Ops.push_back(Op.getOperand(3)); // Ptr
4847     Ops.push_back(Op.getOperand(4)); // Stride
4848     if (!IsUnmasked)
4849       Ops.push_back(Mask);
4850     Ops.push_back(VL);
4851 
4852     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4853                                    Ops, Store->getMemoryVT(),
4854                                    Store->getMemOperand());
4855   }
4856   }
4857 
4858   return SDValue();
4859 }
4860 
4861 static MVT getLMUL1VT(MVT VT) {
4862   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4863          "Unexpected vector MVT");
4864   return MVT::getScalableVectorVT(
4865       VT.getVectorElementType(),
4866       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4867 }
4868 
4869 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4870   switch (ISDOpcode) {
4871   default:
4872     llvm_unreachable("Unhandled reduction");
4873   case ISD::VECREDUCE_ADD:
4874     return RISCVISD::VECREDUCE_ADD_VL;
4875   case ISD::VECREDUCE_UMAX:
4876     return RISCVISD::VECREDUCE_UMAX_VL;
4877   case ISD::VECREDUCE_SMAX:
4878     return RISCVISD::VECREDUCE_SMAX_VL;
4879   case ISD::VECREDUCE_UMIN:
4880     return RISCVISD::VECREDUCE_UMIN_VL;
4881   case ISD::VECREDUCE_SMIN:
4882     return RISCVISD::VECREDUCE_SMIN_VL;
4883   case ISD::VECREDUCE_AND:
4884     return RISCVISD::VECREDUCE_AND_VL;
4885   case ISD::VECREDUCE_OR:
4886     return RISCVISD::VECREDUCE_OR_VL;
4887   case ISD::VECREDUCE_XOR:
4888     return RISCVISD::VECREDUCE_XOR_VL;
4889   }
4890 }
4891 
4892 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
4893                                                          SelectionDAG &DAG,
4894                                                          bool IsVP) const {
4895   SDLoc DL(Op);
4896   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
4897   MVT VecVT = Vec.getSimpleValueType();
4898   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
4899           Op.getOpcode() == ISD::VECREDUCE_OR ||
4900           Op.getOpcode() == ISD::VECREDUCE_XOR ||
4901           Op.getOpcode() == ISD::VP_REDUCE_AND ||
4902           Op.getOpcode() == ISD::VP_REDUCE_OR ||
4903           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
4904          "Unexpected reduction lowering");
4905 
4906   MVT XLenVT = Subtarget.getXLenVT();
4907   assert(Op.getValueType() == XLenVT &&
4908          "Expected reduction output to be legalized to XLenVT");
4909 
4910   MVT ContainerVT = VecVT;
4911   if (VecVT.isFixedLengthVector()) {
4912     ContainerVT = getContainerForFixedLengthVector(VecVT);
4913     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4914   }
4915 
4916   SDValue Mask, VL;
4917   if (IsVP) {
4918     Mask = Op.getOperand(2);
4919     VL = Op.getOperand(3);
4920   } else {
4921     std::tie(Mask, VL) =
4922         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4923   }
4924 
4925   unsigned BaseOpc;
4926   ISD::CondCode CC;
4927   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4928 
4929   switch (Op.getOpcode()) {
4930   default:
4931     llvm_unreachable("Unhandled reduction");
4932   case ISD::VECREDUCE_AND:
4933   case ISD::VP_REDUCE_AND: {
4934     // vcpop ~x == 0
4935     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
4936     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
4937     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4938     CC = ISD::SETEQ;
4939     BaseOpc = ISD::AND;
4940     break;
4941   }
4942   case ISD::VECREDUCE_OR:
4943   case ISD::VP_REDUCE_OR:
4944     // vcpop x != 0
4945     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4946     CC = ISD::SETNE;
4947     BaseOpc = ISD::OR;
4948     break;
4949   case ISD::VECREDUCE_XOR:
4950   case ISD::VP_REDUCE_XOR: {
4951     // ((vcpop x) & 1) != 0
4952     SDValue One = DAG.getConstant(1, DL, XLenVT);
4953     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
4954     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
4955     CC = ISD::SETNE;
4956     BaseOpc = ISD::XOR;
4957     break;
4958   }
4959   }
4960 
4961   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
4962 
4963   if (!IsVP)
4964     return SetCC;
4965 
4966   // Now include the start value in the operation.
4967   // Note that we must return the start value when no elements are operated
4968   // upon. The vcpop instructions we've emitted in each case above will return
4969   // 0 for an inactive vector, and so we've already received the neutral value:
4970   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
4971   // can simply include the start value.
4972   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
4973 }
4974 
4975 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
4976                                             SelectionDAG &DAG) const {
4977   SDLoc DL(Op);
4978   SDValue Vec = Op.getOperand(0);
4979   EVT VecEVT = Vec.getValueType();
4980 
4981   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
4982 
4983   // Due to ordering in legalize types we may have a vector type that needs to
4984   // be split. Do that manually so we can get down to a legal type.
4985   while (getTypeAction(*DAG.getContext(), VecEVT) ==
4986          TargetLowering::TypeSplitVector) {
4987     SDValue Lo, Hi;
4988     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
4989     VecEVT = Lo.getValueType();
4990     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
4991   }
4992 
4993   // TODO: The type may need to be widened rather than split. Or widened before
4994   // it can be split.
4995   if (!isTypeLegal(VecEVT))
4996     return SDValue();
4997 
4998   MVT VecVT = VecEVT.getSimpleVT();
4999   MVT VecEltVT = VecVT.getVectorElementType();
5000   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
5001 
5002   MVT ContainerVT = VecVT;
5003   if (VecVT.isFixedLengthVector()) {
5004     ContainerVT = getContainerForFixedLengthVector(VecVT);
5005     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5006   }
5007 
5008   MVT M1VT = getLMUL1VT(ContainerVT);
5009   MVT XLenVT = Subtarget.getXLenVT();
5010 
5011   SDValue Mask, VL;
5012   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5013 
5014   SDValue NeutralElem =
5015       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
5016   SDValue IdentitySplat = lowerScalarSplat(
5017       NeutralElem, DAG.getConstant(1, DL, XLenVT), M1VT, DL, DAG, Subtarget);
5018   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
5019                                   IdentitySplat, Mask, VL);
5020   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5021                              DAG.getConstant(0, DL, XLenVT));
5022   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5023 }
5024 
5025 // Given a reduction op, this function returns the matching reduction opcode,
5026 // the vector SDValue and the scalar SDValue required to lower this to a
5027 // RISCVISD node.
5028 static std::tuple<unsigned, SDValue, SDValue>
5029 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
5030   SDLoc DL(Op);
5031   auto Flags = Op->getFlags();
5032   unsigned Opcode = Op.getOpcode();
5033   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
5034   switch (Opcode) {
5035   default:
5036     llvm_unreachable("Unhandled reduction");
5037   case ISD::VECREDUCE_FADD: {
5038     // Use positive zero if we can. It is cheaper to materialize.
5039     SDValue Zero =
5040         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
5041     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
5042   }
5043   case ISD::VECREDUCE_SEQ_FADD:
5044     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
5045                            Op.getOperand(0));
5046   case ISD::VECREDUCE_FMIN:
5047     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
5048                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5049   case ISD::VECREDUCE_FMAX:
5050     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
5051                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5052   }
5053 }
5054 
5055 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
5056                                               SelectionDAG &DAG) const {
5057   SDLoc DL(Op);
5058   MVT VecEltVT = Op.getSimpleValueType();
5059 
5060   unsigned RVVOpcode;
5061   SDValue VectorVal, ScalarVal;
5062   std::tie(RVVOpcode, VectorVal, ScalarVal) =
5063       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
5064   MVT VecVT = VectorVal.getSimpleValueType();
5065 
5066   MVT ContainerVT = VecVT;
5067   if (VecVT.isFixedLengthVector()) {
5068     ContainerVT = getContainerForFixedLengthVector(VecVT);
5069     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
5070   }
5071 
5072   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
5073   MVT XLenVT = Subtarget.getXLenVT();
5074 
5075   SDValue Mask, VL;
5076   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5077 
5078   SDValue ScalarSplat = lowerScalarSplat(
5079       ScalarVal, DAG.getConstant(1, DL, XLenVT), M1VT, DL, DAG, Subtarget);
5080   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
5081                                   VectorVal, ScalarSplat, Mask, VL);
5082   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5083                      DAG.getConstant(0, DL, XLenVT));
5084 }
5085 
5086 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
5087   switch (ISDOpcode) {
5088   default:
5089     llvm_unreachable("Unhandled reduction");
5090   case ISD::VP_REDUCE_ADD:
5091     return RISCVISD::VECREDUCE_ADD_VL;
5092   case ISD::VP_REDUCE_UMAX:
5093     return RISCVISD::VECREDUCE_UMAX_VL;
5094   case ISD::VP_REDUCE_SMAX:
5095     return RISCVISD::VECREDUCE_SMAX_VL;
5096   case ISD::VP_REDUCE_UMIN:
5097     return RISCVISD::VECREDUCE_UMIN_VL;
5098   case ISD::VP_REDUCE_SMIN:
5099     return RISCVISD::VECREDUCE_SMIN_VL;
5100   case ISD::VP_REDUCE_AND:
5101     return RISCVISD::VECREDUCE_AND_VL;
5102   case ISD::VP_REDUCE_OR:
5103     return RISCVISD::VECREDUCE_OR_VL;
5104   case ISD::VP_REDUCE_XOR:
5105     return RISCVISD::VECREDUCE_XOR_VL;
5106   case ISD::VP_REDUCE_FADD:
5107     return RISCVISD::VECREDUCE_FADD_VL;
5108   case ISD::VP_REDUCE_SEQ_FADD:
5109     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
5110   case ISD::VP_REDUCE_FMAX:
5111     return RISCVISD::VECREDUCE_FMAX_VL;
5112   case ISD::VP_REDUCE_FMIN:
5113     return RISCVISD::VECREDUCE_FMIN_VL;
5114   }
5115 }
5116 
5117 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
5118                                            SelectionDAG &DAG) const {
5119   SDLoc DL(Op);
5120   SDValue Vec = Op.getOperand(1);
5121   EVT VecEVT = Vec.getValueType();
5122 
5123   // TODO: The type may need to be widened rather than split. Or widened before
5124   // it can be split.
5125   if (!isTypeLegal(VecEVT))
5126     return SDValue();
5127 
5128   MVT VecVT = VecEVT.getSimpleVT();
5129   MVT VecEltVT = VecVT.getVectorElementType();
5130   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5131 
5132   MVT ContainerVT = VecVT;
5133   if (VecVT.isFixedLengthVector()) {
5134     ContainerVT = getContainerForFixedLengthVector(VecVT);
5135     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5136   }
5137 
5138   SDValue VL = Op.getOperand(3);
5139   SDValue Mask = Op.getOperand(2);
5140 
5141   MVT M1VT = getLMUL1VT(ContainerVT);
5142   MVT XLenVT = Subtarget.getXLenVT();
5143   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5144 
5145   SDValue StartSplat =
5146       lowerScalarSplat(Op.getOperand(0), DAG.getConstant(1, DL, XLenVT), M1VT,
5147                        DL, DAG, Subtarget);
5148   SDValue Reduction =
5149       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5150   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5151                              DAG.getConstant(0, DL, XLenVT));
5152   if (!VecVT.isInteger())
5153     return Elt0;
5154   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5155 }
5156 
5157 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5158                                                    SelectionDAG &DAG) const {
5159   SDValue Vec = Op.getOperand(0);
5160   SDValue SubVec = Op.getOperand(1);
5161   MVT VecVT = Vec.getSimpleValueType();
5162   MVT SubVecVT = SubVec.getSimpleValueType();
5163 
5164   SDLoc DL(Op);
5165   MVT XLenVT = Subtarget.getXLenVT();
5166   unsigned OrigIdx = Op.getConstantOperandVal(2);
5167   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5168 
5169   // We don't have the ability to slide mask vectors up indexed by their i1
5170   // elements; the smallest we can do is i8. Often we are able to bitcast to
5171   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5172   // into a scalable one, we might not necessarily have enough scalable
5173   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
5174   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5175       (OrigIdx != 0 || !Vec.isUndef())) {
5176     if (VecVT.getVectorMinNumElements() >= 8 &&
5177         SubVecVT.getVectorMinNumElements() >= 8) {
5178       assert(OrigIdx % 8 == 0 && "Invalid index");
5179       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5180              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5181              "Unexpected mask vector lowering");
5182       OrigIdx /= 8;
5183       SubVecVT =
5184           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5185                            SubVecVT.isScalableVector());
5186       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5187                                VecVT.isScalableVector());
5188       Vec = DAG.getBitcast(VecVT, Vec);
5189       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5190     } else {
5191       // We can't slide this mask vector up indexed by its i1 elements.
5192       // This poses a problem when we wish to insert a scalable vector which
5193       // can't be re-expressed as a larger type. Just choose the slow path and
5194       // extend to a larger type, then truncate back down.
5195       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5196       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5197       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5198       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5199       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5200                         Op.getOperand(2));
5201       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5202       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5203     }
5204   }
5205 
5206   // If the subvector vector is a fixed-length type, we cannot use subregister
5207   // manipulation to simplify the codegen; we don't know which register of a
5208   // LMUL group contains the specific subvector as we only know the minimum
5209   // register size. Therefore we must slide the vector group up the full
5210   // amount.
5211   if (SubVecVT.isFixedLengthVector()) {
5212     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5213       return Op;
5214     MVT ContainerVT = VecVT;
5215     if (VecVT.isFixedLengthVector()) {
5216       ContainerVT = getContainerForFixedLengthVector(VecVT);
5217       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5218     }
5219     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5220                          DAG.getUNDEF(ContainerVT), SubVec,
5221                          DAG.getConstant(0, DL, XLenVT));
5222     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5223       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5224       return DAG.getBitcast(Op.getValueType(), SubVec);
5225     }
5226     SDValue Mask =
5227         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5228     // Set the vector length to only the number of elements we care about. Note
5229     // that for slideup this includes the offset.
5230     SDValue VL =
5231         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5232     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5233     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5234                                   SubVec, SlideupAmt, Mask, VL);
5235     if (VecVT.isFixedLengthVector())
5236       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5237     return DAG.getBitcast(Op.getValueType(), Slideup);
5238   }
5239 
5240   unsigned SubRegIdx, RemIdx;
5241   std::tie(SubRegIdx, RemIdx) =
5242       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5243           VecVT, SubVecVT, OrigIdx, TRI);
5244 
5245   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5246   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5247                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5248                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5249 
5250   // 1. If the Idx has been completely eliminated and this subvector's size is
5251   // a vector register or a multiple thereof, or the surrounding elements are
5252   // undef, then this is a subvector insert which naturally aligns to a vector
5253   // register. These can easily be handled using subregister manipulation.
5254   // 2. If the subvector is smaller than a vector register, then the insertion
5255   // must preserve the undisturbed elements of the register. We do this by
5256   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5257   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5258   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5259   // LMUL=1 type back into the larger vector (resolving to another subregister
5260   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
5261   // to avoid allocating a large register group to hold our subvector.
5262   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5263     return Op;
5264 
5265   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
5266   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
5267   // (in our case undisturbed). This means we can set up a subvector insertion
5268   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5269   // size of the subvector.
5270   MVT InterSubVT = VecVT;
5271   SDValue AlignedExtract = Vec;
5272   unsigned AlignedIdx = OrigIdx - RemIdx;
5273   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5274     InterSubVT = getLMUL1VT(VecVT);
5275     // Extract a subvector equal to the nearest full vector register type. This
5276     // should resolve to a EXTRACT_SUBREG instruction.
5277     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5278                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5279   }
5280 
5281   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5282   // For scalable vectors this must be further multiplied by vscale.
5283   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5284 
5285   SDValue Mask, VL;
5286   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5287 
5288   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5289   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5290   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5291   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5292 
5293   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5294                        DAG.getUNDEF(InterSubVT), SubVec,
5295                        DAG.getConstant(0, DL, XLenVT));
5296 
5297   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5298                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5299 
5300   // If required, insert this subvector back into the correct vector register.
5301   // This should resolve to an INSERT_SUBREG instruction.
5302   if (VecVT.bitsGT(InterSubVT))
5303     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5304                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5305 
5306   // We might have bitcast from a mask type: cast back to the original type if
5307   // required.
5308   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5309 }
5310 
5311 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5312                                                     SelectionDAG &DAG) const {
5313   SDValue Vec = Op.getOperand(0);
5314   MVT SubVecVT = Op.getSimpleValueType();
5315   MVT VecVT = Vec.getSimpleValueType();
5316 
5317   SDLoc DL(Op);
5318   MVT XLenVT = Subtarget.getXLenVT();
5319   unsigned OrigIdx = Op.getConstantOperandVal(1);
5320   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5321 
5322   // We don't have the ability to slide mask vectors down indexed by their i1
5323   // elements; the smallest we can do is i8. Often we are able to bitcast to
5324   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5325   // from a scalable one, we might not necessarily have enough scalable
5326   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
5327   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5328     if (VecVT.getVectorMinNumElements() >= 8 &&
5329         SubVecVT.getVectorMinNumElements() >= 8) {
5330       assert(OrigIdx % 8 == 0 && "Invalid index");
5331       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5332              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5333              "Unexpected mask vector lowering");
5334       OrigIdx /= 8;
5335       SubVecVT =
5336           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5337                            SubVecVT.isScalableVector());
5338       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5339                                VecVT.isScalableVector());
5340       Vec = DAG.getBitcast(VecVT, Vec);
5341     } else {
5342       // We can't slide this mask vector down, indexed by its i1 elements.
5343       // This poses a problem when we wish to extract a scalable vector which
5344       // can't be re-expressed as a larger type. Just choose the slow path and
5345       // extend to a larger type, then truncate back down.
5346       // TODO: We could probably improve this when extracting certain fixed
5347       // from fixed, where we can extract as i8 and shift the correct element
5348       // right to reach the desired subvector?
5349       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5350       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5351       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5352       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5353                         Op.getOperand(1));
5354       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5355       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5356     }
5357   }
5358 
5359   // If the subvector vector is a fixed-length type, we cannot use subregister
5360   // manipulation to simplify the codegen; we don't know which register of a
5361   // LMUL group contains the specific subvector as we only know the minimum
5362   // register size. Therefore we must slide the vector group down the full
5363   // amount.
5364   if (SubVecVT.isFixedLengthVector()) {
5365     // With an index of 0 this is a cast-like subvector, which can be performed
5366     // with subregister operations.
5367     if (OrigIdx == 0)
5368       return Op;
5369     MVT ContainerVT = VecVT;
5370     if (VecVT.isFixedLengthVector()) {
5371       ContainerVT = getContainerForFixedLengthVector(VecVT);
5372       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5373     }
5374     SDValue Mask =
5375         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5376     // Set the vector length to only the number of elements we care about. This
5377     // avoids sliding down elements we're going to discard straight away.
5378     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5379     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5380     SDValue Slidedown =
5381         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5382                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5383     // Now we can use a cast-like subvector extract to get the result.
5384     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5385                             DAG.getConstant(0, DL, XLenVT));
5386     return DAG.getBitcast(Op.getValueType(), Slidedown);
5387   }
5388 
5389   unsigned SubRegIdx, RemIdx;
5390   std::tie(SubRegIdx, RemIdx) =
5391       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5392           VecVT, SubVecVT, OrigIdx, TRI);
5393 
5394   // If the Idx has been completely eliminated then this is a subvector extract
5395   // which naturally aligns to a vector register. These can easily be handled
5396   // using subregister manipulation.
5397   if (RemIdx == 0)
5398     return Op;
5399 
5400   // Else we must shift our vector register directly to extract the subvector.
5401   // Do this using VSLIDEDOWN.
5402 
5403   // If the vector type is an LMUL-group type, extract a subvector equal to the
5404   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
5405   // instruction.
5406   MVT InterSubVT = VecVT;
5407   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5408     InterSubVT = getLMUL1VT(VecVT);
5409     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5410                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5411   }
5412 
5413   // Slide this vector register down by the desired number of elements in order
5414   // to place the desired subvector starting at element 0.
5415   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5416   // For scalable vectors this must be further multiplied by vscale.
5417   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5418 
5419   SDValue Mask, VL;
5420   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5421   SDValue Slidedown =
5422       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5423                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5424 
5425   // Now the vector is in the right position, extract our final subvector. This
5426   // should resolve to a COPY.
5427   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5428                           DAG.getConstant(0, DL, XLenVT));
5429 
5430   // We might have bitcast from a mask type: cast back to the original type if
5431   // required.
5432   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5433 }
5434 
5435 // Lower step_vector to the vid instruction. Any non-identity step value must
5436 // be accounted for my manual expansion.
5437 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5438                                               SelectionDAG &DAG) const {
5439   SDLoc DL(Op);
5440   MVT VT = Op.getSimpleValueType();
5441   MVT XLenVT = Subtarget.getXLenVT();
5442   SDValue Mask, VL;
5443   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5444   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5445   uint64_t StepValImm = Op.getConstantOperandVal(0);
5446   if (StepValImm != 1) {
5447     if (isPowerOf2_64(StepValImm)) {
5448       SDValue StepVal =
5449           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
5450                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5451       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5452     } else {
5453       SDValue StepVal = lowerScalarSplat(
5454           DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT,
5455           DL, DAG, Subtarget);
5456       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5457     }
5458   }
5459   return StepVec;
5460 }
5461 
5462 // Implement vector_reverse using vrgather.vv with indices determined by
5463 // subtracting the id of each element from (VLMAX-1). This will convert
5464 // the indices like so:
5465 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5466 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5467 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5468                                                  SelectionDAG &DAG) const {
5469   SDLoc DL(Op);
5470   MVT VecVT = Op.getSimpleValueType();
5471   unsigned EltSize = VecVT.getScalarSizeInBits();
5472   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5473 
5474   unsigned MaxVLMAX = 0;
5475   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5476   if (VectorBitsMax != 0)
5477     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
5478 
5479   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5480   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5481 
5482   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5483   // to use vrgatherei16.vv.
5484   // TODO: It's also possible to use vrgatherei16.vv for other types to
5485   // decrease register width for the index calculation.
5486   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
5487     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
5488     // Reverse each half, then reassemble them in reverse order.
5489     // NOTE: It's also possible that after splitting that VLMAX no longer
5490     // requires vrgatherei16.vv.
5491     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5492       SDValue Lo, Hi;
5493       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5494       EVT LoVT, HiVT;
5495       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5496       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5497       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5498       // Reassemble the low and high pieces reversed.
5499       // FIXME: This is a CONCAT_VECTORS.
5500       SDValue Res =
5501           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5502                       DAG.getIntPtrConstant(0, DL));
5503       return DAG.getNode(
5504           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5505           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5506     }
5507 
5508     // Just promote the int type to i16 which will double the LMUL.
5509     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5510     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5511   }
5512 
5513   MVT XLenVT = Subtarget.getXLenVT();
5514   SDValue Mask, VL;
5515   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5516 
5517   // Calculate VLMAX-1 for the desired SEW.
5518   unsigned MinElts = VecVT.getVectorMinNumElements();
5519   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5520                               DAG.getConstant(MinElts, DL, XLenVT));
5521   SDValue VLMinus1 =
5522       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5523 
5524   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5525   bool IsRV32E64 =
5526       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5527   SDValue SplatVL;
5528   if (!IsRV32E64)
5529     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5530   else
5531     SplatVL =
5532         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, VLMinus1,
5533                     DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
5534 
5535   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5536   SDValue Indices =
5537       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5538 
5539   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5540 }
5541 
5542 SDValue
5543 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5544                                                      SelectionDAG &DAG) const {
5545   SDLoc DL(Op);
5546   auto *Load = cast<LoadSDNode>(Op);
5547 
5548   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5549                                         Load->getMemoryVT(),
5550                                         *Load->getMemOperand()) &&
5551          "Expecting a correctly-aligned load");
5552 
5553   MVT VT = Op.getSimpleValueType();
5554   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5555 
5556   SDValue VL =
5557       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5558 
5559   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5560   SDValue NewLoad = DAG.getMemIntrinsicNode(
5561       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
5562       Load->getMemoryVT(), Load->getMemOperand());
5563 
5564   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5565   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5566 }
5567 
5568 SDValue
5569 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5570                                                       SelectionDAG &DAG) const {
5571   SDLoc DL(Op);
5572   auto *Store = cast<StoreSDNode>(Op);
5573 
5574   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5575                                         Store->getMemoryVT(),
5576                                         *Store->getMemOperand()) &&
5577          "Expecting a correctly-aligned store");
5578 
5579   SDValue StoreVal = Store->getValue();
5580   MVT VT = StoreVal.getSimpleValueType();
5581 
5582   // If the size less than a byte, we need to pad with zeros to make a byte.
5583   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5584     VT = MVT::v8i1;
5585     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5586                            DAG.getConstant(0, DL, VT), StoreVal,
5587                            DAG.getIntPtrConstant(0, DL));
5588   }
5589 
5590   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5591 
5592   SDValue VL =
5593       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5594 
5595   SDValue NewValue =
5596       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5597   return DAG.getMemIntrinsicNode(
5598       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
5599       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
5600       Store->getMemoryVT(), Store->getMemOperand());
5601 }
5602 
5603 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5604                                              SelectionDAG &DAG) const {
5605   SDLoc DL(Op);
5606   MVT VT = Op.getSimpleValueType();
5607 
5608   const auto *MemSD = cast<MemSDNode>(Op);
5609   EVT MemVT = MemSD->getMemoryVT();
5610   MachineMemOperand *MMO = MemSD->getMemOperand();
5611   SDValue Chain = MemSD->getChain();
5612   SDValue BasePtr = MemSD->getBasePtr();
5613 
5614   SDValue Mask, PassThru, VL;
5615   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5616     Mask = VPLoad->getMask();
5617     PassThru = DAG.getUNDEF(VT);
5618     VL = VPLoad->getVectorLength();
5619   } else {
5620     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5621     Mask = MLoad->getMask();
5622     PassThru = MLoad->getPassThru();
5623   }
5624 
5625   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5626 
5627   MVT XLenVT = Subtarget.getXLenVT();
5628 
5629   MVT ContainerVT = VT;
5630   if (VT.isFixedLengthVector()) {
5631     ContainerVT = getContainerForFixedLengthVector(VT);
5632     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5633     if (!IsUnmasked) {
5634       MVT MaskVT =
5635           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5636       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5637     }
5638   }
5639 
5640   if (!VL)
5641     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5642 
5643   unsigned IntID =
5644       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5645   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5646   if (IsUnmasked)
5647     Ops.push_back(DAG.getUNDEF(ContainerVT));
5648   else
5649     Ops.push_back(PassThru);
5650   Ops.push_back(BasePtr);
5651   if (!IsUnmasked)
5652     Ops.push_back(Mask);
5653   Ops.push_back(VL);
5654   if (!IsUnmasked)
5655     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5656 
5657   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5658 
5659   SDValue Result =
5660       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5661   Chain = Result.getValue(1);
5662 
5663   if (VT.isFixedLengthVector())
5664     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5665 
5666   return DAG.getMergeValues({Result, Chain}, DL);
5667 }
5668 
5669 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5670                                               SelectionDAG &DAG) const {
5671   SDLoc DL(Op);
5672 
5673   const auto *MemSD = cast<MemSDNode>(Op);
5674   EVT MemVT = MemSD->getMemoryVT();
5675   MachineMemOperand *MMO = MemSD->getMemOperand();
5676   SDValue Chain = MemSD->getChain();
5677   SDValue BasePtr = MemSD->getBasePtr();
5678   SDValue Val, Mask, VL;
5679 
5680   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5681     Val = VPStore->getValue();
5682     Mask = VPStore->getMask();
5683     VL = VPStore->getVectorLength();
5684   } else {
5685     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5686     Val = MStore->getValue();
5687     Mask = MStore->getMask();
5688   }
5689 
5690   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5691 
5692   MVT VT = Val.getSimpleValueType();
5693   MVT XLenVT = Subtarget.getXLenVT();
5694 
5695   MVT ContainerVT = VT;
5696   if (VT.isFixedLengthVector()) {
5697     ContainerVT = getContainerForFixedLengthVector(VT);
5698 
5699     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5700     if (!IsUnmasked) {
5701       MVT MaskVT =
5702           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5703       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5704     }
5705   }
5706 
5707   if (!VL)
5708     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5709 
5710   unsigned IntID =
5711       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5712   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5713   Ops.push_back(Val);
5714   Ops.push_back(BasePtr);
5715   if (!IsUnmasked)
5716     Ops.push_back(Mask);
5717   Ops.push_back(VL);
5718 
5719   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5720                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5721 }
5722 
5723 SDValue
5724 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5725                                                       SelectionDAG &DAG) const {
5726   MVT InVT = Op.getOperand(0).getSimpleValueType();
5727   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5728 
5729   MVT VT = Op.getSimpleValueType();
5730 
5731   SDValue Op1 =
5732       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5733   SDValue Op2 =
5734       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5735 
5736   SDLoc DL(Op);
5737   SDValue VL =
5738       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5739 
5740   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5741   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5742 
5743   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5744                             Op.getOperand(2), Mask, VL);
5745 
5746   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5747 }
5748 
5749 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5750     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5751   MVT VT = Op.getSimpleValueType();
5752 
5753   if (VT.getVectorElementType() == MVT::i1)
5754     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5755 
5756   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5757 }
5758 
5759 SDValue
5760 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5761                                                       SelectionDAG &DAG) const {
5762   unsigned Opc;
5763   switch (Op.getOpcode()) {
5764   default: llvm_unreachable("Unexpected opcode!");
5765   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5766   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5767   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5768   }
5769 
5770   return lowerToScalableOp(Op, DAG, Opc);
5771 }
5772 
5773 // Lower vector ABS to smax(X, sub(0, X)).
5774 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
5775   SDLoc DL(Op);
5776   MVT VT = Op.getSimpleValueType();
5777   SDValue X = Op.getOperand(0);
5778 
5779   assert(VT.isFixedLengthVector() && "Unexpected type");
5780 
5781   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5782   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
5783 
5784   SDValue Mask, VL;
5785   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5786 
5787   SDValue SplatZero =
5788       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5789                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5790   SDValue NegX =
5791       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5792   SDValue Max =
5793       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5794 
5795   return convertFromScalableVector(VT, Max, DAG, Subtarget);
5796 }
5797 
5798 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5799     SDValue Op, SelectionDAG &DAG) const {
5800   SDLoc DL(Op);
5801   MVT VT = Op.getSimpleValueType();
5802   SDValue Mag = Op.getOperand(0);
5803   SDValue Sign = Op.getOperand(1);
5804   assert(Mag.getValueType() == Sign.getValueType() &&
5805          "Can only handle COPYSIGN with matching types.");
5806 
5807   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5808   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5809   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5810 
5811   SDValue Mask, VL;
5812   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5813 
5814   SDValue CopySign =
5815       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5816 
5817   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5818 }
5819 
5820 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
5821     SDValue Op, SelectionDAG &DAG) const {
5822   MVT VT = Op.getSimpleValueType();
5823   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5824 
5825   MVT I1ContainerVT =
5826       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5827 
5828   SDValue CC =
5829       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
5830   SDValue Op1 =
5831       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5832   SDValue Op2 =
5833       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
5834 
5835   SDLoc DL(Op);
5836   SDValue Mask, VL;
5837   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5838 
5839   SDValue Select =
5840       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
5841 
5842   return convertFromScalableVector(VT, Select, DAG, Subtarget);
5843 }
5844 
5845 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
5846                                                unsigned NewOpc,
5847                                                bool HasMask) const {
5848   MVT VT = Op.getSimpleValueType();
5849   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5850 
5851   // Create list of operands by converting existing ones to scalable types.
5852   SmallVector<SDValue, 6> Ops;
5853   for (const SDValue &V : Op->op_values()) {
5854     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5855 
5856     // Pass through non-vector operands.
5857     if (!V.getValueType().isVector()) {
5858       Ops.push_back(V);
5859       continue;
5860     }
5861 
5862     // "cast" fixed length vector to a scalable vector.
5863     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
5864            "Only fixed length vectors are supported!");
5865     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5866   }
5867 
5868   SDLoc DL(Op);
5869   SDValue Mask, VL;
5870   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5871   if (HasMask)
5872     Ops.push_back(Mask);
5873   Ops.push_back(VL);
5874 
5875   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
5876   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
5877 }
5878 
5879 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
5880 // * Operands of each node are assumed to be in the same order.
5881 // * The EVL operand is promoted from i32 to i64 on RV64.
5882 // * Fixed-length vectors are converted to their scalable-vector container
5883 //   types.
5884 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
5885                                        unsigned RISCVISDOpc) const {
5886   SDLoc DL(Op);
5887   MVT VT = Op.getSimpleValueType();
5888   SmallVector<SDValue, 4> Ops;
5889 
5890   for (const auto &OpIdx : enumerate(Op->ops())) {
5891     SDValue V = OpIdx.value();
5892     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5893     // Pass through operands which aren't fixed-length vectors.
5894     if (!V.getValueType().isFixedLengthVector()) {
5895       Ops.push_back(V);
5896       continue;
5897     }
5898     // "cast" fixed length vector to a scalable vector.
5899     MVT OpVT = V.getSimpleValueType();
5900     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
5901     assert(useRVVForFixedLengthVectorVT(OpVT) &&
5902            "Only fixed length vectors are supported!");
5903     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5904   }
5905 
5906   if (!VT.isFixedLengthVector())
5907     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
5908 
5909   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5910 
5911   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
5912 
5913   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
5914 }
5915 
5916 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
5917                                             unsigned MaskOpc,
5918                                             unsigned VecOpc) const {
5919   MVT VT = Op.getSimpleValueType();
5920   if (VT.getVectorElementType() != MVT::i1)
5921     return lowerVPOp(Op, DAG, VecOpc);
5922 
5923   // It is safe to drop mask parameter as masked-off elements are undef.
5924   SDValue Op1 = Op->getOperand(0);
5925   SDValue Op2 = Op->getOperand(1);
5926   SDValue VL = Op->getOperand(3);
5927 
5928   MVT ContainerVT = VT;
5929   const bool IsFixed = VT.isFixedLengthVector();
5930   if (IsFixed) {
5931     ContainerVT = getContainerForFixedLengthVector(VT);
5932     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
5933     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
5934   }
5935 
5936   SDLoc DL(Op);
5937   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
5938   if (!IsFixed)
5939     return Val;
5940   return convertFromScalableVector(VT, Val, DAG, Subtarget);
5941 }
5942 
5943 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
5944 // matched to a RVV indexed load. The RVV indexed load instructions only
5945 // support the "unsigned unscaled" addressing mode; indices are implicitly
5946 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
5947 // signed or scaled indexing is extended to the XLEN value type and scaled
5948 // accordingly.
5949 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
5950                                                SelectionDAG &DAG) const {
5951   SDLoc DL(Op);
5952   MVT VT = Op.getSimpleValueType();
5953 
5954   const auto *MemSD = cast<MemSDNode>(Op.getNode());
5955   EVT MemVT = MemSD->getMemoryVT();
5956   MachineMemOperand *MMO = MemSD->getMemOperand();
5957   SDValue Chain = MemSD->getChain();
5958   SDValue BasePtr = MemSD->getBasePtr();
5959 
5960   ISD::LoadExtType LoadExtType;
5961   SDValue Index, Mask, PassThru, VL;
5962 
5963   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
5964     Index = VPGN->getIndex();
5965     Mask = VPGN->getMask();
5966     PassThru = DAG.getUNDEF(VT);
5967     VL = VPGN->getVectorLength();
5968     // VP doesn't support extending loads.
5969     LoadExtType = ISD::NON_EXTLOAD;
5970   } else {
5971     // Else it must be a MGATHER.
5972     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
5973     Index = MGN->getIndex();
5974     Mask = MGN->getMask();
5975     PassThru = MGN->getPassThru();
5976     LoadExtType = MGN->getExtensionType();
5977   }
5978 
5979   MVT IndexVT = Index.getSimpleValueType();
5980   MVT XLenVT = Subtarget.getXLenVT();
5981 
5982   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
5983          "Unexpected VTs!");
5984   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
5985   // Targets have to explicitly opt-in for extending vector loads.
5986   assert(LoadExtType == ISD::NON_EXTLOAD &&
5987          "Unexpected extending MGATHER/VP_GATHER");
5988   (void)LoadExtType;
5989 
5990   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5991   // the selection of the masked intrinsics doesn't do this for us.
5992   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5993 
5994   MVT ContainerVT = VT;
5995   if (VT.isFixedLengthVector()) {
5996     // We need to use the larger of the result and index type to determine the
5997     // scalable type to use so we don't increase LMUL for any operand/result.
5998     if (VT.bitsGE(IndexVT)) {
5999       ContainerVT = getContainerForFixedLengthVector(VT);
6000       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6001                                  ContainerVT.getVectorElementCount());
6002     } else {
6003       IndexVT = getContainerForFixedLengthVector(IndexVT);
6004       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
6005                                      IndexVT.getVectorElementCount());
6006     }
6007 
6008     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6009 
6010     if (!IsUnmasked) {
6011       MVT MaskVT =
6012           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6013       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6014       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
6015     }
6016   }
6017 
6018   if (!VL)
6019     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6020 
6021   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6022     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6023     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6024                                    VL);
6025     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6026                         TrueMask, VL);
6027   }
6028 
6029   unsigned IntID =
6030       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
6031   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6032   if (IsUnmasked)
6033     Ops.push_back(DAG.getUNDEF(ContainerVT));
6034   else
6035     Ops.push_back(PassThru);
6036   Ops.push_back(BasePtr);
6037   Ops.push_back(Index);
6038   if (!IsUnmasked)
6039     Ops.push_back(Mask);
6040   Ops.push_back(VL);
6041   if (!IsUnmasked)
6042     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
6043 
6044   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
6045   SDValue Result =
6046       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
6047   Chain = Result.getValue(1);
6048 
6049   if (VT.isFixedLengthVector())
6050     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
6051 
6052   return DAG.getMergeValues({Result, Chain}, DL);
6053 }
6054 
6055 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
6056 // matched to a RVV indexed store. The RVV indexed store instructions only
6057 // support the "unsigned unscaled" addressing mode; indices are implicitly
6058 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6059 // signed or scaled indexing is extended to the XLEN value type and scaled
6060 // accordingly.
6061 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
6062                                                 SelectionDAG &DAG) const {
6063   SDLoc DL(Op);
6064   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6065   EVT MemVT = MemSD->getMemoryVT();
6066   MachineMemOperand *MMO = MemSD->getMemOperand();
6067   SDValue Chain = MemSD->getChain();
6068   SDValue BasePtr = MemSD->getBasePtr();
6069 
6070   bool IsTruncatingStore = false;
6071   SDValue Index, Mask, Val, VL;
6072 
6073   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
6074     Index = VPSN->getIndex();
6075     Mask = VPSN->getMask();
6076     Val = VPSN->getValue();
6077     VL = VPSN->getVectorLength();
6078     // VP doesn't support truncating stores.
6079     IsTruncatingStore = false;
6080   } else {
6081     // Else it must be a MSCATTER.
6082     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
6083     Index = MSN->getIndex();
6084     Mask = MSN->getMask();
6085     Val = MSN->getValue();
6086     IsTruncatingStore = MSN->isTruncatingStore();
6087   }
6088 
6089   MVT VT = Val.getSimpleValueType();
6090   MVT IndexVT = Index.getSimpleValueType();
6091   MVT XLenVT = Subtarget.getXLenVT();
6092 
6093   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6094          "Unexpected VTs!");
6095   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
6096   // Targets have to explicitly opt-in for extending vector loads and
6097   // truncating vector stores.
6098   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
6099   (void)IsTruncatingStore;
6100 
6101   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6102   // the selection of the masked intrinsics doesn't do this for us.
6103   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6104 
6105   MVT ContainerVT = VT;
6106   if (VT.isFixedLengthVector()) {
6107     // We need to use the larger of the value and index type to determine the
6108     // scalable type to use so we don't increase LMUL for any operand/result.
6109     if (VT.bitsGE(IndexVT)) {
6110       ContainerVT = getContainerForFixedLengthVector(VT);
6111       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6112                                  ContainerVT.getVectorElementCount());
6113     } else {
6114       IndexVT = getContainerForFixedLengthVector(IndexVT);
6115       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
6116                                      IndexVT.getVectorElementCount());
6117     }
6118 
6119     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6120     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
6121 
6122     if (!IsUnmasked) {
6123       MVT MaskVT =
6124           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6125       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6126     }
6127   }
6128 
6129   if (!VL)
6130     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6131 
6132   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6133     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6134     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6135                                    VL);
6136     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6137                         TrueMask, VL);
6138   }
6139 
6140   unsigned IntID =
6141       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
6142   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6143   Ops.push_back(Val);
6144   Ops.push_back(BasePtr);
6145   Ops.push_back(Index);
6146   if (!IsUnmasked)
6147     Ops.push_back(Mask);
6148   Ops.push_back(VL);
6149 
6150   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
6151                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
6152 }
6153 
6154 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
6155                                                SelectionDAG &DAG) const {
6156   const MVT XLenVT = Subtarget.getXLenVT();
6157   SDLoc DL(Op);
6158   SDValue Chain = Op->getOperand(0);
6159   SDValue SysRegNo = DAG.getTargetConstant(
6160       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6161   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
6162   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
6163 
6164   // Encoding used for rounding mode in RISCV differs from that used in
6165   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
6166   // table, which consists of a sequence of 4-bit fields, each representing
6167   // corresponding FLT_ROUNDS mode.
6168   static const int Table =
6169       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
6170       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
6171       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
6172       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
6173       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
6174 
6175   SDValue Shift =
6176       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
6177   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6178                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6179   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6180                                DAG.getConstant(7, DL, XLenVT));
6181 
6182   return DAG.getMergeValues({Masked, Chain}, DL);
6183 }
6184 
6185 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
6186                                                SelectionDAG &DAG) const {
6187   const MVT XLenVT = Subtarget.getXLenVT();
6188   SDLoc DL(Op);
6189   SDValue Chain = Op->getOperand(0);
6190   SDValue RMValue = Op->getOperand(1);
6191   SDValue SysRegNo = DAG.getTargetConstant(
6192       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6193 
6194   // Encoding used for rounding mode in RISCV differs from that used in
6195   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
6196   // a table, which consists of a sequence of 4-bit fields, each representing
6197   // corresponding RISCV mode.
6198   static const unsigned Table =
6199       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
6200       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
6201       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
6202       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
6203       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
6204 
6205   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
6206                               DAG.getConstant(2, DL, XLenVT));
6207   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6208                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6209   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6210                         DAG.getConstant(0x7, DL, XLenVT));
6211   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
6212                      RMValue);
6213 }
6214 
6215 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
6216   switch (IntNo) {
6217   default:
6218     llvm_unreachable("Unexpected Intrinsic");
6219   case Intrinsic::riscv_grev:
6220     return RISCVISD::GREVW;
6221   case Intrinsic::riscv_gorc:
6222     return RISCVISD::GORCW;
6223   case Intrinsic::riscv_bcompress:
6224     return RISCVISD::BCOMPRESSW;
6225   case Intrinsic::riscv_bdecompress:
6226     return RISCVISD::BDECOMPRESSW;
6227   case Intrinsic::riscv_bfp:
6228     return RISCVISD::BFPW;
6229   case Intrinsic::riscv_fsl:
6230     return RISCVISD::FSLW;
6231   case Intrinsic::riscv_fsr:
6232     return RISCVISD::FSRW;
6233   }
6234 }
6235 
6236 // Converts the given intrinsic to a i64 operation with any extension.
6237 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
6238                                          unsigned IntNo) {
6239   SDLoc DL(N);
6240   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
6241   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6242   SDValue NewOp2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6243   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp1, NewOp2);
6244   // ReplaceNodeResults requires we maintain the same type for the return value.
6245   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6246 }
6247 
6248 // Returns the opcode of the target-specific SDNode that implements the 32-bit
6249 // form of the given Opcode.
6250 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
6251   switch (Opcode) {
6252   default:
6253     llvm_unreachable("Unexpected opcode");
6254   case ISD::SHL:
6255     return RISCVISD::SLLW;
6256   case ISD::SRA:
6257     return RISCVISD::SRAW;
6258   case ISD::SRL:
6259     return RISCVISD::SRLW;
6260   case ISD::SDIV:
6261     return RISCVISD::DIVW;
6262   case ISD::UDIV:
6263     return RISCVISD::DIVUW;
6264   case ISD::UREM:
6265     return RISCVISD::REMUW;
6266   case ISD::ROTL:
6267     return RISCVISD::ROLW;
6268   case ISD::ROTR:
6269     return RISCVISD::RORW;
6270   case RISCVISD::GREV:
6271     return RISCVISD::GREVW;
6272   case RISCVISD::GORC:
6273     return RISCVISD::GORCW;
6274   }
6275 }
6276 
6277 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
6278 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
6279 // otherwise be promoted to i64, making it difficult to select the
6280 // SLLW/DIVUW/.../*W later one because the fact the operation was originally of
6281 // type i8/i16/i32 is lost.
6282 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
6283                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
6284   SDLoc DL(N);
6285   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6286   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
6287   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
6288   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6289   // ReplaceNodeResults requires we maintain the same type for the return value.
6290   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6291 }
6292 
6293 // Converts the given 32-bit operation to a i64 operation with signed extension
6294 // semantic to reduce the signed extension instructions.
6295 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
6296   SDLoc DL(N);
6297   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6298   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6299   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
6300   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6301                                DAG.getValueType(MVT::i32));
6302   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
6303 }
6304 
6305 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
6306                                              SmallVectorImpl<SDValue> &Results,
6307                                              SelectionDAG &DAG) const {
6308   SDLoc DL(N);
6309   switch (N->getOpcode()) {
6310   default:
6311     llvm_unreachable("Don't know how to custom type legalize this operation!");
6312   case ISD::STRICT_FP_TO_SINT:
6313   case ISD::STRICT_FP_TO_UINT:
6314   case ISD::FP_TO_SINT:
6315   case ISD::FP_TO_UINT: {
6316     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6317            "Unexpected custom legalisation");
6318     bool IsStrict = N->isStrictFPOpcode();
6319     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
6320                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
6321     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
6322     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
6323         TargetLowering::TypeSoftenFloat) {
6324       if (!isTypeLegal(Op0.getValueType()))
6325         return;
6326       if (IsStrict) {
6327         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
6328                                 : RISCVISD::STRICT_FCVT_WU_RV64;
6329         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
6330         SDValue Res = DAG.getNode(
6331             Opc, DL, VTs, N->getOperand(0), Op0,
6332             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6333         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6334         Results.push_back(Res.getValue(1));
6335         return;
6336       }
6337       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
6338       SDValue Res =
6339           DAG.getNode(Opc, DL, MVT::i64, Op0,
6340                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6341       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6342       return;
6343     }
6344     // If the FP type needs to be softened, emit a library call using the 'si'
6345     // version. If we left it to default legalization we'd end up with 'di'. If
6346     // the FP type doesn't need to be softened just let generic type
6347     // legalization promote the result type.
6348     RTLIB::Libcall LC;
6349     if (IsSigned)
6350       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
6351     else
6352       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
6353     MakeLibCallOptions CallOptions;
6354     EVT OpVT = Op0.getValueType();
6355     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
6356     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
6357     SDValue Result;
6358     std::tie(Result, Chain) =
6359         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
6360     Results.push_back(Result);
6361     if (IsStrict)
6362       Results.push_back(Chain);
6363     break;
6364   }
6365   case ISD::READCYCLECOUNTER: {
6366     assert(!Subtarget.is64Bit() &&
6367            "READCYCLECOUNTER only has custom type legalization on riscv32");
6368 
6369     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
6370     SDValue RCW =
6371         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
6372 
6373     Results.push_back(
6374         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6375     Results.push_back(RCW.getValue(2));
6376     break;
6377   }
6378   case ISD::MUL: {
6379     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6380     unsigned XLen = Subtarget.getXLen();
6381     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
6382     if (Size > XLen) {
6383       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6384       SDValue LHS = N->getOperand(0);
6385       SDValue RHS = N->getOperand(1);
6386       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6387 
6388       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6389       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6390       // We need exactly one side to be unsigned.
6391       if (LHSIsU == RHSIsU)
6392         return;
6393 
6394       auto MakeMULPair = [&](SDValue S, SDValue U) {
6395         MVT XLenVT = Subtarget.getXLenVT();
6396         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6397         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6398         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6399         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6400         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6401       };
6402 
6403       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6404       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6405 
6406       // The other operand should be signed, but still prefer MULH when
6407       // possible.
6408       if (RHSIsU && LHSIsS && !RHSIsS)
6409         Results.push_back(MakeMULPair(LHS, RHS));
6410       else if (LHSIsU && RHSIsS && !LHSIsS)
6411         Results.push_back(MakeMULPair(RHS, LHS));
6412 
6413       return;
6414     }
6415     LLVM_FALLTHROUGH;
6416   }
6417   case ISD::ADD:
6418   case ISD::SUB:
6419     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6420            "Unexpected custom legalisation");
6421     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6422     break;
6423   case ISD::SHL:
6424   case ISD::SRA:
6425   case ISD::SRL:
6426     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6427            "Unexpected custom legalisation");
6428     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6429       Results.push_back(customLegalizeToWOp(N, DAG));
6430       break;
6431     }
6432 
6433     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6434     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6435     // shift amount.
6436     if (N->getOpcode() == ISD::SHL) {
6437       SDLoc DL(N);
6438       SDValue NewOp0 =
6439           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6440       SDValue NewOp1 =
6441           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6442       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6443       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6444                                    DAG.getValueType(MVT::i32));
6445       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6446     }
6447 
6448     break;
6449   case ISD::ROTL:
6450   case ISD::ROTR:
6451     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6452            "Unexpected custom legalisation");
6453     Results.push_back(customLegalizeToWOp(N, DAG));
6454     break;
6455   case ISD::CTTZ:
6456   case ISD::CTTZ_ZERO_UNDEF:
6457   case ISD::CTLZ:
6458   case ISD::CTLZ_ZERO_UNDEF: {
6459     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6460            "Unexpected custom legalisation");
6461 
6462     SDValue NewOp0 =
6463         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6464     bool IsCTZ =
6465         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6466     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6467     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6468     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6469     return;
6470   }
6471   case ISD::SDIV:
6472   case ISD::UDIV:
6473   case ISD::UREM: {
6474     MVT VT = N->getSimpleValueType(0);
6475     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6476            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6477            "Unexpected custom legalisation");
6478     // Don't promote division/remainder by constant since we should expand those
6479     // to multiply by magic constant.
6480     // FIXME: What if the expansion is disabled for minsize.
6481     if (N->getOperand(1).getOpcode() == ISD::Constant)
6482       return;
6483 
6484     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6485     // the upper 32 bits. For other types we need to sign or zero extend
6486     // based on the opcode.
6487     unsigned ExtOpc = ISD::ANY_EXTEND;
6488     if (VT != MVT::i32)
6489       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6490                                            : ISD::ZERO_EXTEND;
6491 
6492     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6493     break;
6494   }
6495   case ISD::UADDO:
6496   case ISD::USUBO: {
6497     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6498            "Unexpected custom legalisation");
6499     bool IsAdd = N->getOpcode() == ISD::UADDO;
6500     // Create an ADDW or SUBW.
6501     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6502     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6503     SDValue Res =
6504         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6505     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6506                       DAG.getValueType(MVT::i32));
6507 
6508     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
6509     // Since the inputs are sign extended from i32, this is equivalent to
6510     // comparing the lower 32 bits.
6511     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6512     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
6513                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
6514 
6515     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6516     Results.push_back(Overflow);
6517     return;
6518   }
6519   case ISD::UADDSAT:
6520   case ISD::USUBSAT: {
6521     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6522            "Unexpected custom legalisation");
6523     if (Subtarget.hasStdExtZbb()) {
6524       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6525       // sign extend allows overflow of the lower 32 bits to be detected on
6526       // the promoted size.
6527       SDValue LHS =
6528           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6529       SDValue RHS =
6530           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6531       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6532       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6533       return;
6534     }
6535 
6536     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6537     // promotion for UADDO/USUBO.
6538     Results.push_back(expandAddSubSat(N, DAG));
6539     return;
6540   }
6541   case ISD::BITCAST: {
6542     EVT VT = N->getValueType(0);
6543     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
6544     SDValue Op0 = N->getOperand(0);
6545     EVT Op0VT = Op0.getValueType();
6546     MVT XLenVT = Subtarget.getXLenVT();
6547     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
6548       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
6549       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
6550     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
6551                Subtarget.hasStdExtF()) {
6552       SDValue FPConv =
6553           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
6554       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
6555     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
6556                isTypeLegal(Op0VT)) {
6557       // Custom-legalize bitcasts from fixed-length vector types to illegal
6558       // scalar types in order to improve codegen. Bitcast the vector to a
6559       // one-element vector type whose element type is the same as the result
6560       // type, and extract the first element.
6561       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
6562       if (isTypeLegal(BVT)) {
6563         SDValue BVec = DAG.getBitcast(BVT, Op0);
6564         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
6565                                       DAG.getConstant(0, DL, XLenVT)));
6566       }
6567     }
6568     break;
6569   }
6570   case RISCVISD::GREV:
6571   case RISCVISD::GORC: {
6572     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6573            "Unexpected custom legalisation");
6574     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6575     // This is similar to customLegalizeToWOp, except that we pass the second
6576     // operand (a TargetConstant) straight through: it is already of type
6577     // XLenVT.
6578     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6579     SDValue NewOp0 =
6580         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6581     SDValue NewOp1 =
6582         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6583     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6584     // ReplaceNodeResults requires we maintain the same type for the return
6585     // value.
6586     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6587     break;
6588   }
6589   case RISCVISD::SHFL: {
6590     // There is no SHFLIW instruction, but we can just promote the operation.
6591     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6592            "Unexpected custom legalisation");
6593     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
6594     SDValue NewOp0 =
6595         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6596     SDValue NewOp1 =
6597         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6598     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
6599     // ReplaceNodeResults requires we maintain the same type for the return
6600     // value.
6601     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6602     break;
6603   }
6604   case ISD::BSWAP:
6605   case ISD::BITREVERSE: {
6606     MVT VT = N->getSimpleValueType(0);
6607     MVT XLenVT = Subtarget.getXLenVT();
6608     assert((VT == MVT::i8 || VT == MVT::i16 ||
6609             (VT == MVT::i32 && Subtarget.is64Bit())) &&
6610            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
6611     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
6612     unsigned Imm = VT.getSizeInBits() - 1;
6613     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
6614     if (N->getOpcode() == ISD::BSWAP)
6615       Imm &= ~0x7U;
6616     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
6617     SDValue GREVI =
6618         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
6619     // ReplaceNodeResults requires we maintain the same type for the return
6620     // value.
6621     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
6622     break;
6623   }
6624   case ISD::FSHL:
6625   case ISD::FSHR: {
6626     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6627            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
6628     SDValue NewOp0 =
6629         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6630     SDValue NewOp1 =
6631         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6632     SDValue NewShAmt =
6633         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6634     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
6635     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
6636     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
6637                            DAG.getConstant(0x1f, DL, MVT::i64));
6638     // fshl and fshr concatenate their operands in the same order. fsrw and fslw
6639     // instruction use different orders. fshl will return its first operand for
6640     // shift of zero, fshr will return its second operand. fsl and fsr both
6641     // return rs1 so the ISD nodes need to have different operand orders.
6642     // Shift amount is in rs2.
6643     unsigned Opc = RISCVISD::FSLW;
6644     if (N->getOpcode() == ISD::FSHR) {
6645       std::swap(NewOp0, NewOp1);
6646       Opc = RISCVISD::FSRW;
6647     }
6648     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
6649     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
6650     break;
6651   }
6652   case ISD::EXTRACT_VECTOR_ELT: {
6653     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
6654     // type is illegal (currently only vXi64 RV32).
6655     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
6656     // transferred to the destination register. We issue two of these from the
6657     // upper- and lower- halves of the SEW-bit vector element, slid down to the
6658     // first element.
6659     SDValue Vec = N->getOperand(0);
6660     SDValue Idx = N->getOperand(1);
6661 
6662     // The vector type hasn't been legalized yet so we can't issue target
6663     // specific nodes if it needs legalization.
6664     // FIXME: We would manually legalize if it's important.
6665     if (!isTypeLegal(Vec.getValueType()))
6666       return;
6667 
6668     MVT VecVT = Vec.getSimpleValueType();
6669 
6670     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
6671            VecVT.getVectorElementType() == MVT::i64 &&
6672            "Unexpected EXTRACT_VECTOR_ELT legalization");
6673 
6674     // If this is a fixed vector, we need to convert it to a scalable vector.
6675     MVT ContainerVT = VecVT;
6676     if (VecVT.isFixedLengthVector()) {
6677       ContainerVT = getContainerForFixedLengthVector(VecVT);
6678       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
6679     }
6680 
6681     MVT XLenVT = Subtarget.getXLenVT();
6682 
6683     // Use a VL of 1 to avoid processing more elements than we need.
6684     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6685     SDValue VL = DAG.getConstant(1, DL, XLenVT);
6686     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6687 
6688     // Unless the index is known to be 0, we must slide the vector down to get
6689     // the desired element into index 0.
6690     if (!isNullConstant(Idx)) {
6691       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
6692                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
6693     }
6694 
6695     // Extract the lower XLEN bits of the correct vector element.
6696     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6697 
6698     // To extract the upper XLEN bits of the vector element, shift the first
6699     // element right by 32 bits and re-extract the lower XLEN bits.
6700     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6701                                      DAG.getConstant(32, DL, XLenVT), VL);
6702     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
6703                                  ThirtyTwoV, Mask, VL);
6704 
6705     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6706 
6707     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6708     break;
6709   }
6710   case ISD::INTRINSIC_WO_CHAIN: {
6711     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
6712     switch (IntNo) {
6713     default:
6714       llvm_unreachable(
6715           "Don't know how to custom type legalize this intrinsic!");
6716     case Intrinsic::riscv_grev:
6717     case Intrinsic::riscv_gorc:
6718     case Intrinsic::riscv_bcompress:
6719     case Intrinsic::riscv_bdecompress:
6720     case Intrinsic::riscv_bfp: {
6721       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6722              "Unexpected custom legalisation");
6723       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
6724       break;
6725     }
6726     case Intrinsic::riscv_fsl:
6727     case Intrinsic::riscv_fsr: {
6728       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6729              "Unexpected custom legalisation");
6730       SDValue NewOp1 =
6731           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6732       SDValue NewOp2 =
6733           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6734       SDValue NewOp3 =
6735           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3));
6736       unsigned Opc = getRISCVWOpcodeByIntr(IntNo);
6737       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2, NewOp3);
6738       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6739       break;
6740     }
6741     case Intrinsic::riscv_orc_b: {
6742       // Lower to the GORCI encoding for orc.b with the operand extended.
6743       SDValue NewOp =
6744           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6745       // If Zbp is enabled, use GORCIW which will sign extend the result.
6746       unsigned Opc =
6747           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
6748       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
6749                                 DAG.getConstant(7, DL, MVT::i64));
6750       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6751       return;
6752     }
6753     case Intrinsic::riscv_shfl:
6754     case Intrinsic::riscv_unshfl: {
6755       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6756              "Unexpected custom legalisation");
6757       SDValue NewOp1 =
6758           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6759       SDValue NewOp2 =
6760           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6761       unsigned Opc =
6762           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
6763       // There is no (UN)SHFLIW. If the control word is a constant, we can use
6764       // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
6765       // will be shuffled the same way as the lower 32 bit half, but the two
6766       // halves won't cross.
6767       if (isa<ConstantSDNode>(NewOp2)) {
6768         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
6769                              DAG.getConstant(0xf, DL, MVT::i64));
6770         Opc =
6771             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
6772       }
6773       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
6774       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6775       break;
6776     }
6777     case Intrinsic::riscv_vmv_x_s: {
6778       EVT VT = N->getValueType(0);
6779       MVT XLenVT = Subtarget.getXLenVT();
6780       if (VT.bitsLT(XLenVT)) {
6781         // Simple case just extract using vmv.x.s and truncate.
6782         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
6783                                       Subtarget.getXLenVT(), N->getOperand(1));
6784         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
6785         return;
6786       }
6787 
6788       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
6789              "Unexpected custom legalization");
6790 
6791       // We need to do the move in two steps.
6792       SDValue Vec = N->getOperand(1);
6793       MVT VecVT = Vec.getSimpleValueType();
6794 
6795       // First extract the lower XLEN bits of the element.
6796       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
6797 
6798       // To extract the upper XLEN bits of the vector element, shift the first
6799       // element right by 32 bits and re-extract the lower XLEN bits.
6800       SDValue VL = DAG.getConstant(1, DL, XLenVT);
6801       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
6802       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
6803       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
6804                                        DAG.getConstant(32, DL, XLenVT), VL);
6805       SDValue LShr32 =
6806           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
6807       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
6808 
6809       Results.push_back(
6810           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
6811       break;
6812     }
6813     }
6814     break;
6815   }
6816   case ISD::VECREDUCE_ADD:
6817   case ISD::VECREDUCE_AND:
6818   case ISD::VECREDUCE_OR:
6819   case ISD::VECREDUCE_XOR:
6820   case ISD::VECREDUCE_SMAX:
6821   case ISD::VECREDUCE_UMAX:
6822   case ISD::VECREDUCE_SMIN:
6823   case ISD::VECREDUCE_UMIN:
6824     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
6825       Results.push_back(V);
6826     break;
6827   case ISD::VP_REDUCE_ADD:
6828   case ISD::VP_REDUCE_AND:
6829   case ISD::VP_REDUCE_OR:
6830   case ISD::VP_REDUCE_XOR:
6831   case ISD::VP_REDUCE_SMAX:
6832   case ISD::VP_REDUCE_UMAX:
6833   case ISD::VP_REDUCE_SMIN:
6834   case ISD::VP_REDUCE_UMIN:
6835     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
6836       Results.push_back(V);
6837     break;
6838   case ISD::FLT_ROUNDS_: {
6839     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
6840     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
6841     Results.push_back(Res.getValue(0));
6842     Results.push_back(Res.getValue(1));
6843     break;
6844   }
6845   }
6846 }
6847 
6848 // A structure to hold one of the bit-manipulation patterns below. Together, a
6849 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
6850 //   (or (and (shl x, 1), 0xAAAAAAAA),
6851 //       (and (srl x, 1), 0x55555555))
6852 struct RISCVBitmanipPat {
6853   SDValue Op;
6854   unsigned ShAmt;
6855   bool IsSHL;
6856 
6857   bool formsPairWith(const RISCVBitmanipPat &Other) const {
6858     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
6859   }
6860 };
6861 
6862 // Matches patterns of the form
6863 //   (and (shl x, C2), (C1 << C2))
6864 //   (and (srl x, C2), C1)
6865 //   (shl (and x, C1), C2)
6866 //   (srl (and x, (C1 << C2)), C2)
6867 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
6868 // The expected masks for each shift amount are specified in BitmanipMasks where
6869 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
6870 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
6871 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
6872 // XLen is 64.
6873 static Optional<RISCVBitmanipPat>
6874 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
6875   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
6876          "Unexpected number of masks");
6877   Optional<uint64_t> Mask;
6878   // Optionally consume a mask around the shift operation.
6879   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
6880     Mask = Op.getConstantOperandVal(1);
6881     Op = Op.getOperand(0);
6882   }
6883   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
6884     return None;
6885   bool IsSHL = Op.getOpcode() == ISD::SHL;
6886 
6887   if (!isa<ConstantSDNode>(Op.getOperand(1)))
6888     return None;
6889   uint64_t ShAmt = Op.getConstantOperandVal(1);
6890 
6891   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6892   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
6893     return None;
6894   // If we don't have enough masks for 64 bit, then we must be trying to
6895   // match SHFL so we're only allowed to shift 1/4 of the width.
6896   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
6897     return None;
6898 
6899   SDValue Src = Op.getOperand(0);
6900 
6901   // The expected mask is shifted left when the AND is found around SHL
6902   // patterns.
6903   //   ((x >> 1) & 0x55555555)
6904   //   ((x << 1) & 0xAAAAAAAA)
6905   bool SHLExpMask = IsSHL;
6906 
6907   if (!Mask) {
6908     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
6909     // the mask is all ones: consume that now.
6910     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
6911       Mask = Src.getConstantOperandVal(1);
6912       Src = Src.getOperand(0);
6913       // The expected mask is now in fact shifted left for SRL, so reverse the
6914       // decision.
6915       //   ((x & 0xAAAAAAAA) >> 1)
6916       //   ((x & 0x55555555) << 1)
6917       SHLExpMask = !SHLExpMask;
6918     } else {
6919       // Use a default shifted mask of all-ones if there's no AND, truncated
6920       // down to the expected width. This simplifies the logic later on.
6921       Mask = maskTrailingOnes<uint64_t>(Width);
6922       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
6923     }
6924   }
6925 
6926   unsigned MaskIdx = Log2_32(ShAmt);
6927   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
6928 
6929   if (SHLExpMask)
6930     ExpMask <<= ShAmt;
6931 
6932   if (Mask != ExpMask)
6933     return None;
6934 
6935   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
6936 }
6937 
6938 // Matches any of the following bit-manipulation patterns:
6939 //   (and (shl x, 1), (0x55555555 << 1))
6940 //   (and (srl x, 1), 0x55555555)
6941 //   (shl (and x, 0x55555555), 1)
6942 //   (srl (and x, (0x55555555 << 1)), 1)
6943 // where the shift amount and mask may vary thus:
6944 //   [1]  = 0x55555555 / 0xAAAAAAAA
6945 //   [2]  = 0x33333333 / 0xCCCCCCCC
6946 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
6947 //   [8]  = 0x00FF00FF / 0xFF00FF00
6948 //   [16] = 0x0000FFFF / 0xFFFFFFFF
6949 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
6950 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
6951   // These are the unshifted masks which we use to match bit-manipulation
6952   // patterns. They may be shifted left in certain circumstances.
6953   static const uint64_t BitmanipMasks[] = {
6954       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
6955       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
6956 
6957   return matchRISCVBitmanipPat(Op, BitmanipMasks);
6958 }
6959 
6960 // Match the following pattern as a GREVI(W) operation
6961 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
6962 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
6963                                const RISCVSubtarget &Subtarget) {
6964   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6965   EVT VT = Op.getValueType();
6966 
6967   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6968     auto LHS = matchGREVIPat(Op.getOperand(0));
6969     auto RHS = matchGREVIPat(Op.getOperand(1));
6970     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
6971       SDLoc DL(Op);
6972       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
6973                          DAG.getConstant(LHS->ShAmt, DL, VT));
6974     }
6975   }
6976   return SDValue();
6977 }
6978 
6979 // Matches any the following pattern as a GORCI(W) operation
6980 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
6981 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
6982 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
6983 // Note that with the variant of 3.,
6984 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
6985 // the inner pattern will first be matched as GREVI and then the outer
6986 // pattern will be matched to GORC via the first rule above.
6987 // 4.  (or (rotl/rotr x, bitwidth/2), x)
6988 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
6989                                const RISCVSubtarget &Subtarget) {
6990   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
6991   EVT VT = Op.getValueType();
6992 
6993   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6994     SDLoc DL(Op);
6995     SDValue Op0 = Op.getOperand(0);
6996     SDValue Op1 = Op.getOperand(1);
6997 
6998     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
6999       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
7000           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
7001           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
7002         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
7003       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
7004       if ((Reverse.getOpcode() == ISD::ROTL ||
7005            Reverse.getOpcode() == ISD::ROTR) &&
7006           Reverse.getOperand(0) == X &&
7007           isa<ConstantSDNode>(Reverse.getOperand(1))) {
7008         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
7009         if (RotAmt == (VT.getSizeInBits() / 2))
7010           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
7011                              DAG.getConstant(RotAmt, DL, VT));
7012       }
7013       return SDValue();
7014     };
7015 
7016     // Check for either commutable permutation of (or (GREVI x, shamt), x)
7017     if (SDValue V = MatchOROfReverse(Op0, Op1))
7018       return V;
7019     if (SDValue V = MatchOROfReverse(Op1, Op0))
7020       return V;
7021 
7022     // OR is commutable so canonicalize its OR operand to the left
7023     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
7024       std::swap(Op0, Op1);
7025     if (Op0.getOpcode() != ISD::OR)
7026       return SDValue();
7027     SDValue OrOp0 = Op0.getOperand(0);
7028     SDValue OrOp1 = Op0.getOperand(1);
7029     auto LHS = matchGREVIPat(OrOp0);
7030     // OR is commutable so swap the operands and try again: x might have been
7031     // on the left
7032     if (!LHS) {
7033       std::swap(OrOp0, OrOp1);
7034       LHS = matchGREVIPat(OrOp0);
7035     }
7036     auto RHS = matchGREVIPat(Op1);
7037     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
7038       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
7039                          DAG.getConstant(LHS->ShAmt, DL, VT));
7040     }
7041   }
7042   return SDValue();
7043 }
7044 
7045 // Matches any of the following bit-manipulation patterns:
7046 //   (and (shl x, 1), (0x22222222 << 1))
7047 //   (and (srl x, 1), 0x22222222)
7048 //   (shl (and x, 0x22222222), 1)
7049 //   (srl (and x, (0x22222222 << 1)), 1)
7050 // where the shift amount and mask may vary thus:
7051 //   [1]  = 0x22222222 / 0x44444444
7052 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
7053 //   [4]  = 0x00F000F0 / 0x0F000F00
7054 //   [8]  = 0x0000FF00 / 0x00FF0000
7055 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
7056 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
7057   // These are the unshifted masks which we use to match bit-manipulation
7058   // patterns. They may be shifted left in certain circumstances.
7059   static const uint64_t BitmanipMasks[] = {
7060       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
7061       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
7062 
7063   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7064 }
7065 
7066 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
7067 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
7068                                const RISCVSubtarget &Subtarget) {
7069   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
7070   EVT VT = Op.getValueType();
7071 
7072   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
7073     return SDValue();
7074 
7075   SDValue Op0 = Op.getOperand(0);
7076   SDValue Op1 = Op.getOperand(1);
7077 
7078   // Or is commutable so canonicalize the second OR to the LHS.
7079   if (Op0.getOpcode() != ISD::OR)
7080     std::swap(Op0, Op1);
7081   if (Op0.getOpcode() != ISD::OR)
7082     return SDValue();
7083 
7084   // We found an inner OR, so our operands are the operands of the inner OR
7085   // and the other operand of the outer OR.
7086   SDValue A = Op0.getOperand(0);
7087   SDValue B = Op0.getOperand(1);
7088   SDValue C = Op1;
7089 
7090   auto Match1 = matchSHFLPat(A);
7091   auto Match2 = matchSHFLPat(B);
7092 
7093   // If neither matched, we failed.
7094   if (!Match1 && !Match2)
7095     return SDValue();
7096 
7097   // We had at least one match. if one failed, try the remaining C operand.
7098   if (!Match1) {
7099     std::swap(A, C);
7100     Match1 = matchSHFLPat(A);
7101     if (!Match1)
7102       return SDValue();
7103   } else if (!Match2) {
7104     std::swap(B, C);
7105     Match2 = matchSHFLPat(B);
7106     if (!Match2)
7107       return SDValue();
7108   }
7109   assert(Match1 && Match2);
7110 
7111   // Make sure our matches pair up.
7112   if (!Match1->formsPairWith(*Match2))
7113     return SDValue();
7114 
7115   // All the remains is to make sure C is an AND with the same input, that masks
7116   // out the bits that are being shuffled.
7117   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
7118       C.getOperand(0) != Match1->Op)
7119     return SDValue();
7120 
7121   uint64_t Mask = C.getConstantOperandVal(1);
7122 
7123   static const uint64_t BitmanipMasks[] = {
7124       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
7125       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
7126   };
7127 
7128   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7129   unsigned MaskIdx = Log2_32(Match1->ShAmt);
7130   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7131 
7132   if (Mask != ExpMask)
7133     return SDValue();
7134 
7135   SDLoc DL(Op);
7136   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7137                      DAG.getConstant(Match1->ShAmt, DL, VT));
7138 }
7139 
7140 // Optimize (add (shl x, c0), (shl y, c1)) ->
7141 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals to [1|2|3].
7142 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7143                                   const RISCVSubtarget &Subtarget) {
7144   // Perform this optimization only in the zba extension.
7145   if (!Subtarget.hasStdExtZba())
7146     return SDValue();
7147 
7148   // Skip for vector types and larger types.
7149   EVT VT = N->getValueType(0);
7150   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7151     return SDValue();
7152 
7153   // The two operand nodes must be SHL and have no other use.
7154   SDValue N0 = N->getOperand(0);
7155   SDValue N1 = N->getOperand(1);
7156   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7157       !N0->hasOneUse() || !N1->hasOneUse())
7158     return SDValue();
7159 
7160   // Check c0 and c1.
7161   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7162   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7163   if (!N0C || !N1C)
7164     return SDValue();
7165   int64_t C0 = N0C->getSExtValue();
7166   int64_t C1 = N1C->getSExtValue();
7167   if (C0 <= 0 || C1 <= 0)
7168     return SDValue();
7169 
7170   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7171   int64_t Bits = std::min(C0, C1);
7172   int64_t Diff = std::abs(C0 - C1);
7173   if (Diff != 1 && Diff != 2 && Diff != 3)
7174     return SDValue();
7175 
7176   // Build nodes.
7177   SDLoc DL(N);
7178   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7179   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7180   SDValue NA0 =
7181       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7182   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7183   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7184 }
7185 
7186 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
7187 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
7188 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
7189 // not undo itself, but they are redundant.
7190 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7191   SDValue Src = N->getOperand(0);
7192 
7193   if (Src.getOpcode() != N->getOpcode())
7194     return SDValue();
7195 
7196   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7197       !isa<ConstantSDNode>(Src.getOperand(1)))
7198     return SDValue();
7199 
7200   unsigned ShAmt1 = N->getConstantOperandVal(1);
7201   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7202   Src = Src.getOperand(0);
7203 
7204   unsigned CombinedShAmt;
7205   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
7206     CombinedShAmt = ShAmt1 | ShAmt2;
7207   else
7208     CombinedShAmt = ShAmt1 ^ ShAmt2;
7209 
7210   if (CombinedShAmt == 0)
7211     return Src;
7212 
7213   SDLoc DL(N);
7214   return DAG.getNode(
7215       N->getOpcode(), DL, N->getValueType(0), Src,
7216       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7217 }
7218 
7219 // Combine a constant select operand into its use:
7220 //
7221 // (and (select cond, -1, c), x)
7222 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7223 // (or  (select cond, 0, c), x)
7224 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7225 // (xor (select cond, 0, c), x)
7226 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7227 // (add (select cond, 0, c), x)
7228 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7229 // (sub x, (select cond, 0, c))
7230 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7231 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7232                                    SelectionDAG &DAG, bool AllOnes) {
7233   EVT VT = N->getValueType(0);
7234 
7235   // Skip vectors.
7236   if (VT.isVector())
7237     return SDValue();
7238 
7239   if ((Slct.getOpcode() != ISD::SELECT &&
7240        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7241       !Slct.hasOneUse())
7242     return SDValue();
7243 
7244   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7245     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7246   };
7247 
7248   bool SwapSelectOps;
7249   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7250   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7251   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7252   SDValue NonConstantVal;
7253   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7254     SwapSelectOps = false;
7255     NonConstantVal = FalseVal;
7256   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7257     SwapSelectOps = true;
7258     NonConstantVal = TrueVal;
7259   } else
7260     return SDValue();
7261 
7262   // Slct is now know to be the desired identity constant when CC is true.
7263   TrueVal = OtherOp;
7264   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7265   // Unless SwapSelectOps says the condition should be false.
7266   if (SwapSelectOps)
7267     std::swap(TrueVal, FalseVal);
7268 
7269   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7270     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7271                        {Slct.getOperand(0), Slct.getOperand(1),
7272                         Slct.getOperand(2), TrueVal, FalseVal});
7273 
7274   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7275                      {Slct.getOperand(0), TrueVal, FalseVal});
7276 }
7277 
7278 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7279 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7280                                               bool AllOnes) {
7281   SDValue N0 = N->getOperand(0);
7282   SDValue N1 = N->getOperand(1);
7283   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7284     return Result;
7285   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7286     return Result;
7287   return SDValue();
7288 }
7289 
7290 // Transform (add (mul x, c0), c1) ->
7291 //           (add (mul (add x, c1/c0), c0), c1%c0).
7292 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7293 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7294 // to an infinite loop in DAGCombine if transformed.
7295 // Or transform (add (mul x, c0), c1) ->
7296 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7297 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7298 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7299 // lead to an infinite loop in DAGCombine if transformed.
7300 // Or transform (add (mul x, c0), c1) ->
7301 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7302 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7303 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7304 // lead to an infinite loop in DAGCombine if transformed.
7305 // Or transform (add (mul x, c0), c1) ->
7306 //              (mul (add x, c1/c0), c0).
7307 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
7308 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7309                                      const RISCVSubtarget &Subtarget) {
7310   // Skip for vector types and larger types.
7311   EVT VT = N->getValueType(0);
7312   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7313     return SDValue();
7314   // The first operand node must be a MUL and has no other use.
7315   SDValue N0 = N->getOperand(0);
7316   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7317     return SDValue();
7318   // Check if c0 and c1 match above conditions.
7319   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7320   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7321   if (!N0C || !N1C)
7322     return SDValue();
7323   int64_t C0 = N0C->getSExtValue();
7324   int64_t C1 = N1C->getSExtValue();
7325   int64_t CA, CB;
7326   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
7327     return SDValue();
7328   // Search for proper CA (non-zero) and CB that both are simm12.
7329   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
7330       !isInt<12>(C0 * (C1 / C0))) {
7331     CA = C1 / C0;
7332     CB = C1 % C0;
7333   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
7334              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
7335     CA = C1 / C0 + 1;
7336     CB = C1 % C0 - C0;
7337   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
7338              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
7339     CA = C1 / C0 - 1;
7340     CB = C1 % C0 + C0;
7341   } else
7342     return SDValue();
7343   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
7344   SDLoc DL(N);
7345   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
7346                              DAG.getConstant(CA, DL, VT));
7347   SDValue New1 =
7348       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
7349   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
7350 }
7351 
7352 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
7353                                  const RISCVSubtarget &Subtarget) {
7354   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
7355     return V;
7356   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
7357     return V;
7358   // fold (add (select lhs, rhs, cc, 0, y), x) ->
7359   //      (select lhs, rhs, cc, x, (add x, y))
7360   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7361 }
7362 
7363 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
7364   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
7365   //      (select lhs, rhs, cc, x, (sub x, y))
7366   SDValue N0 = N->getOperand(0);
7367   SDValue N1 = N->getOperand(1);
7368   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
7369 }
7370 
7371 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
7372   // fold (and (select lhs, rhs, cc, -1, y), x) ->
7373   //      (select lhs, rhs, cc, x, (and x, y))
7374   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
7375 }
7376 
7377 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
7378                                 const RISCVSubtarget &Subtarget) {
7379   if (Subtarget.hasStdExtZbp()) {
7380     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
7381       return GREV;
7382     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
7383       return GORC;
7384     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
7385       return SHFL;
7386   }
7387 
7388   // fold (or (select cond, 0, y), x) ->
7389   //      (select cond, x, (or x, y))
7390   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7391 }
7392 
7393 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
7394   // fold (xor (select cond, 0, y), x) ->
7395   //      (select cond, x, (xor x, y))
7396   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7397 }
7398 
7399 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
7400 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
7401 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
7402 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
7403 // ADDW/SUBW/MULW.
7404 static SDValue performANY_EXTENDCombine(SDNode *N,
7405                                         TargetLowering::DAGCombinerInfo &DCI,
7406                                         const RISCVSubtarget &Subtarget) {
7407   if (!Subtarget.is64Bit())
7408     return SDValue();
7409 
7410   SelectionDAG &DAG = DCI.DAG;
7411 
7412   SDValue Src = N->getOperand(0);
7413   EVT VT = N->getValueType(0);
7414   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
7415     return SDValue();
7416 
7417   // The opcode must be one that can implicitly sign_extend.
7418   // FIXME: Additional opcodes.
7419   switch (Src.getOpcode()) {
7420   default:
7421     return SDValue();
7422   case ISD::MUL:
7423     if (!Subtarget.hasStdExtM())
7424       return SDValue();
7425     LLVM_FALLTHROUGH;
7426   case ISD::ADD:
7427   case ISD::SUB:
7428     break;
7429   }
7430 
7431   // Only handle cases where the result is used by a CopyToReg. That likely
7432   // means the value is a liveout of the basic block. This helps prevent
7433   // infinite combine loops like PR51206.
7434   if (none_of(N->uses(),
7435               [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
7436     return SDValue();
7437 
7438   SmallVector<SDNode *, 4> SetCCs;
7439   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
7440                             UE = Src.getNode()->use_end();
7441        UI != UE; ++UI) {
7442     SDNode *User = *UI;
7443     if (User == N)
7444       continue;
7445     if (UI.getUse().getResNo() != Src.getResNo())
7446       continue;
7447     // All i32 setccs are legalized by sign extending operands.
7448     if (User->getOpcode() == ISD::SETCC) {
7449       SetCCs.push_back(User);
7450       continue;
7451     }
7452     // We don't know if we can extend this user.
7453     break;
7454   }
7455 
7456   // If we don't have any SetCCs, this isn't worthwhile.
7457   if (SetCCs.empty())
7458     return SDValue();
7459 
7460   SDLoc DL(N);
7461   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
7462   DCI.CombineTo(N, SExt);
7463 
7464   // Promote all the setccs.
7465   for (SDNode *SetCC : SetCCs) {
7466     SmallVector<SDValue, 4> Ops;
7467 
7468     for (unsigned j = 0; j != 2; ++j) {
7469       SDValue SOp = SetCC->getOperand(j);
7470       if (SOp == Src)
7471         Ops.push_back(SExt);
7472       else
7473         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
7474     }
7475 
7476     Ops.push_back(SetCC->getOperand(2));
7477     DCI.CombineTo(SetCC,
7478                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
7479   }
7480   return SDValue(N, 0);
7481 }
7482 
7483 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to
7484 // vwadd(u).vv/vx or vwsub(u).vv/vx.
7485 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG,
7486                                              bool Commute = false) {
7487   assert((N->getOpcode() == RISCVISD::ADD_VL ||
7488           N->getOpcode() == RISCVISD::SUB_VL) &&
7489          "Unexpected opcode");
7490   bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL;
7491   SDValue Op0 = N->getOperand(0);
7492   SDValue Op1 = N->getOperand(1);
7493   if (Commute)
7494     std::swap(Op0, Op1);
7495 
7496   MVT VT = N->getSimpleValueType(0);
7497 
7498   // Determine the narrow size for a widening add/sub.
7499   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7500   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7501                                   VT.getVectorElementCount());
7502 
7503   SDValue Mask = N->getOperand(2);
7504   SDValue VL = N->getOperand(3);
7505 
7506   SDLoc DL(N);
7507 
7508   // If the RHS is a sext or zext, we can form a widening op.
7509   if ((Op1.getOpcode() == RISCVISD::VZEXT_VL ||
7510        Op1.getOpcode() == RISCVISD::VSEXT_VL) &&
7511       Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) {
7512     unsigned ExtOpc = Op1.getOpcode();
7513     Op1 = Op1.getOperand(0);
7514     // Re-introduce narrower extends if needed.
7515     if (Op1.getValueType() != NarrowVT)
7516       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7517 
7518     unsigned WOpc;
7519     if (ExtOpc == RISCVISD::VSEXT_VL)
7520       WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL;
7521     else
7522       WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL;
7523 
7524     return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL);
7525   }
7526 
7527   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
7528   // sext/zext?
7529 
7530   return SDValue();
7531 }
7532 
7533 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or
7534 // vwsub(u).vv/vx.
7535 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) {
7536   SDValue Op0 = N->getOperand(0);
7537   SDValue Op1 = N->getOperand(1);
7538   SDValue Mask = N->getOperand(2);
7539   SDValue VL = N->getOperand(3);
7540 
7541   MVT VT = N->getSimpleValueType(0);
7542   MVT NarrowVT = Op1.getSimpleValueType();
7543   unsigned NarrowSize = NarrowVT.getScalarSizeInBits();
7544 
7545   unsigned VOpc;
7546   switch (N->getOpcode()) {
7547   default: llvm_unreachable("Unexpected opcode");
7548   case RISCVISD::VWADD_W_VL:  VOpc = RISCVISD::VWADD_VL;  break;
7549   case RISCVISD::VWSUB_W_VL:  VOpc = RISCVISD::VWSUB_VL;  break;
7550   case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break;
7551   case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break;
7552   }
7553 
7554   bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL ||
7555                   N->getOpcode() == RISCVISD::VWSUB_W_VL;
7556 
7557   SDLoc DL(N);
7558 
7559   // If the LHS is a sext or zext, we can narrow this op to the same size as
7560   // the RHS.
7561   if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) ||
7562        (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) &&
7563       Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) {
7564     unsigned ExtOpc = Op0.getOpcode();
7565     Op0 = Op0.getOperand(0);
7566     // Re-introduce narrower extends if needed.
7567     if (Op0.getValueType() != NarrowVT)
7568       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7569     return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL);
7570   }
7571 
7572   bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL ||
7573                N->getOpcode() == RISCVISD::VWADDU_W_VL;
7574 
7575   // Look for splats on the left hand side of a vwadd(u).wv. We might be able
7576   // to commute and use a vwadd(u).vx instead.
7577   if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL &&
7578       Op0.getOperand(1) == VL) {
7579     Op0 = Op0.getOperand(0);
7580 
7581     // See if have enough sign bits or zero bits in the scalar to use a
7582     // widening add/sub by splatting to smaller element size.
7583     unsigned EltBits = VT.getScalarSizeInBits();
7584     unsigned ScalarBits = Op0.getValueSizeInBits();
7585     // Make sure we're getting all element bits from the scalar register.
7586     // FIXME: Support implicit sign extension of vmv.v.x?
7587     if (ScalarBits < EltBits)
7588       return SDValue();
7589 
7590     if (IsSigned) {
7591       if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize))
7592         return SDValue();
7593     } else {
7594       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7595       if (!DAG.MaskedValueIsZero(Op0, Mask))
7596         return SDValue();
7597     }
7598 
7599     Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, Op0, VL);
7600     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
7601   }
7602 
7603   return SDValue();
7604 }
7605 
7606 // Try to form VWMUL, VWMULU or VWMULSU.
7607 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
7608 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
7609                                        bool Commute) {
7610   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
7611   SDValue Op0 = N->getOperand(0);
7612   SDValue Op1 = N->getOperand(1);
7613   if (Commute)
7614     std::swap(Op0, Op1);
7615 
7616   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
7617   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
7618   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
7619   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
7620     return SDValue();
7621 
7622   SDValue Mask = N->getOperand(2);
7623   SDValue VL = N->getOperand(3);
7624 
7625   // Make sure the mask and VL match.
7626   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
7627     return SDValue();
7628 
7629   MVT VT = N->getSimpleValueType(0);
7630 
7631   // Determine the narrow size for a widening multiply.
7632   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7633   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7634                                   VT.getVectorElementCount());
7635 
7636   SDLoc DL(N);
7637 
7638   // See if the other operand is the same opcode.
7639   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
7640     if (!Op1.hasOneUse())
7641       return SDValue();
7642 
7643     // Make sure the mask and VL match.
7644     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
7645       return SDValue();
7646 
7647     Op1 = Op1.getOperand(0);
7648   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
7649     // The operand is a splat of a scalar.
7650 
7651     // The VL must be the same.
7652     if (Op1.getOperand(1) != VL)
7653       return SDValue();
7654 
7655     // Get the scalar value.
7656     Op1 = Op1.getOperand(0);
7657 
7658     // See if have enough sign bits or zero bits in the scalar to use a
7659     // widening multiply by splatting to smaller element size.
7660     unsigned EltBits = VT.getScalarSizeInBits();
7661     unsigned ScalarBits = Op1.getValueSizeInBits();
7662     // Make sure we're getting all element bits from the scalar register.
7663     // FIXME: Support implicit sign extension of vmv.v.x?
7664     if (ScalarBits < EltBits)
7665       return SDValue();
7666 
7667     if (IsSignExt) {
7668       if (DAG.ComputeNumSignBits(Op1) <= (ScalarBits - NarrowSize))
7669         return SDValue();
7670     } else {
7671       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
7672       if (!DAG.MaskedValueIsZero(Op1, Mask))
7673         return SDValue();
7674     }
7675 
7676     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, Op1, VL);
7677   } else
7678     return SDValue();
7679 
7680   Op0 = Op0.getOperand(0);
7681 
7682   // Re-introduce narrower extends if needed.
7683   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
7684   if (Op0.getValueType() != NarrowVT)
7685     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
7686   if (Op1.getValueType() != NarrowVT)
7687     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7688 
7689   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
7690   if (!IsVWMULSU)
7691     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
7692   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
7693 }
7694 
7695 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
7696   switch (Op.getOpcode()) {
7697   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
7698   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
7699   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
7700   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
7701   case ISD::FROUND:     return RISCVFPRndMode::RMM;
7702   }
7703 
7704   return RISCVFPRndMode::Invalid;
7705 }
7706 
7707 // Fold
7708 //   (fp_to_int (froundeven X)) -> fcvt X, rne
7709 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
7710 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
7711 //   (fp_to_int (fceil X))      -> fcvt X, rup
7712 //   (fp_to_int (fround X))     -> fcvt X, rmm
7713 static SDValue performFP_TO_INTCombine(SDNode *N,
7714                                        TargetLowering::DAGCombinerInfo &DCI,
7715                                        const RISCVSubtarget &Subtarget) {
7716   SelectionDAG &DAG = DCI.DAG;
7717   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7718   MVT XLenVT = Subtarget.getXLenVT();
7719 
7720   // Only handle XLen or i32 types. Other types narrower than XLen will
7721   // eventually be legalized to XLenVT.
7722   EVT VT = N->getValueType(0);
7723   if (VT != MVT::i32 && VT != XLenVT)
7724     return SDValue();
7725 
7726   SDValue Src = N->getOperand(0);
7727 
7728   // Ensure the FP type is also legal.
7729   if (!TLI.isTypeLegal(Src.getValueType()))
7730     return SDValue();
7731 
7732   // Don't do this for f16 with Zfhmin and not Zfh.
7733   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
7734     return SDValue();
7735 
7736   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
7737   if (FRM == RISCVFPRndMode::Invalid)
7738     return SDValue();
7739 
7740   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
7741 
7742   unsigned Opc;
7743   if (VT == XLenVT)
7744     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
7745   else
7746     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
7747 
7748   SDLoc DL(N);
7749   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
7750                                 DAG.getTargetConstant(FRM, DL, XLenVT));
7751   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
7752 }
7753 
7754 // Fold
7755 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
7756 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
7757 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
7758 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
7759 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
7760 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
7761                                        TargetLowering::DAGCombinerInfo &DCI,
7762                                        const RISCVSubtarget &Subtarget) {
7763   SelectionDAG &DAG = DCI.DAG;
7764   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7765   MVT XLenVT = Subtarget.getXLenVT();
7766 
7767   // Only handle XLen types. Other types narrower than XLen will eventually be
7768   // legalized to XLenVT.
7769   EVT DstVT = N->getValueType(0);
7770   if (DstVT != XLenVT)
7771     return SDValue();
7772 
7773   SDValue Src = N->getOperand(0);
7774 
7775   // Ensure the FP type is also legal.
7776   if (!TLI.isTypeLegal(Src.getValueType()))
7777     return SDValue();
7778 
7779   // Don't do this for f16 with Zfhmin and not Zfh.
7780   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
7781     return SDValue();
7782 
7783   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
7784 
7785   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
7786   if (FRM == RISCVFPRndMode::Invalid)
7787     return SDValue();
7788 
7789   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
7790 
7791   unsigned Opc;
7792   if (SatVT == DstVT)
7793     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
7794   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
7795     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
7796   else
7797     return SDValue();
7798   // FIXME: Support other SatVTs by clamping before or after the conversion.
7799 
7800   Src = Src.getOperand(0);
7801 
7802   SDLoc DL(N);
7803   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
7804                                 DAG.getTargetConstant(FRM, DL, XLenVT));
7805 
7806   // RISCV FP-to-int conversions saturate to the destination register size, but
7807   // don't produce 0 for nan.
7808   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
7809   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
7810 }
7811 
7812 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
7813                                                DAGCombinerInfo &DCI) const {
7814   SelectionDAG &DAG = DCI.DAG;
7815 
7816   // Helper to call SimplifyDemandedBits on an operand of N where only some low
7817   // bits are demanded. N will be added to the Worklist if it was not deleted.
7818   // Caller should return SDValue(N, 0) if this returns true.
7819   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
7820     SDValue Op = N->getOperand(OpNo);
7821     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
7822     if (!SimplifyDemandedBits(Op, Mask, DCI))
7823       return false;
7824 
7825     if (N->getOpcode() != ISD::DELETED_NODE)
7826       DCI.AddToWorklist(N);
7827     return true;
7828   };
7829 
7830   switch (N->getOpcode()) {
7831   default:
7832     break;
7833   case RISCVISD::SplitF64: {
7834     SDValue Op0 = N->getOperand(0);
7835     // If the input to SplitF64 is just BuildPairF64 then the operation is
7836     // redundant. Instead, use BuildPairF64's operands directly.
7837     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
7838       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
7839 
7840     if (Op0->isUndef()) {
7841       SDValue Lo = DAG.getUNDEF(MVT::i32);
7842       SDValue Hi = DAG.getUNDEF(MVT::i32);
7843       return DCI.CombineTo(N, Lo, Hi);
7844     }
7845 
7846     SDLoc DL(N);
7847 
7848     // It's cheaper to materialise two 32-bit integers than to load a double
7849     // from the constant pool and transfer it to integer registers through the
7850     // stack.
7851     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
7852       APInt V = C->getValueAPF().bitcastToAPInt();
7853       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
7854       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
7855       return DCI.CombineTo(N, Lo, Hi);
7856     }
7857 
7858     // This is a target-specific version of a DAGCombine performed in
7859     // DAGCombiner::visitBITCAST. It performs the equivalent of:
7860     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
7861     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
7862     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
7863         !Op0.getNode()->hasOneUse())
7864       break;
7865     SDValue NewSplitF64 =
7866         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
7867                     Op0.getOperand(0));
7868     SDValue Lo = NewSplitF64.getValue(0);
7869     SDValue Hi = NewSplitF64.getValue(1);
7870     APInt SignBit = APInt::getSignMask(32);
7871     if (Op0.getOpcode() == ISD::FNEG) {
7872       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
7873                                   DAG.getConstant(SignBit, DL, MVT::i32));
7874       return DCI.CombineTo(N, Lo, NewHi);
7875     }
7876     assert(Op0.getOpcode() == ISD::FABS);
7877     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
7878                                 DAG.getConstant(~SignBit, DL, MVT::i32));
7879     return DCI.CombineTo(N, Lo, NewHi);
7880   }
7881   case RISCVISD::SLLW:
7882   case RISCVISD::SRAW:
7883   case RISCVISD::SRLW:
7884   case RISCVISD::ROLW:
7885   case RISCVISD::RORW: {
7886     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
7887     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7888         SimplifyDemandedLowBitsHelper(1, 5))
7889       return SDValue(N, 0);
7890     break;
7891   }
7892   case RISCVISD::CLZW:
7893   case RISCVISD::CTZW: {
7894     // Only the lower 32 bits of the first operand are read
7895     if (SimplifyDemandedLowBitsHelper(0, 32))
7896       return SDValue(N, 0);
7897     break;
7898   }
7899   case RISCVISD::GREV:
7900   case RISCVISD::GORC: {
7901     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
7902     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
7903     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
7904     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
7905       return SDValue(N, 0);
7906 
7907     return combineGREVI_GORCI(N, DAG);
7908   }
7909   case RISCVISD::GREVW:
7910   case RISCVISD::GORCW: {
7911     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
7912     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7913         SimplifyDemandedLowBitsHelper(1, 5))
7914       return SDValue(N, 0);
7915 
7916     return combineGREVI_GORCI(N, DAG);
7917   }
7918   case RISCVISD::SHFL:
7919   case RISCVISD::UNSHFL: {
7920     // Only the lower log2(Bitwidth)-1 bits of the the shift amount are read.
7921     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
7922     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
7923     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
7924       return SDValue(N, 0);
7925 
7926     break;
7927   }
7928   case RISCVISD::SHFLW:
7929   case RISCVISD::UNSHFLW: {
7930     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
7931     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7932         SimplifyDemandedLowBitsHelper(1, 4))
7933       return SDValue(N, 0);
7934 
7935     break;
7936   }
7937   case RISCVISD::BCOMPRESSW:
7938   case RISCVISD::BDECOMPRESSW: {
7939     // Only the lower 32 bits of LHS and RHS are read.
7940     if (SimplifyDemandedLowBitsHelper(0, 32) ||
7941         SimplifyDemandedLowBitsHelper(1, 32))
7942       return SDValue(N, 0);
7943 
7944     break;
7945   }
7946   case RISCVISD::FMV_X_ANYEXTH:
7947   case RISCVISD::FMV_X_ANYEXTW_RV64: {
7948     SDLoc DL(N);
7949     SDValue Op0 = N->getOperand(0);
7950     MVT VT = N->getSimpleValueType(0);
7951     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
7952     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
7953     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
7954     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
7955          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
7956         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
7957          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
7958       assert(Op0.getOperand(0).getValueType() == VT &&
7959              "Unexpected value type!");
7960       return Op0.getOperand(0);
7961     }
7962 
7963     // This is a target-specific version of a DAGCombine performed in
7964     // DAGCombiner::visitBITCAST. It performs the equivalent of:
7965     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
7966     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
7967     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
7968         !Op0.getNode()->hasOneUse())
7969       break;
7970     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
7971     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
7972     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
7973     if (Op0.getOpcode() == ISD::FNEG)
7974       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
7975                          DAG.getConstant(SignBit, DL, VT));
7976 
7977     assert(Op0.getOpcode() == ISD::FABS);
7978     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
7979                        DAG.getConstant(~SignBit, DL, VT));
7980   }
7981   case ISD::ADD:
7982     return performADDCombine(N, DAG, Subtarget);
7983   case ISD::SUB:
7984     return performSUBCombine(N, DAG);
7985   case ISD::AND:
7986     return performANDCombine(N, DAG);
7987   case ISD::OR:
7988     return performORCombine(N, DAG, Subtarget);
7989   case ISD::XOR:
7990     return performXORCombine(N, DAG);
7991   case ISD::ANY_EXTEND:
7992     return performANY_EXTENDCombine(N, DCI, Subtarget);
7993   case ISD::ZERO_EXTEND:
7994     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
7995     // type legalization. This is safe because fp_to_uint produces poison if
7996     // it overflows.
7997     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
7998       SDValue Src = N->getOperand(0);
7999       if (Src.getOpcode() == ISD::FP_TO_UINT &&
8000           isTypeLegal(Src.getOperand(0).getValueType()))
8001         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
8002                            Src.getOperand(0));
8003       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
8004           isTypeLegal(Src.getOperand(1).getValueType())) {
8005         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
8006         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
8007                                   Src.getOperand(0), Src.getOperand(1));
8008         DCI.CombineTo(N, Res);
8009         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
8010         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
8011         return SDValue(N, 0); // Return N so it doesn't get rechecked.
8012       }
8013     }
8014     return SDValue();
8015   case RISCVISD::SELECT_CC: {
8016     // Transform
8017     SDValue LHS = N->getOperand(0);
8018     SDValue RHS = N->getOperand(1);
8019     SDValue TrueV = N->getOperand(3);
8020     SDValue FalseV = N->getOperand(4);
8021 
8022     // If the True and False values are the same, we don't need a select_cc.
8023     if (TrueV == FalseV)
8024       return TrueV;
8025 
8026     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
8027     if (!ISD::isIntEqualitySetCC(CCVal))
8028       break;
8029 
8030     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
8031     //      (select_cc X, Y, lt, trueV, falseV)
8032     // Sometimes the setcc is introduced after select_cc has been formed.
8033     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8034         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8035       // If we're looking for eq 0 instead of ne 0, we need to invert the
8036       // condition.
8037       bool Invert = CCVal == ISD::SETEQ;
8038       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8039       if (Invert)
8040         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8041 
8042       SDLoc DL(N);
8043       RHS = LHS.getOperand(1);
8044       LHS = LHS.getOperand(0);
8045       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8046 
8047       SDValue TargetCC = DAG.getCondCode(CCVal);
8048       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8049                          {LHS, RHS, TargetCC, TrueV, FalseV});
8050     }
8051 
8052     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
8053     //      (select_cc X, Y, eq/ne, trueV, falseV)
8054     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8055       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
8056                          {LHS.getOperand(0), LHS.getOperand(1),
8057                           N->getOperand(2), TrueV, FalseV});
8058     // (select_cc X, 1, setne, trueV, falseV) ->
8059     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
8060     // This can occur when legalizing some floating point comparisons.
8061     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8062     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8063       SDLoc DL(N);
8064       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8065       SDValue TargetCC = DAG.getCondCode(CCVal);
8066       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8067       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8068                          {LHS, RHS, TargetCC, TrueV, FalseV});
8069     }
8070 
8071     break;
8072   }
8073   case RISCVISD::BR_CC: {
8074     SDValue LHS = N->getOperand(1);
8075     SDValue RHS = N->getOperand(2);
8076     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
8077     if (!ISD::isIntEqualitySetCC(CCVal))
8078       break;
8079 
8080     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
8081     //      (br_cc X, Y, lt, dest)
8082     // Sometimes the setcc is introduced after br_cc has been formed.
8083     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8084         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8085       // If we're looking for eq 0 instead of ne 0, we need to invert the
8086       // condition.
8087       bool Invert = CCVal == ISD::SETEQ;
8088       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8089       if (Invert)
8090         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8091 
8092       SDLoc DL(N);
8093       RHS = LHS.getOperand(1);
8094       LHS = LHS.getOperand(0);
8095       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8096 
8097       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8098                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
8099                          N->getOperand(4));
8100     }
8101 
8102     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
8103     //      (br_cc X, Y, eq/ne, trueV, falseV)
8104     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8105       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
8106                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
8107                          N->getOperand(3), N->getOperand(4));
8108 
8109     // (br_cc X, 1, setne, br_cc) ->
8110     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
8111     // This can occur when legalizing some floating point comparisons.
8112     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8113     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8114       SDLoc DL(N);
8115       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8116       SDValue TargetCC = DAG.getCondCode(CCVal);
8117       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8118       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8119                          N->getOperand(0), LHS, RHS, TargetCC,
8120                          N->getOperand(4));
8121     }
8122     break;
8123   }
8124   case ISD::FP_TO_SINT:
8125   case ISD::FP_TO_UINT:
8126     return performFP_TO_INTCombine(N, DCI, Subtarget);
8127   case ISD::FP_TO_SINT_SAT:
8128   case ISD::FP_TO_UINT_SAT:
8129     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
8130   case ISD::FCOPYSIGN: {
8131     EVT VT = N->getValueType(0);
8132     if (!VT.isVector())
8133       break;
8134     // There is a form of VFSGNJ which injects the negated sign of its second
8135     // operand. Try and bubble any FNEG up after the extend/round to produce
8136     // this optimized pattern. Avoid modifying cases where FP_ROUND and
8137     // TRUNC=1.
8138     SDValue In2 = N->getOperand(1);
8139     // Avoid cases where the extend/round has multiple uses, as duplicating
8140     // those is typically more expensive than removing a fneg.
8141     if (!In2.hasOneUse())
8142       break;
8143     if (In2.getOpcode() != ISD::FP_EXTEND &&
8144         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
8145       break;
8146     In2 = In2.getOperand(0);
8147     if (In2.getOpcode() != ISD::FNEG)
8148       break;
8149     SDLoc DL(N);
8150     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
8151     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
8152                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
8153   }
8154   case ISD::MGATHER:
8155   case ISD::MSCATTER:
8156   case ISD::VP_GATHER:
8157   case ISD::VP_SCATTER: {
8158     if (!DCI.isBeforeLegalize())
8159       break;
8160     SDValue Index, ScaleOp;
8161     bool IsIndexScaled = false;
8162     bool IsIndexSigned = false;
8163     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
8164       Index = VPGSN->getIndex();
8165       ScaleOp = VPGSN->getScale();
8166       IsIndexScaled = VPGSN->isIndexScaled();
8167       IsIndexSigned = VPGSN->isIndexSigned();
8168     } else {
8169       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
8170       Index = MGSN->getIndex();
8171       ScaleOp = MGSN->getScale();
8172       IsIndexScaled = MGSN->isIndexScaled();
8173       IsIndexSigned = MGSN->isIndexSigned();
8174     }
8175     EVT IndexVT = Index.getValueType();
8176     MVT XLenVT = Subtarget.getXLenVT();
8177     // RISCV indexed loads only support the "unsigned unscaled" addressing
8178     // mode, so anything else must be manually legalized.
8179     bool NeedsIdxLegalization =
8180         IsIndexScaled ||
8181         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
8182     if (!NeedsIdxLegalization)
8183       break;
8184 
8185     SDLoc DL(N);
8186 
8187     // Any index legalization should first promote to XLenVT, so we don't lose
8188     // bits when scaling. This may create an illegal index type so we let
8189     // LLVM's legalization take care of the splitting.
8190     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
8191     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
8192       IndexVT = IndexVT.changeVectorElementType(XLenVT);
8193       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
8194                           DL, IndexVT, Index);
8195     }
8196 
8197     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
8198     if (IsIndexScaled && Scale != 1) {
8199       // Manually scale the indices by the element size.
8200       // TODO: Sanitize the scale operand here?
8201       // TODO: For VP nodes, should we use VP_SHL here?
8202       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
8203       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
8204       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
8205     }
8206 
8207     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
8208     if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
8209       return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
8210                              {VPGN->getChain(), VPGN->getBasePtr(), Index,
8211                               VPGN->getScale(), VPGN->getMask(),
8212                               VPGN->getVectorLength()},
8213                              VPGN->getMemOperand(), NewIndexTy);
8214     if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
8215       return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
8216                               {VPSN->getChain(), VPSN->getValue(),
8217                                VPSN->getBasePtr(), Index, VPSN->getScale(),
8218                                VPSN->getMask(), VPSN->getVectorLength()},
8219                               VPSN->getMemOperand(), NewIndexTy);
8220     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
8221       return DAG.getMaskedGather(
8222           N->getVTList(), MGN->getMemoryVT(), DL,
8223           {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
8224            MGN->getBasePtr(), Index, MGN->getScale()},
8225           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
8226     const auto *MSN = cast<MaskedScatterSDNode>(N);
8227     return DAG.getMaskedScatter(
8228         N->getVTList(), MSN->getMemoryVT(), DL,
8229         {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
8230          Index, MSN->getScale()},
8231         MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
8232   }
8233   case RISCVISD::SRA_VL:
8234   case RISCVISD::SRL_VL:
8235   case RISCVISD::SHL_VL: {
8236     SDValue ShAmt = N->getOperand(1);
8237     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8238       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8239       SDLoc DL(N);
8240       SDValue VL = N->getOperand(3);
8241       EVT VT = N->getValueType(0);
8242       ShAmt =
8243           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
8244       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
8245                          N->getOperand(2), N->getOperand(3));
8246     }
8247     break;
8248   }
8249   case ISD::SRA:
8250   case ISD::SRL:
8251   case ISD::SHL: {
8252     SDValue ShAmt = N->getOperand(1);
8253     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
8254       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
8255       SDLoc DL(N);
8256       EVT VT = N->getValueType(0);
8257       ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0),
8258                           DAG.getTargetConstant(RISCV::VLMaxSentinel, DL,
8259                                                 Subtarget.getXLenVT()));
8260       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
8261     }
8262     break;
8263   }
8264   case RISCVISD::ADD_VL:
8265     if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false))
8266       return V;
8267     return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true);
8268   case RISCVISD::SUB_VL:
8269     return combineADDSUB_VLToVWADDSUB_VL(N, DAG);
8270   case RISCVISD::VWADD_W_VL:
8271   case RISCVISD::VWADDU_W_VL:
8272   case RISCVISD::VWSUB_W_VL:
8273   case RISCVISD::VWSUBU_W_VL:
8274     return combineVWADD_W_VL_VWSUB_W_VL(N, DAG);
8275   case RISCVISD::MUL_VL:
8276     if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
8277       return V;
8278     // Mul is commutative.
8279     return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
8280   case ISD::STORE: {
8281     auto *Store = cast<StoreSDNode>(N);
8282     SDValue Val = Store->getValue();
8283     // Combine store of vmv.x.s to vse with VL of 1.
8284     // FIXME: Support FP.
8285     if (Val.getOpcode() == RISCVISD::VMV_X_S) {
8286       SDValue Src = Val.getOperand(0);
8287       EVT VecVT = Src.getValueType();
8288       EVT MemVT = Store->getMemoryVT();
8289       // The memory VT and the element type must match.
8290       if (VecVT.getVectorElementType() == MemVT) {
8291         SDLoc DL(N);
8292         MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
8293         return DAG.getStoreVP(
8294             Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
8295             DAG.getConstant(1, DL, MaskVT),
8296             DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
8297             Store->getMemOperand(), Store->getAddressingMode(),
8298             Store->isTruncatingStore(), /*IsCompress*/ false);
8299       }
8300     }
8301 
8302     break;
8303   }
8304   case ISD::SPLAT_VECTOR: {
8305     EVT VT = N->getValueType(0);
8306     // Only perform this combine on legal MVT types.
8307     if (!isTypeLegal(VT))
8308       break;
8309     if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
8310                                          DAG, Subtarget))
8311       return Gather;
8312     break;
8313   }
8314   }
8315 
8316   return SDValue();
8317 }
8318 
8319 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
8320     const SDNode *N, CombineLevel Level) const {
8321   // The following folds are only desirable if `(OP _, c1 << c2)` can be
8322   // materialised in fewer instructions than `(OP _, c1)`:
8323   //
8324   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
8325   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
8326   SDValue N0 = N->getOperand(0);
8327   EVT Ty = N0.getValueType();
8328   if (Ty.isScalarInteger() &&
8329       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
8330     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
8331     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
8332     if (C1 && C2) {
8333       const APInt &C1Int = C1->getAPIntValue();
8334       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
8335 
8336       // We can materialise `c1 << c2` into an add immediate, so it's "free",
8337       // and the combine should happen, to potentially allow further combines
8338       // later.
8339       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
8340           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
8341         return true;
8342 
8343       // We can materialise `c1` in an add immediate, so it's "free", and the
8344       // combine should be prevented.
8345       if (C1Int.getMinSignedBits() <= 64 &&
8346           isLegalAddImmediate(C1Int.getSExtValue()))
8347         return false;
8348 
8349       // Neither constant will fit into an immediate, so find materialisation
8350       // costs.
8351       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
8352                                               Subtarget.getFeatureBits(),
8353                                               /*CompressionCost*/true);
8354       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
8355           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
8356           /*CompressionCost*/true);
8357 
8358       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
8359       // combine should be prevented.
8360       if (C1Cost < ShiftedC1Cost)
8361         return false;
8362     }
8363   }
8364   return true;
8365 }
8366 
8367 bool RISCVTargetLowering::targetShrinkDemandedConstant(
8368     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
8369     TargetLoweringOpt &TLO) const {
8370   // Delay this optimization as late as possible.
8371   if (!TLO.LegalOps)
8372     return false;
8373 
8374   EVT VT = Op.getValueType();
8375   if (VT.isVector())
8376     return false;
8377 
8378   // Only handle AND for now.
8379   if (Op.getOpcode() != ISD::AND)
8380     return false;
8381 
8382   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
8383   if (!C)
8384     return false;
8385 
8386   const APInt &Mask = C->getAPIntValue();
8387 
8388   // Clear all non-demanded bits initially.
8389   APInt ShrunkMask = Mask & DemandedBits;
8390 
8391   // Try to make a smaller immediate by setting undemanded bits.
8392 
8393   APInt ExpandedMask = Mask | ~DemandedBits;
8394 
8395   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
8396     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
8397   };
8398   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
8399     if (NewMask == Mask)
8400       return true;
8401     SDLoc DL(Op);
8402     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
8403     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
8404     return TLO.CombineTo(Op, NewOp);
8405   };
8406 
8407   // If the shrunk mask fits in sign extended 12 bits, let the target
8408   // independent code apply it.
8409   if (ShrunkMask.isSignedIntN(12))
8410     return false;
8411 
8412   // Preserve (and X, 0xffff) when zext.h is supported.
8413   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
8414     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
8415     if (IsLegalMask(NewMask))
8416       return UseMask(NewMask);
8417   }
8418 
8419   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
8420   if (VT == MVT::i64) {
8421     APInt NewMask = APInt(64, 0xffffffff);
8422     if (IsLegalMask(NewMask))
8423       return UseMask(NewMask);
8424   }
8425 
8426   // For the remaining optimizations, we need to be able to make a negative
8427   // number through a combination of mask and undemanded bits.
8428   if (!ExpandedMask.isNegative())
8429     return false;
8430 
8431   // What is the fewest number of bits we need to represent the negative number.
8432   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
8433 
8434   // Try to make a 12 bit negative immediate. If that fails try to make a 32
8435   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
8436   APInt NewMask = ShrunkMask;
8437   if (MinSignedBits <= 12)
8438     NewMask.setBitsFrom(11);
8439   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
8440     NewMask.setBitsFrom(31);
8441   else
8442     return false;
8443 
8444   // Check that our new mask is a subset of the demanded mask.
8445   assert(IsLegalMask(NewMask));
8446   return UseMask(NewMask);
8447 }
8448 
8449 static void computeGREV(APInt &Src, unsigned ShAmt) {
8450   ShAmt &= Src.getBitWidth() - 1;
8451   uint64_t x = Src.getZExtValue();
8452   if (ShAmt & 1)
8453     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
8454   if (ShAmt & 2)
8455     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
8456   if (ShAmt & 4)
8457     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
8458   if (ShAmt & 8)
8459     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
8460   if (ShAmt & 16)
8461     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
8462   if (ShAmt & 32)
8463     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
8464   Src = x;
8465 }
8466 
8467 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
8468                                                         KnownBits &Known,
8469                                                         const APInt &DemandedElts,
8470                                                         const SelectionDAG &DAG,
8471                                                         unsigned Depth) const {
8472   unsigned BitWidth = Known.getBitWidth();
8473   unsigned Opc = Op.getOpcode();
8474   assert((Opc >= ISD::BUILTIN_OP_END ||
8475           Opc == ISD::INTRINSIC_WO_CHAIN ||
8476           Opc == ISD::INTRINSIC_W_CHAIN ||
8477           Opc == ISD::INTRINSIC_VOID) &&
8478          "Should use MaskedValueIsZero if you don't know whether Op"
8479          " is a target node!");
8480 
8481   Known.resetAll();
8482   switch (Opc) {
8483   default: break;
8484   case RISCVISD::SELECT_CC: {
8485     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
8486     // If we don't know any bits, early out.
8487     if (Known.isUnknown())
8488       break;
8489     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
8490 
8491     // Only known if known in both the LHS and RHS.
8492     Known = KnownBits::commonBits(Known, Known2);
8493     break;
8494   }
8495   case RISCVISD::REMUW: {
8496     KnownBits Known2;
8497     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8498     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8499     // We only care about the lower 32 bits.
8500     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
8501     // Restore the original width by sign extending.
8502     Known = Known.sext(BitWidth);
8503     break;
8504   }
8505   case RISCVISD::DIVUW: {
8506     KnownBits Known2;
8507     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
8508     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
8509     // We only care about the lower 32 bits.
8510     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
8511     // Restore the original width by sign extending.
8512     Known = Known.sext(BitWidth);
8513     break;
8514   }
8515   case RISCVISD::CTZW: {
8516     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8517     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
8518     unsigned LowBits = Log2_32(PossibleTZ) + 1;
8519     Known.Zero.setBitsFrom(LowBits);
8520     break;
8521   }
8522   case RISCVISD::CLZW: {
8523     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8524     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
8525     unsigned LowBits = Log2_32(PossibleLZ) + 1;
8526     Known.Zero.setBitsFrom(LowBits);
8527     break;
8528   }
8529   case RISCVISD::GREV:
8530   case RISCVISD::GREVW: {
8531     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
8532       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
8533       if (Opc == RISCVISD::GREVW)
8534         Known = Known.trunc(32);
8535       unsigned ShAmt = C->getZExtValue();
8536       computeGREV(Known.Zero, ShAmt);
8537       computeGREV(Known.One, ShAmt);
8538       if (Opc == RISCVISD::GREVW)
8539         Known = Known.sext(BitWidth);
8540     }
8541     break;
8542   }
8543   case RISCVISD::READ_VLENB: {
8544     // If we know the minimum VLen from Zvl extensions, we can use that to
8545     // determine the trailing zeros of VLENB.
8546     // FIXME: Limit to 128 bit vectors until we have more testing.
8547     unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8;
8548     if (MinVLenB > 0)
8549       Known.Zero.setLowBits(Log2_32(MinVLenB));
8550     // We assume VLENB is no more than 65536 / 8 bytes.
8551     Known.Zero.setBitsFrom(14);
8552     break;
8553   }
8554   case ISD::INTRINSIC_W_CHAIN:
8555   case ISD::INTRINSIC_WO_CHAIN: {
8556     unsigned IntNo =
8557         Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
8558     switch (IntNo) {
8559     default:
8560       // We can't do anything for most intrinsics.
8561       break;
8562     case Intrinsic::riscv_vsetvli:
8563     case Intrinsic::riscv_vsetvlimax:
8564     case Intrinsic::riscv_vsetvli_opt:
8565     case Intrinsic::riscv_vsetvlimax_opt:
8566       // Assume that VL output is positive and would fit in an int32_t.
8567       // TODO: VLEN might be capped at 16 bits in a future V spec update.
8568       if (BitWidth >= 32)
8569         Known.Zero.setBitsFrom(31);
8570       break;
8571     }
8572     break;
8573   }
8574   }
8575 }
8576 
8577 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
8578     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
8579     unsigned Depth) const {
8580   switch (Op.getOpcode()) {
8581   default:
8582     break;
8583   case RISCVISD::SELECT_CC: {
8584     unsigned Tmp =
8585         DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
8586     if (Tmp == 1) return 1;  // Early out.
8587     unsigned Tmp2 =
8588         DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
8589     return std::min(Tmp, Tmp2);
8590   }
8591   case RISCVISD::SLLW:
8592   case RISCVISD::SRAW:
8593   case RISCVISD::SRLW:
8594   case RISCVISD::DIVW:
8595   case RISCVISD::DIVUW:
8596   case RISCVISD::REMUW:
8597   case RISCVISD::ROLW:
8598   case RISCVISD::RORW:
8599   case RISCVISD::GREVW:
8600   case RISCVISD::GORCW:
8601   case RISCVISD::FSLW:
8602   case RISCVISD::FSRW:
8603   case RISCVISD::SHFLW:
8604   case RISCVISD::UNSHFLW:
8605   case RISCVISD::BCOMPRESSW:
8606   case RISCVISD::BDECOMPRESSW:
8607   case RISCVISD::BFPW:
8608   case RISCVISD::FCVT_W_RV64:
8609   case RISCVISD::FCVT_WU_RV64:
8610   case RISCVISD::STRICT_FCVT_W_RV64:
8611   case RISCVISD::STRICT_FCVT_WU_RV64:
8612     // TODO: As the result is sign-extended, this is conservatively correct. A
8613     // more precise answer could be calculated for SRAW depending on known
8614     // bits in the shift amount.
8615     return 33;
8616   case RISCVISD::SHFL:
8617   case RISCVISD::UNSHFL: {
8618     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
8619     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
8620     // will stay within the upper 32 bits. If there were more than 32 sign bits
8621     // before there will be at least 33 sign bits after.
8622     if (Op.getValueType() == MVT::i64 &&
8623         isa<ConstantSDNode>(Op.getOperand(1)) &&
8624         (Op.getConstantOperandVal(1) & 0x10) == 0) {
8625       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
8626       if (Tmp > 32)
8627         return 33;
8628     }
8629     break;
8630   }
8631   case RISCVISD::VMV_X_S: {
8632     // The number of sign bits of the scalar result is computed by obtaining the
8633     // element type of the input vector operand, subtracting its width from the
8634     // XLEN, and then adding one (sign bit within the element type). If the
8635     // element type is wider than XLen, the least-significant XLEN bits are
8636     // taken.
8637     unsigned XLen = Subtarget.getXLen();
8638     unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
8639     if (EltBits <= XLen)
8640       return XLen - EltBits + 1;
8641     break;
8642   }
8643   }
8644 
8645   return 1;
8646 }
8647 
8648 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
8649                                                   MachineBasicBlock *BB) {
8650   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
8651 
8652   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
8653   // Should the count have wrapped while it was being read, we need to try
8654   // again.
8655   // ...
8656   // read:
8657   // rdcycleh x3 # load high word of cycle
8658   // rdcycle  x2 # load low word of cycle
8659   // rdcycleh x4 # load high word of cycle
8660   // bne x3, x4, read # check if high word reads match, otherwise try again
8661   // ...
8662 
8663   MachineFunction &MF = *BB->getParent();
8664   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8665   MachineFunction::iterator It = ++BB->getIterator();
8666 
8667   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
8668   MF.insert(It, LoopMBB);
8669 
8670   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
8671   MF.insert(It, DoneMBB);
8672 
8673   // Transfer the remainder of BB and its successor edges to DoneMBB.
8674   DoneMBB->splice(DoneMBB->begin(), BB,
8675                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
8676   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
8677 
8678   BB->addSuccessor(LoopMBB);
8679 
8680   MachineRegisterInfo &RegInfo = MF.getRegInfo();
8681   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8682   Register LoReg = MI.getOperand(0).getReg();
8683   Register HiReg = MI.getOperand(1).getReg();
8684   DebugLoc DL = MI.getDebugLoc();
8685 
8686   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
8687   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
8688       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
8689       .addReg(RISCV::X0);
8690   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
8691       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
8692       .addReg(RISCV::X0);
8693   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
8694       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
8695       .addReg(RISCV::X0);
8696 
8697   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
8698       .addReg(HiReg)
8699       .addReg(ReadAgainReg)
8700       .addMBB(LoopMBB);
8701 
8702   LoopMBB->addSuccessor(LoopMBB);
8703   LoopMBB->addSuccessor(DoneMBB);
8704 
8705   MI.eraseFromParent();
8706 
8707   return DoneMBB;
8708 }
8709 
8710 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
8711                                              MachineBasicBlock *BB) {
8712   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
8713 
8714   MachineFunction &MF = *BB->getParent();
8715   DebugLoc DL = MI.getDebugLoc();
8716   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
8717   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
8718   Register LoReg = MI.getOperand(0).getReg();
8719   Register HiReg = MI.getOperand(1).getReg();
8720   Register SrcReg = MI.getOperand(2).getReg();
8721   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
8722   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
8723 
8724   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
8725                           RI);
8726   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
8727   MachineMemOperand *MMOLo =
8728       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
8729   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
8730       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
8731   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
8732       .addFrameIndex(FI)
8733       .addImm(0)
8734       .addMemOperand(MMOLo);
8735   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
8736       .addFrameIndex(FI)
8737       .addImm(4)
8738       .addMemOperand(MMOHi);
8739   MI.eraseFromParent(); // The pseudo instruction is gone now.
8740   return BB;
8741 }
8742 
8743 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
8744                                                  MachineBasicBlock *BB) {
8745   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
8746          "Unexpected instruction");
8747 
8748   MachineFunction &MF = *BB->getParent();
8749   DebugLoc DL = MI.getDebugLoc();
8750   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
8751   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
8752   Register DstReg = MI.getOperand(0).getReg();
8753   Register LoReg = MI.getOperand(1).getReg();
8754   Register HiReg = MI.getOperand(2).getReg();
8755   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
8756   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
8757 
8758   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
8759   MachineMemOperand *MMOLo =
8760       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
8761   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
8762       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
8763   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
8764       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
8765       .addFrameIndex(FI)
8766       .addImm(0)
8767       .addMemOperand(MMOLo);
8768   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
8769       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
8770       .addFrameIndex(FI)
8771       .addImm(4)
8772       .addMemOperand(MMOHi);
8773   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
8774   MI.eraseFromParent(); // The pseudo instruction is gone now.
8775   return BB;
8776 }
8777 
8778 static bool isSelectPseudo(MachineInstr &MI) {
8779   switch (MI.getOpcode()) {
8780   default:
8781     return false;
8782   case RISCV::Select_GPR_Using_CC_GPR:
8783   case RISCV::Select_FPR16_Using_CC_GPR:
8784   case RISCV::Select_FPR32_Using_CC_GPR:
8785   case RISCV::Select_FPR64_Using_CC_GPR:
8786     return true;
8787   }
8788 }
8789 
8790 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
8791                                         unsigned RelOpcode, unsigned EqOpcode,
8792                                         const RISCVSubtarget &Subtarget) {
8793   DebugLoc DL = MI.getDebugLoc();
8794   Register DstReg = MI.getOperand(0).getReg();
8795   Register Src1Reg = MI.getOperand(1).getReg();
8796   Register Src2Reg = MI.getOperand(2).getReg();
8797   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
8798   Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
8799   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
8800 
8801   // Save the current FFLAGS.
8802   BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);
8803 
8804   auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
8805                  .addReg(Src1Reg)
8806                  .addReg(Src2Reg);
8807   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
8808     MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);
8809 
8810   // Restore the FFLAGS.
8811   BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
8812       .addReg(SavedFFlags, RegState::Kill);
8813 
8814   // Issue a dummy FEQ opcode to raise exception for signaling NaNs.
8815   auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
8816                   .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
8817                   .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
8818   if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
8819     MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);
8820 
8821   // Erase the pseudoinstruction.
8822   MI.eraseFromParent();
8823   return BB;
8824 }
8825 
8826 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
8827                                            MachineBasicBlock *BB,
8828                                            const RISCVSubtarget &Subtarget) {
8829   // To "insert" Select_* instructions, we actually have to insert the triangle
8830   // control-flow pattern.  The incoming instructions know the destination vreg
8831   // to set, the condition code register to branch on, the true/false values to
8832   // select between, and the condcode to use to select the appropriate branch.
8833   //
8834   // We produce the following control flow:
8835   //     HeadMBB
8836   //     |  \
8837   //     |  IfFalseMBB
8838   //     | /
8839   //    TailMBB
8840   //
8841   // When we find a sequence of selects we attempt to optimize their emission
8842   // by sharing the control flow. Currently we only handle cases where we have
8843   // multiple selects with the exact same condition (same LHS, RHS and CC).
8844   // The selects may be interleaved with other instructions if the other
8845   // instructions meet some requirements we deem safe:
8846   // - They are debug instructions. Otherwise,
8847   // - They do not have side-effects, do not access memory and their inputs do
8848   //   not depend on the results of the select pseudo-instructions.
8849   // The TrueV/FalseV operands of the selects cannot depend on the result of
8850   // previous selects in the sequence.
8851   // These conditions could be further relaxed. See the X86 target for a
8852   // related approach and more information.
8853   Register LHS = MI.getOperand(1).getReg();
8854   Register RHS = MI.getOperand(2).getReg();
8855   auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
8856 
8857   SmallVector<MachineInstr *, 4> SelectDebugValues;
8858   SmallSet<Register, 4> SelectDests;
8859   SelectDests.insert(MI.getOperand(0).getReg());
8860 
8861   MachineInstr *LastSelectPseudo = &MI;
8862 
8863   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
8864        SequenceMBBI != E; ++SequenceMBBI) {
8865     if (SequenceMBBI->isDebugInstr())
8866       continue;
8867     else if (isSelectPseudo(*SequenceMBBI)) {
8868       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
8869           SequenceMBBI->getOperand(2).getReg() != RHS ||
8870           SequenceMBBI->getOperand(3).getImm() != CC ||
8871           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
8872           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
8873         break;
8874       LastSelectPseudo = &*SequenceMBBI;
8875       SequenceMBBI->collectDebugValues(SelectDebugValues);
8876       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
8877     } else {
8878       if (SequenceMBBI->hasUnmodeledSideEffects() ||
8879           SequenceMBBI->mayLoadOrStore())
8880         break;
8881       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
8882             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
8883           }))
8884         break;
8885     }
8886   }
8887 
8888   const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
8889   const BasicBlock *LLVM_BB = BB->getBasicBlock();
8890   DebugLoc DL = MI.getDebugLoc();
8891   MachineFunction::iterator I = ++BB->getIterator();
8892 
8893   MachineBasicBlock *HeadMBB = BB;
8894   MachineFunction *F = BB->getParent();
8895   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
8896   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
8897 
8898   F->insert(I, IfFalseMBB);
8899   F->insert(I, TailMBB);
8900 
8901   // Transfer debug instructions associated with the selects to TailMBB.
8902   for (MachineInstr *DebugInstr : SelectDebugValues) {
8903     TailMBB->push_back(DebugInstr->removeFromParent());
8904   }
8905 
8906   // Move all instructions after the sequence to TailMBB.
8907   TailMBB->splice(TailMBB->end(), HeadMBB,
8908                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
8909   // Update machine-CFG edges by transferring all successors of the current
8910   // block to the new block which will contain the Phi nodes for the selects.
8911   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
8912   // Set the successors for HeadMBB.
8913   HeadMBB->addSuccessor(IfFalseMBB);
8914   HeadMBB->addSuccessor(TailMBB);
8915 
8916   // Insert appropriate branch.
8917   BuildMI(HeadMBB, DL, TII.getBrCond(CC))
8918     .addReg(LHS)
8919     .addReg(RHS)
8920     .addMBB(TailMBB);
8921 
8922   // IfFalseMBB just falls through to TailMBB.
8923   IfFalseMBB->addSuccessor(TailMBB);
8924 
8925   // Create PHIs for all of the select pseudo-instructions.
8926   auto SelectMBBI = MI.getIterator();
8927   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
8928   auto InsertionPoint = TailMBB->begin();
8929   while (SelectMBBI != SelectEnd) {
8930     auto Next = std::next(SelectMBBI);
8931     if (isSelectPseudo(*SelectMBBI)) {
8932       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
8933       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
8934               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
8935           .addReg(SelectMBBI->getOperand(4).getReg())
8936           .addMBB(HeadMBB)
8937           .addReg(SelectMBBI->getOperand(5).getReg())
8938           .addMBB(IfFalseMBB);
8939       SelectMBBI->eraseFromParent();
8940     }
8941     SelectMBBI = Next;
8942   }
8943 
8944   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
8945   return TailMBB;
8946 }
8947 
8948 MachineBasicBlock *
8949 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
8950                                                  MachineBasicBlock *BB) const {
8951   switch (MI.getOpcode()) {
8952   default:
8953     llvm_unreachable("Unexpected instr type to insert");
8954   case RISCV::ReadCycleWide:
8955     assert(!Subtarget.is64Bit() &&
8956            "ReadCycleWrite is only to be used on riscv32");
8957     return emitReadCycleWidePseudo(MI, BB);
8958   case RISCV::Select_GPR_Using_CC_GPR:
8959   case RISCV::Select_FPR16_Using_CC_GPR:
8960   case RISCV::Select_FPR32_Using_CC_GPR:
8961   case RISCV::Select_FPR64_Using_CC_GPR:
8962     return emitSelectPseudo(MI, BB, Subtarget);
8963   case RISCV::BuildPairF64Pseudo:
8964     return emitBuildPairF64Pseudo(MI, BB);
8965   case RISCV::SplitF64Pseudo:
8966     return emitSplitF64Pseudo(MI, BB);
8967   case RISCV::PseudoQuietFLE_H:
8968     return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
8969   case RISCV::PseudoQuietFLT_H:
8970     return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
8971   case RISCV::PseudoQuietFLE_S:
8972     return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
8973   case RISCV::PseudoQuietFLT_S:
8974     return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
8975   case RISCV::PseudoQuietFLE_D:
8976     return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
8977   case RISCV::PseudoQuietFLT_D:
8978     return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
8979   }
8980 }
8981 
8982 void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
8983                                                         SDNode *Node) const {
8984   // Add FRM dependency to any instructions with dynamic rounding mode.
8985   unsigned Opc = MI.getOpcode();
8986   auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
8987   if (Idx < 0)
8988     return;
8989   if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
8990     return;
8991   // If the instruction already reads FRM, don't add another read.
8992   if (MI.readsRegister(RISCV::FRM))
8993     return;
8994   MI.addOperand(
8995       MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
8996 }
8997 
8998 // Calling Convention Implementation.
8999 // The expectations for frontend ABI lowering vary from target to target.
9000 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
9001 // details, but this is a longer term goal. For now, we simply try to keep the
9002 // role of the frontend as simple and well-defined as possible. The rules can
9003 // be summarised as:
9004 // * Never split up large scalar arguments. We handle them here.
9005 // * If a hardfloat calling convention is being used, and the struct may be
9006 // passed in a pair of registers (fp+fp, int+fp), and both registers are
9007 // available, then pass as two separate arguments. If either the GPRs or FPRs
9008 // are exhausted, then pass according to the rule below.
9009 // * If a struct could never be passed in registers or directly in a stack
9010 // slot (as it is larger than 2*XLEN and the floating point rules don't
9011 // apply), then pass it using a pointer with the byval attribute.
9012 // * If a struct is less than 2*XLEN, then coerce to either a two-element
9013 // word-sized array or a 2*XLEN scalar (depending on alignment).
9014 // * The frontend can determine whether a struct is returned by reference or
9015 // not based on its size and fields. If it will be returned by reference, the
9016 // frontend must modify the prototype so a pointer with the sret annotation is
9017 // passed as the first argument. This is not necessary for large scalar
9018 // returns.
9019 // * Struct return values and varargs should be coerced to structs containing
9020 // register-size fields in the same situations they would be for fixed
9021 // arguments.
9022 
9023 static const MCPhysReg ArgGPRs[] = {
9024   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
9025   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
9026 };
9027 static const MCPhysReg ArgFPR16s[] = {
9028   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
9029   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
9030 };
9031 static const MCPhysReg ArgFPR32s[] = {
9032   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
9033   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
9034 };
9035 static const MCPhysReg ArgFPR64s[] = {
9036   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
9037   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
9038 };
9039 // This is an interim calling convention and it may be changed in the future.
9040 static const MCPhysReg ArgVRs[] = {
9041     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
9042     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
9043     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
9044 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
9045                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
9046                                      RISCV::V20M2, RISCV::V22M2};
9047 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
9048                                      RISCV::V20M4};
9049 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
9050 
9051 // Pass a 2*XLEN argument that has been split into two XLEN values through
9052 // registers or the stack as necessary.
9053 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
9054                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
9055                                 MVT ValVT2, MVT LocVT2,
9056                                 ISD::ArgFlagsTy ArgFlags2) {
9057   unsigned XLenInBytes = XLen / 8;
9058   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9059     // At least one half can be passed via register.
9060     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
9061                                      VA1.getLocVT(), CCValAssign::Full));
9062   } else {
9063     // Both halves must be passed on the stack, with proper alignment.
9064     Align StackAlign =
9065         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
9066     State.addLoc(
9067         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
9068                             State.AllocateStack(XLenInBytes, StackAlign),
9069                             VA1.getLocVT(), CCValAssign::Full));
9070     State.addLoc(CCValAssign::getMem(
9071         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9072         LocVT2, CCValAssign::Full));
9073     return false;
9074   }
9075 
9076   if (Register Reg = State.AllocateReg(ArgGPRs)) {
9077     // The second half can also be passed via register.
9078     State.addLoc(
9079         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
9080   } else {
9081     // The second half is passed via the stack, without additional alignment.
9082     State.addLoc(CCValAssign::getMem(
9083         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
9084         LocVT2, CCValAssign::Full));
9085   }
9086 
9087   return false;
9088 }
9089 
9090 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
9091                                Optional<unsigned> FirstMaskArgument,
9092                                CCState &State, const RISCVTargetLowering &TLI) {
9093   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
9094   if (RC == &RISCV::VRRegClass) {
9095     // Assign the first mask argument to V0.
9096     // This is an interim calling convention and it may be changed in the
9097     // future.
9098     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
9099       return State.AllocateReg(RISCV::V0);
9100     return State.AllocateReg(ArgVRs);
9101   }
9102   if (RC == &RISCV::VRM2RegClass)
9103     return State.AllocateReg(ArgVRM2s);
9104   if (RC == &RISCV::VRM4RegClass)
9105     return State.AllocateReg(ArgVRM4s);
9106   if (RC == &RISCV::VRM8RegClass)
9107     return State.AllocateReg(ArgVRM8s);
9108   llvm_unreachable("Unhandled register class for ValueType");
9109 }
9110 
9111 // Implements the RISC-V calling convention. Returns true upon failure.
9112 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
9113                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
9114                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
9115                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
9116                      Optional<unsigned> FirstMaskArgument) {
9117   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
9118   assert(XLen == 32 || XLen == 64);
9119   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
9120 
9121   // Any return value split in to more than two values can't be returned
9122   // directly. Vectors are returned via the available vector registers.
9123   if (!LocVT.isVector() && IsRet && ValNo > 1)
9124     return true;
9125 
9126   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
9127   // variadic argument, or if no F16/F32 argument registers are available.
9128   bool UseGPRForF16_F32 = true;
9129   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
9130   // variadic argument, or if no F64 argument registers are available.
9131   bool UseGPRForF64 = true;
9132 
9133   switch (ABI) {
9134   default:
9135     llvm_unreachable("Unexpected ABI");
9136   case RISCVABI::ABI_ILP32:
9137   case RISCVABI::ABI_LP64:
9138     break;
9139   case RISCVABI::ABI_ILP32F:
9140   case RISCVABI::ABI_LP64F:
9141     UseGPRForF16_F32 = !IsFixed;
9142     break;
9143   case RISCVABI::ABI_ILP32D:
9144   case RISCVABI::ABI_LP64D:
9145     UseGPRForF16_F32 = !IsFixed;
9146     UseGPRForF64 = !IsFixed;
9147     break;
9148   }
9149 
9150   // FPR16, FPR32, and FPR64 alias each other.
9151   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
9152     UseGPRForF16_F32 = true;
9153     UseGPRForF64 = true;
9154   }
9155 
9156   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
9157   // similar local variables rather than directly checking against the target
9158   // ABI.
9159 
9160   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
9161     LocVT = XLenVT;
9162     LocInfo = CCValAssign::BCvt;
9163   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
9164     LocVT = MVT::i64;
9165     LocInfo = CCValAssign::BCvt;
9166   }
9167 
9168   // If this is a variadic argument, the RISC-V calling convention requires
9169   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
9170   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
9171   // be used regardless of whether the original argument was split during
9172   // legalisation or not. The argument will not be passed by registers if the
9173   // original type is larger than 2*XLEN, so the register alignment rule does
9174   // not apply.
9175   unsigned TwoXLenInBytes = (2 * XLen) / 8;
9176   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
9177       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
9178     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
9179     // Skip 'odd' register if necessary.
9180     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
9181       State.AllocateReg(ArgGPRs);
9182   }
9183 
9184   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
9185   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
9186       State.getPendingArgFlags();
9187 
9188   assert(PendingLocs.size() == PendingArgFlags.size() &&
9189          "PendingLocs and PendingArgFlags out of sync");
9190 
9191   // Handle passing f64 on RV32D with a soft float ABI or when floating point
9192   // registers are exhausted.
9193   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
9194     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
9195            "Can't lower f64 if it is split");
9196     // Depending on available argument GPRS, f64 may be passed in a pair of
9197     // GPRs, split between a GPR and the stack, or passed completely on the
9198     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
9199     // cases.
9200     Register Reg = State.AllocateReg(ArgGPRs);
9201     LocVT = MVT::i32;
9202     if (!Reg) {
9203       unsigned StackOffset = State.AllocateStack(8, Align(8));
9204       State.addLoc(
9205           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9206       return false;
9207     }
9208     if (!State.AllocateReg(ArgGPRs))
9209       State.AllocateStack(4, Align(4));
9210     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9211     return false;
9212   }
9213 
9214   // Fixed-length vectors are located in the corresponding scalable-vector
9215   // container types.
9216   if (ValVT.isFixedLengthVector())
9217     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9218 
9219   // Split arguments might be passed indirectly, so keep track of the pending
9220   // values. Split vectors are passed via a mix of registers and indirectly, so
9221   // treat them as we would any other argument.
9222   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
9223     LocVT = XLenVT;
9224     LocInfo = CCValAssign::Indirect;
9225     PendingLocs.push_back(
9226         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
9227     PendingArgFlags.push_back(ArgFlags);
9228     if (!ArgFlags.isSplitEnd()) {
9229       return false;
9230     }
9231   }
9232 
9233   // If the split argument only had two elements, it should be passed directly
9234   // in registers or on the stack.
9235   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
9236       PendingLocs.size() <= 2) {
9237     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
9238     // Apply the normal calling convention rules to the first half of the
9239     // split argument.
9240     CCValAssign VA = PendingLocs[0];
9241     ISD::ArgFlagsTy AF = PendingArgFlags[0];
9242     PendingLocs.clear();
9243     PendingArgFlags.clear();
9244     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
9245                                ArgFlags);
9246   }
9247 
9248   // Allocate to a register if possible, or else a stack slot.
9249   Register Reg;
9250   unsigned StoreSizeBytes = XLen / 8;
9251   Align StackAlign = Align(XLen / 8);
9252 
9253   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
9254     Reg = State.AllocateReg(ArgFPR16s);
9255   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
9256     Reg = State.AllocateReg(ArgFPR32s);
9257   else if (ValVT == MVT::f64 && !UseGPRForF64)
9258     Reg = State.AllocateReg(ArgFPR64s);
9259   else if (ValVT.isVector()) {
9260     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
9261     if (!Reg) {
9262       // For return values, the vector must be passed fully via registers or
9263       // via the stack.
9264       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
9265       // but we're using all of them.
9266       if (IsRet)
9267         return true;
9268       // Try using a GPR to pass the address
9269       if ((Reg = State.AllocateReg(ArgGPRs))) {
9270         LocVT = XLenVT;
9271         LocInfo = CCValAssign::Indirect;
9272       } else if (ValVT.isScalableVector()) {
9273         LocVT = XLenVT;
9274         LocInfo = CCValAssign::Indirect;
9275       } else {
9276         // Pass fixed-length vectors on the stack.
9277         LocVT = ValVT;
9278         StoreSizeBytes = ValVT.getStoreSize();
9279         // Align vectors to their element sizes, being careful for vXi1
9280         // vectors.
9281         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9282       }
9283     }
9284   } else {
9285     Reg = State.AllocateReg(ArgGPRs);
9286   }
9287 
9288   unsigned StackOffset =
9289       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
9290 
9291   // If we reach this point and PendingLocs is non-empty, we must be at the
9292   // end of a split argument that must be passed indirectly.
9293   if (!PendingLocs.empty()) {
9294     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
9295     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
9296 
9297     for (auto &It : PendingLocs) {
9298       if (Reg)
9299         It.convertToReg(Reg);
9300       else
9301         It.convertToMem(StackOffset);
9302       State.addLoc(It);
9303     }
9304     PendingLocs.clear();
9305     PendingArgFlags.clear();
9306     return false;
9307   }
9308 
9309   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
9310           (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
9311          "Expected an XLenVT or vector types at this stage");
9312 
9313   if (Reg) {
9314     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9315     return false;
9316   }
9317 
9318   // When a floating-point value is passed on the stack, no bit-conversion is
9319   // needed.
9320   if (ValVT.isFloatingPoint()) {
9321     LocVT = ValVT;
9322     LocInfo = CCValAssign::Full;
9323   }
9324   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9325   return false;
9326 }
9327 
9328 template <typename ArgTy>
9329 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
9330   for (const auto &ArgIdx : enumerate(Args)) {
9331     MVT ArgVT = ArgIdx.value().VT;
9332     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
9333       return ArgIdx.index();
9334   }
9335   return None;
9336 }
9337 
9338 void RISCVTargetLowering::analyzeInputArgs(
9339     MachineFunction &MF, CCState &CCInfo,
9340     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
9341     RISCVCCAssignFn Fn) const {
9342   unsigned NumArgs = Ins.size();
9343   FunctionType *FType = MF.getFunction().getFunctionType();
9344 
9345   Optional<unsigned> FirstMaskArgument;
9346   if (Subtarget.hasVInstructions())
9347     FirstMaskArgument = preAssignMask(Ins);
9348 
9349   for (unsigned i = 0; i != NumArgs; ++i) {
9350     MVT ArgVT = Ins[i].VT;
9351     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
9352 
9353     Type *ArgTy = nullptr;
9354     if (IsRet)
9355       ArgTy = FType->getReturnType();
9356     else if (Ins[i].isOrigArg())
9357       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
9358 
9359     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9360     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9361            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
9362            FirstMaskArgument)) {
9363       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
9364                         << EVT(ArgVT).getEVTString() << '\n');
9365       llvm_unreachable(nullptr);
9366     }
9367   }
9368 }
9369 
9370 void RISCVTargetLowering::analyzeOutputArgs(
9371     MachineFunction &MF, CCState &CCInfo,
9372     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
9373     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
9374   unsigned NumArgs = Outs.size();
9375 
9376   Optional<unsigned> FirstMaskArgument;
9377   if (Subtarget.hasVInstructions())
9378     FirstMaskArgument = preAssignMask(Outs);
9379 
9380   for (unsigned i = 0; i != NumArgs; i++) {
9381     MVT ArgVT = Outs[i].VT;
9382     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
9383     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
9384 
9385     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
9386     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
9387            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
9388            FirstMaskArgument)) {
9389       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
9390                         << EVT(ArgVT).getEVTString() << "\n");
9391       llvm_unreachable(nullptr);
9392     }
9393   }
9394 }
9395 
9396 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
9397 // values.
9398 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
9399                                    const CCValAssign &VA, const SDLoc &DL,
9400                                    const RISCVSubtarget &Subtarget) {
9401   switch (VA.getLocInfo()) {
9402   default:
9403     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9404   case CCValAssign::Full:
9405     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
9406       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
9407     break;
9408   case CCValAssign::BCvt:
9409     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9410       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
9411     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9412       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
9413     else
9414       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
9415     break;
9416   }
9417   return Val;
9418 }
9419 
9420 // The caller is responsible for loading the full value if the argument is
9421 // passed with CCValAssign::Indirect.
9422 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
9423                                 const CCValAssign &VA, const SDLoc &DL,
9424                                 const RISCVTargetLowering &TLI) {
9425   MachineFunction &MF = DAG.getMachineFunction();
9426   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9427   EVT LocVT = VA.getLocVT();
9428   SDValue Val;
9429   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
9430   Register VReg = RegInfo.createVirtualRegister(RC);
9431   RegInfo.addLiveIn(VA.getLocReg(), VReg);
9432   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
9433 
9434   if (VA.getLocInfo() == CCValAssign::Indirect)
9435     return Val;
9436 
9437   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
9438 }
9439 
9440 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
9441                                    const CCValAssign &VA, const SDLoc &DL,
9442                                    const RISCVSubtarget &Subtarget) {
9443   EVT LocVT = VA.getLocVT();
9444 
9445   switch (VA.getLocInfo()) {
9446   default:
9447     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9448   case CCValAssign::Full:
9449     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
9450       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
9451     break;
9452   case CCValAssign::BCvt:
9453     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
9454       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
9455     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
9456       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
9457     else
9458       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
9459     break;
9460   }
9461   return Val;
9462 }
9463 
9464 // The caller is responsible for loading the full value if the argument is
9465 // passed with CCValAssign::Indirect.
9466 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
9467                                 const CCValAssign &VA, const SDLoc &DL) {
9468   MachineFunction &MF = DAG.getMachineFunction();
9469   MachineFrameInfo &MFI = MF.getFrameInfo();
9470   EVT LocVT = VA.getLocVT();
9471   EVT ValVT = VA.getValVT();
9472   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
9473   if (ValVT.isScalableVector()) {
9474     // When the value is a scalable vector, we save the pointer which points to
9475     // the scalable vector value in the stack. The ValVT will be the pointer
9476     // type, instead of the scalable vector type.
9477     ValVT = LocVT;
9478   }
9479   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
9480                                  /*IsImmutable=*/true);
9481   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
9482   SDValue Val;
9483 
9484   ISD::LoadExtType ExtType;
9485   switch (VA.getLocInfo()) {
9486   default:
9487     llvm_unreachable("Unexpected CCValAssign::LocInfo");
9488   case CCValAssign::Full:
9489   case CCValAssign::Indirect:
9490   case CCValAssign::BCvt:
9491     ExtType = ISD::NON_EXTLOAD;
9492     break;
9493   }
9494   Val = DAG.getExtLoad(
9495       ExtType, DL, LocVT, Chain, FIN,
9496       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
9497   return Val;
9498 }
9499 
9500 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
9501                                        const CCValAssign &VA, const SDLoc &DL) {
9502   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
9503          "Unexpected VA");
9504   MachineFunction &MF = DAG.getMachineFunction();
9505   MachineFrameInfo &MFI = MF.getFrameInfo();
9506   MachineRegisterInfo &RegInfo = MF.getRegInfo();
9507 
9508   if (VA.isMemLoc()) {
9509     // f64 is passed on the stack.
9510     int FI =
9511         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
9512     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9513     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
9514                        MachinePointerInfo::getFixedStack(MF, FI));
9515   }
9516 
9517   assert(VA.isRegLoc() && "Expected register VA assignment");
9518 
9519   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9520   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
9521   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
9522   SDValue Hi;
9523   if (VA.getLocReg() == RISCV::X17) {
9524     // Second half of f64 is passed on the stack.
9525     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
9526     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
9527     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
9528                      MachinePointerInfo::getFixedStack(MF, FI));
9529   } else {
9530     // Second half of f64 is passed in another GPR.
9531     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
9532     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
9533     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
9534   }
9535   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
9536 }
9537 
9538 // FastCC has less than 1% performance improvement for some particular
9539 // benchmark. But theoretically, it may has benenfit for some cases.
9540 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
9541                             unsigned ValNo, MVT ValVT, MVT LocVT,
9542                             CCValAssign::LocInfo LocInfo,
9543                             ISD::ArgFlagsTy ArgFlags, CCState &State,
9544                             bool IsFixed, bool IsRet, Type *OrigTy,
9545                             const RISCVTargetLowering &TLI,
9546                             Optional<unsigned> FirstMaskArgument) {
9547 
9548   // X5 and X6 might be used for save-restore libcall.
9549   static const MCPhysReg GPRList[] = {
9550       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
9551       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
9552       RISCV::X29, RISCV::X30, RISCV::X31};
9553 
9554   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9555     if (unsigned Reg = State.AllocateReg(GPRList)) {
9556       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9557       return false;
9558     }
9559   }
9560 
9561   if (LocVT == MVT::f16) {
9562     static const MCPhysReg FPR16List[] = {
9563         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
9564         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
9565         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
9566         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
9567     if (unsigned Reg = State.AllocateReg(FPR16List)) {
9568       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9569       return false;
9570     }
9571   }
9572 
9573   if (LocVT == MVT::f32) {
9574     static const MCPhysReg FPR32List[] = {
9575         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
9576         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
9577         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
9578         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
9579     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9580       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9581       return false;
9582     }
9583   }
9584 
9585   if (LocVT == MVT::f64) {
9586     static const MCPhysReg FPR64List[] = {
9587         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
9588         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
9589         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
9590         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
9591     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9592       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9593       return false;
9594     }
9595   }
9596 
9597   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
9598     unsigned Offset4 = State.AllocateStack(4, Align(4));
9599     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
9600     return false;
9601   }
9602 
9603   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
9604     unsigned Offset5 = State.AllocateStack(8, Align(8));
9605     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
9606     return false;
9607   }
9608 
9609   if (LocVT.isVector()) {
9610     if (unsigned Reg =
9611             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
9612       // Fixed-length vectors are located in the corresponding scalable-vector
9613       // container types.
9614       if (ValVT.isFixedLengthVector())
9615         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
9616       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9617     } else {
9618       // Try and pass the address via a "fast" GPR.
9619       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
9620         LocInfo = CCValAssign::Indirect;
9621         LocVT = TLI.getSubtarget().getXLenVT();
9622         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
9623       } else if (ValVT.isFixedLengthVector()) {
9624         auto StackAlign =
9625             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
9626         unsigned StackOffset =
9627             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
9628         State.addLoc(
9629             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
9630       } else {
9631         // Can't pass scalable vectors on the stack.
9632         return true;
9633       }
9634     }
9635 
9636     return false;
9637   }
9638 
9639   return true; // CC didn't match.
9640 }
9641 
9642 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
9643                          CCValAssign::LocInfo LocInfo,
9644                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
9645 
9646   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
9647     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
9648     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
9649     static const MCPhysReg GPRList[] = {
9650         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
9651         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
9652     if (unsigned Reg = State.AllocateReg(GPRList)) {
9653       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9654       return false;
9655     }
9656   }
9657 
9658   if (LocVT == MVT::f32) {
9659     // Pass in STG registers: F1, ..., F6
9660     //                        fs0 ... fs5
9661     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
9662                                           RISCV::F18_F, RISCV::F19_F,
9663                                           RISCV::F20_F, RISCV::F21_F};
9664     if (unsigned Reg = State.AllocateReg(FPR32List)) {
9665       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9666       return false;
9667     }
9668   }
9669 
9670   if (LocVT == MVT::f64) {
9671     // Pass in STG registers: D1, ..., D6
9672     //                        fs6 ... fs11
9673     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
9674                                           RISCV::F24_D, RISCV::F25_D,
9675                                           RISCV::F26_D, RISCV::F27_D};
9676     if (unsigned Reg = State.AllocateReg(FPR64List)) {
9677       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
9678       return false;
9679     }
9680   }
9681 
9682   report_fatal_error("No registers left in GHC calling convention");
9683   return true;
9684 }
9685 
9686 // Transform physical registers into virtual registers.
9687 SDValue RISCVTargetLowering::LowerFormalArguments(
9688     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
9689     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
9690     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
9691 
9692   MachineFunction &MF = DAG.getMachineFunction();
9693 
9694   switch (CallConv) {
9695   default:
9696     report_fatal_error("Unsupported calling convention");
9697   case CallingConv::C:
9698   case CallingConv::Fast:
9699     break;
9700   case CallingConv::GHC:
9701     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
9702         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
9703       report_fatal_error(
9704         "GHC calling convention requires the F and D instruction set extensions");
9705   }
9706 
9707   const Function &Func = MF.getFunction();
9708   if (Func.hasFnAttribute("interrupt")) {
9709     if (!Func.arg_empty())
9710       report_fatal_error(
9711         "Functions with the interrupt attribute cannot have arguments!");
9712 
9713     StringRef Kind =
9714       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
9715 
9716     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
9717       report_fatal_error(
9718         "Function interrupt attribute argument not supported!");
9719   }
9720 
9721   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9722   MVT XLenVT = Subtarget.getXLenVT();
9723   unsigned XLenInBytes = Subtarget.getXLen() / 8;
9724   // Used with vargs to acumulate store chains.
9725   std::vector<SDValue> OutChains;
9726 
9727   // Assign locations to all of the incoming arguments.
9728   SmallVector<CCValAssign, 16> ArgLocs;
9729   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
9730 
9731   if (CallConv == CallingConv::GHC)
9732     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
9733   else
9734     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
9735                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
9736                                                    : CC_RISCV);
9737 
9738   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
9739     CCValAssign &VA = ArgLocs[i];
9740     SDValue ArgValue;
9741     // Passing f64 on RV32D with a soft float ABI must be handled as a special
9742     // case.
9743     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
9744       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
9745     else if (VA.isRegLoc())
9746       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
9747     else
9748       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
9749 
9750     if (VA.getLocInfo() == CCValAssign::Indirect) {
9751       // If the original argument was split and passed by reference (e.g. i128
9752       // on RV32), we need to load all parts of it here (using the same
9753       // address). Vectors may be partly split to registers and partly to the
9754       // stack, in which case the base address is partly offset and subsequent
9755       // stores are relative to that.
9756       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
9757                                    MachinePointerInfo()));
9758       unsigned ArgIndex = Ins[i].OrigArgIndex;
9759       unsigned ArgPartOffset = Ins[i].PartOffset;
9760       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
9761       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
9762         CCValAssign &PartVA = ArgLocs[i + 1];
9763         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
9764         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
9765         if (PartVA.getValVT().isScalableVector())
9766           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
9767         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
9768         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
9769                                      MachinePointerInfo()));
9770         ++i;
9771       }
9772       continue;
9773     }
9774     InVals.push_back(ArgValue);
9775   }
9776 
9777   if (IsVarArg) {
9778     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
9779     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
9780     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
9781     MachineFrameInfo &MFI = MF.getFrameInfo();
9782     MachineRegisterInfo &RegInfo = MF.getRegInfo();
9783     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
9784 
9785     // Offset of the first variable argument from stack pointer, and size of
9786     // the vararg save area. For now, the varargs save area is either zero or
9787     // large enough to hold a0-a7.
9788     int VaArgOffset, VarArgsSaveSize;
9789 
9790     // If all registers are allocated, then all varargs must be passed on the
9791     // stack and we don't need to save any argregs.
9792     if (ArgRegs.size() == Idx) {
9793       VaArgOffset = CCInfo.getNextStackOffset();
9794       VarArgsSaveSize = 0;
9795     } else {
9796       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
9797       VaArgOffset = -VarArgsSaveSize;
9798     }
9799 
9800     // Record the frame index of the first variable argument
9801     // which is a value necessary to VASTART.
9802     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
9803     RVFI->setVarArgsFrameIndex(FI);
9804 
9805     // If saving an odd number of registers then create an extra stack slot to
9806     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
9807     // offsets to even-numbered registered remain 2*XLEN-aligned.
9808     if (Idx % 2) {
9809       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
9810       VarArgsSaveSize += XLenInBytes;
9811     }
9812 
9813     // Copy the integer registers that may have been used for passing varargs
9814     // to the vararg save area.
9815     for (unsigned I = Idx; I < ArgRegs.size();
9816          ++I, VaArgOffset += XLenInBytes) {
9817       const Register Reg = RegInfo.createVirtualRegister(RC);
9818       RegInfo.addLiveIn(ArgRegs[I], Reg);
9819       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
9820       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
9821       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
9822       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
9823                                    MachinePointerInfo::getFixedStack(MF, FI));
9824       cast<StoreSDNode>(Store.getNode())
9825           ->getMemOperand()
9826           ->setValue((Value *)nullptr);
9827       OutChains.push_back(Store);
9828     }
9829     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
9830   }
9831 
9832   // All stores are grouped in one node to allow the matching between
9833   // the size of Ins and InVals. This only happens for vararg functions.
9834   if (!OutChains.empty()) {
9835     OutChains.push_back(Chain);
9836     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
9837   }
9838 
9839   return Chain;
9840 }
9841 
9842 /// isEligibleForTailCallOptimization - Check whether the call is eligible
9843 /// for tail call optimization.
9844 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
9845 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
9846     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
9847     const SmallVector<CCValAssign, 16> &ArgLocs) const {
9848 
9849   auto &Callee = CLI.Callee;
9850   auto CalleeCC = CLI.CallConv;
9851   auto &Outs = CLI.Outs;
9852   auto &Caller = MF.getFunction();
9853   auto CallerCC = Caller.getCallingConv();
9854 
9855   // Exception-handling functions need a special set of instructions to
9856   // indicate a return to the hardware. Tail-calling another function would
9857   // probably break this.
9858   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
9859   // should be expanded as new function attributes are introduced.
9860   if (Caller.hasFnAttribute("interrupt"))
9861     return false;
9862 
9863   // Do not tail call opt if the stack is used to pass parameters.
9864   if (CCInfo.getNextStackOffset() != 0)
9865     return false;
9866 
9867   // Do not tail call opt if any parameters need to be passed indirectly.
9868   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
9869   // passed indirectly. So the address of the value will be passed in a
9870   // register, or if not available, then the address is put on the stack. In
9871   // order to pass indirectly, space on the stack often needs to be allocated
9872   // in order to store the value. In this case the CCInfo.getNextStackOffset()
9873   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
9874   // are passed CCValAssign::Indirect.
9875   for (auto &VA : ArgLocs)
9876     if (VA.getLocInfo() == CCValAssign::Indirect)
9877       return false;
9878 
9879   // Do not tail call opt if either caller or callee uses struct return
9880   // semantics.
9881   auto IsCallerStructRet = Caller.hasStructRetAttr();
9882   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
9883   if (IsCallerStructRet || IsCalleeStructRet)
9884     return false;
9885 
9886   // Externally-defined functions with weak linkage should not be
9887   // tail-called. The behaviour of branch instructions in this situation (as
9888   // used for tail calls) is implementation-defined, so we cannot rely on the
9889   // linker replacing the tail call with a return.
9890   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
9891     const GlobalValue *GV = G->getGlobal();
9892     if (GV->hasExternalWeakLinkage())
9893       return false;
9894   }
9895 
9896   // The callee has to preserve all registers the caller needs to preserve.
9897   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
9898   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
9899   if (CalleeCC != CallerCC) {
9900     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
9901     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
9902       return false;
9903   }
9904 
9905   // Byval parameters hand the function a pointer directly into the stack area
9906   // we want to reuse during a tail call. Working around this *is* possible
9907   // but less efficient and uglier in LowerCall.
9908   for (auto &Arg : Outs)
9909     if (Arg.Flags.isByVal())
9910       return false;
9911 
9912   return true;
9913 }
9914 
9915 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
9916   return DAG.getDataLayout().getPrefTypeAlign(
9917       VT.getTypeForEVT(*DAG.getContext()));
9918 }
9919 
9920 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
9921 // and output parameter nodes.
9922 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
9923                                        SmallVectorImpl<SDValue> &InVals) const {
9924   SelectionDAG &DAG = CLI.DAG;
9925   SDLoc &DL = CLI.DL;
9926   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
9927   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
9928   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
9929   SDValue Chain = CLI.Chain;
9930   SDValue Callee = CLI.Callee;
9931   bool &IsTailCall = CLI.IsTailCall;
9932   CallingConv::ID CallConv = CLI.CallConv;
9933   bool IsVarArg = CLI.IsVarArg;
9934   EVT PtrVT = getPointerTy(DAG.getDataLayout());
9935   MVT XLenVT = Subtarget.getXLenVT();
9936 
9937   MachineFunction &MF = DAG.getMachineFunction();
9938 
9939   // Analyze the operands of the call, assigning locations to each operand.
9940   SmallVector<CCValAssign, 16> ArgLocs;
9941   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
9942 
9943   if (CallConv == CallingConv::GHC)
9944     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
9945   else
9946     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
9947                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
9948                                                     : CC_RISCV);
9949 
9950   // Check if it's really possible to do a tail call.
9951   if (IsTailCall)
9952     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
9953 
9954   if (IsTailCall)
9955     ++NumTailCalls;
9956   else if (CLI.CB && CLI.CB->isMustTailCall())
9957     report_fatal_error("failed to perform tail call elimination on a call "
9958                        "site marked musttail");
9959 
9960   // Get a count of how many bytes are to be pushed on the stack.
9961   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
9962 
9963   // Create local copies for byval args
9964   SmallVector<SDValue, 8> ByValArgs;
9965   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
9966     ISD::ArgFlagsTy Flags = Outs[i].Flags;
9967     if (!Flags.isByVal())
9968       continue;
9969 
9970     SDValue Arg = OutVals[i];
9971     unsigned Size = Flags.getByValSize();
9972     Align Alignment = Flags.getNonZeroByValAlign();
9973 
9974     int FI =
9975         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
9976     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
9977     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
9978 
9979     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
9980                           /*IsVolatile=*/false,
9981                           /*AlwaysInline=*/false, IsTailCall,
9982                           MachinePointerInfo(), MachinePointerInfo());
9983     ByValArgs.push_back(FIPtr);
9984   }
9985 
9986   if (!IsTailCall)
9987     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
9988 
9989   // Copy argument values to their designated locations.
9990   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
9991   SmallVector<SDValue, 8> MemOpChains;
9992   SDValue StackPtr;
9993   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
9994     CCValAssign &VA = ArgLocs[i];
9995     SDValue ArgValue = OutVals[i];
9996     ISD::ArgFlagsTy Flags = Outs[i].Flags;
9997 
9998     // Handle passing f64 on RV32D with a soft float ABI as a special case.
9999     bool IsF64OnRV32DSoftABI =
10000         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
10001     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
10002       SDValue SplitF64 = DAG.getNode(
10003           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
10004       SDValue Lo = SplitF64.getValue(0);
10005       SDValue Hi = SplitF64.getValue(1);
10006 
10007       Register RegLo = VA.getLocReg();
10008       RegsToPass.push_back(std::make_pair(RegLo, Lo));
10009 
10010       if (RegLo == RISCV::X17) {
10011         // Second half of f64 is passed on the stack.
10012         // Work out the address of the stack slot.
10013         if (!StackPtr.getNode())
10014           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10015         // Emit the store.
10016         MemOpChains.push_back(
10017             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
10018       } else {
10019         // Second half of f64 is passed in another GPR.
10020         assert(RegLo < RISCV::X31 && "Invalid register pair");
10021         Register RegHigh = RegLo + 1;
10022         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
10023       }
10024       continue;
10025     }
10026 
10027     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
10028     // as any other MemLoc.
10029 
10030     // Promote the value if needed.
10031     // For now, only handle fully promoted and indirect arguments.
10032     if (VA.getLocInfo() == CCValAssign::Indirect) {
10033       // Store the argument in a stack slot and pass its address.
10034       Align StackAlign =
10035           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
10036                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
10037       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
10038       // If the original argument was split (e.g. i128), we need
10039       // to store the required parts of it here (and pass just one address).
10040       // Vectors may be partly split to registers and partly to the stack, in
10041       // which case the base address is partly offset and subsequent stores are
10042       // relative to that.
10043       unsigned ArgIndex = Outs[i].OrigArgIndex;
10044       unsigned ArgPartOffset = Outs[i].PartOffset;
10045       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10046       // Calculate the total size to store. We don't have access to what we're
10047       // actually storing other than performing the loop and collecting the
10048       // info.
10049       SmallVector<std::pair<SDValue, SDValue>> Parts;
10050       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
10051         SDValue PartValue = OutVals[i + 1];
10052         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
10053         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10054         EVT PartVT = PartValue.getValueType();
10055         if (PartVT.isScalableVector())
10056           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10057         StoredSize += PartVT.getStoreSize();
10058         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
10059         Parts.push_back(std::make_pair(PartValue, Offset));
10060         ++i;
10061       }
10062       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
10063       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
10064       MemOpChains.push_back(
10065           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
10066                        MachinePointerInfo::getFixedStack(MF, FI)));
10067       for (const auto &Part : Parts) {
10068         SDValue PartValue = Part.first;
10069         SDValue PartOffset = Part.second;
10070         SDValue Address =
10071             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
10072         MemOpChains.push_back(
10073             DAG.getStore(Chain, DL, PartValue, Address,
10074                          MachinePointerInfo::getFixedStack(MF, FI)));
10075       }
10076       ArgValue = SpillSlot;
10077     } else {
10078       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
10079     }
10080 
10081     // Use local copy if it is a byval arg.
10082     if (Flags.isByVal())
10083       ArgValue = ByValArgs[j++];
10084 
10085     if (VA.isRegLoc()) {
10086       // Queue up the argument copies and emit them at the end.
10087       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
10088     } else {
10089       assert(VA.isMemLoc() && "Argument not register or memory");
10090       assert(!IsTailCall && "Tail call not allowed if stack is used "
10091                             "for passing parameters");
10092 
10093       // Work out the address of the stack slot.
10094       if (!StackPtr.getNode())
10095         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10096       SDValue Address =
10097           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
10098                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
10099 
10100       // Emit the store.
10101       MemOpChains.push_back(
10102           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
10103     }
10104   }
10105 
10106   // Join the stores, which are independent of one another.
10107   if (!MemOpChains.empty())
10108     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
10109 
10110   SDValue Glue;
10111 
10112   // Build a sequence of copy-to-reg nodes, chained and glued together.
10113   for (auto &Reg : RegsToPass) {
10114     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
10115     Glue = Chain.getValue(1);
10116   }
10117 
10118   // Validate that none of the argument registers have been marked as
10119   // reserved, if so report an error. Do the same for the return address if this
10120   // is not a tailcall.
10121   validateCCReservedRegs(RegsToPass, MF);
10122   if (!IsTailCall &&
10123       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
10124     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10125         MF.getFunction(),
10126         "Return address register required, but has been reserved."});
10127 
10128   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
10129   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
10130   // split it and then direct call can be matched by PseudoCALL.
10131   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
10132     const GlobalValue *GV = S->getGlobal();
10133 
10134     unsigned OpFlags = RISCVII::MO_CALL;
10135     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
10136       OpFlags = RISCVII::MO_PLT;
10137 
10138     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
10139   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
10140     unsigned OpFlags = RISCVII::MO_CALL;
10141 
10142     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
10143                                                  nullptr))
10144       OpFlags = RISCVII::MO_PLT;
10145 
10146     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
10147   }
10148 
10149   // The first call operand is the chain and the second is the target address.
10150   SmallVector<SDValue, 8> Ops;
10151   Ops.push_back(Chain);
10152   Ops.push_back(Callee);
10153 
10154   // Add argument registers to the end of the list so that they are
10155   // known live into the call.
10156   for (auto &Reg : RegsToPass)
10157     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
10158 
10159   if (!IsTailCall) {
10160     // Add a register mask operand representing the call-preserved registers.
10161     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
10162     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
10163     assert(Mask && "Missing call preserved mask for calling convention");
10164     Ops.push_back(DAG.getRegisterMask(Mask));
10165   }
10166 
10167   // Glue the call to the argument copies, if any.
10168   if (Glue.getNode())
10169     Ops.push_back(Glue);
10170 
10171   // Emit the call.
10172   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10173 
10174   if (IsTailCall) {
10175     MF.getFrameInfo().setHasTailCall();
10176     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
10177   }
10178 
10179   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
10180   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
10181   Glue = Chain.getValue(1);
10182 
10183   // Mark the end of the call, which is glued to the call itself.
10184   Chain = DAG.getCALLSEQ_END(Chain,
10185                              DAG.getConstant(NumBytes, DL, PtrVT, true),
10186                              DAG.getConstant(0, DL, PtrVT, true),
10187                              Glue, DL);
10188   Glue = Chain.getValue(1);
10189 
10190   // Assign locations to each value returned by this call.
10191   SmallVector<CCValAssign, 16> RVLocs;
10192   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
10193   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
10194 
10195   // Copy all of the result registers out of their specified physreg.
10196   for (auto &VA : RVLocs) {
10197     // Copy the value out
10198     SDValue RetValue =
10199         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
10200     // Glue the RetValue to the end of the call sequence
10201     Chain = RetValue.getValue(1);
10202     Glue = RetValue.getValue(2);
10203 
10204     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10205       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
10206       SDValue RetValue2 =
10207           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
10208       Chain = RetValue2.getValue(1);
10209       Glue = RetValue2.getValue(2);
10210       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
10211                              RetValue2);
10212     }
10213 
10214     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
10215 
10216     InVals.push_back(RetValue);
10217   }
10218 
10219   return Chain;
10220 }
10221 
10222 bool RISCVTargetLowering::CanLowerReturn(
10223     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
10224     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
10225   SmallVector<CCValAssign, 16> RVLocs;
10226   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
10227 
10228   Optional<unsigned> FirstMaskArgument;
10229   if (Subtarget.hasVInstructions())
10230     FirstMaskArgument = preAssignMask(Outs);
10231 
10232   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10233     MVT VT = Outs[i].VT;
10234     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10235     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10236     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
10237                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
10238                  *this, FirstMaskArgument))
10239       return false;
10240   }
10241   return true;
10242 }
10243 
10244 SDValue
10245 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
10246                                  bool IsVarArg,
10247                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
10248                                  const SmallVectorImpl<SDValue> &OutVals,
10249                                  const SDLoc &DL, SelectionDAG &DAG) const {
10250   const MachineFunction &MF = DAG.getMachineFunction();
10251   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10252 
10253   // Stores the assignment of the return value to a location.
10254   SmallVector<CCValAssign, 16> RVLocs;
10255 
10256   // Info about the registers and stack slot.
10257   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
10258                  *DAG.getContext());
10259 
10260   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
10261                     nullptr, CC_RISCV);
10262 
10263   if (CallConv == CallingConv::GHC && !RVLocs.empty())
10264     report_fatal_error("GHC functions return void only");
10265 
10266   SDValue Glue;
10267   SmallVector<SDValue, 4> RetOps(1, Chain);
10268 
10269   // Copy the result values into the output registers.
10270   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
10271     SDValue Val = OutVals[i];
10272     CCValAssign &VA = RVLocs[i];
10273     assert(VA.isRegLoc() && "Can only return in registers!");
10274 
10275     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10276       // Handle returning f64 on RV32D with a soft float ABI.
10277       assert(VA.isRegLoc() && "Expected return via registers");
10278       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
10279                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
10280       SDValue Lo = SplitF64.getValue(0);
10281       SDValue Hi = SplitF64.getValue(1);
10282       Register RegLo = VA.getLocReg();
10283       assert(RegLo < RISCV::X31 && "Invalid register pair");
10284       Register RegHi = RegLo + 1;
10285 
10286       if (STI.isRegisterReservedByUser(RegLo) ||
10287           STI.isRegisterReservedByUser(RegHi))
10288         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10289             MF.getFunction(),
10290             "Return value register required, but has been reserved."});
10291 
10292       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
10293       Glue = Chain.getValue(1);
10294       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
10295       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
10296       Glue = Chain.getValue(1);
10297       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
10298     } else {
10299       // Handle a 'normal' return.
10300       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
10301       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
10302 
10303       if (STI.isRegisterReservedByUser(VA.getLocReg()))
10304         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10305             MF.getFunction(),
10306             "Return value register required, but has been reserved."});
10307 
10308       // Guarantee that all emitted copies are stuck together.
10309       Glue = Chain.getValue(1);
10310       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
10311     }
10312   }
10313 
10314   RetOps[0] = Chain; // Update chain.
10315 
10316   // Add the glue node if we have it.
10317   if (Glue.getNode()) {
10318     RetOps.push_back(Glue);
10319   }
10320 
10321   unsigned RetOpc = RISCVISD::RET_FLAG;
10322   // Interrupt service routines use different return instructions.
10323   const Function &Func = DAG.getMachineFunction().getFunction();
10324   if (Func.hasFnAttribute("interrupt")) {
10325     if (!Func.getReturnType()->isVoidTy())
10326       report_fatal_error(
10327           "Functions with the interrupt attribute must have void return type!");
10328 
10329     MachineFunction &MF = DAG.getMachineFunction();
10330     StringRef Kind =
10331       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10332 
10333     if (Kind == "user")
10334       RetOpc = RISCVISD::URET_FLAG;
10335     else if (Kind == "supervisor")
10336       RetOpc = RISCVISD::SRET_FLAG;
10337     else
10338       RetOpc = RISCVISD::MRET_FLAG;
10339   }
10340 
10341   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
10342 }
10343 
10344 void RISCVTargetLowering::validateCCReservedRegs(
10345     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
10346     MachineFunction &MF) const {
10347   const Function &F = MF.getFunction();
10348   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10349 
10350   if (llvm::any_of(Regs, [&STI](auto Reg) {
10351         return STI.isRegisterReservedByUser(Reg.first);
10352       }))
10353     F.getContext().diagnose(DiagnosticInfoUnsupported{
10354         F, "Argument register required, but has been reserved."});
10355 }
10356 
10357 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
10358   return CI->isTailCall();
10359 }
10360 
10361 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
10362 #define NODE_NAME_CASE(NODE)                                                   \
10363   case RISCVISD::NODE:                                                         \
10364     return "RISCVISD::" #NODE;
10365   // clang-format off
10366   switch ((RISCVISD::NodeType)Opcode) {
10367   case RISCVISD::FIRST_NUMBER:
10368     break;
10369   NODE_NAME_CASE(RET_FLAG)
10370   NODE_NAME_CASE(URET_FLAG)
10371   NODE_NAME_CASE(SRET_FLAG)
10372   NODE_NAME_CASE(MRET_FLAG)
10373   NODE_NAME_CASE(CALL)
10374   NODE_NAME_CASE(SELECT_CC)
10375   NODE_NAME_CASE(BR_CC)
10376   NODE_NAME_CASE(BuildPairF64)
10377   NODE_NAME_CASE(SplitF64)
10378   NODE_NAME_CASE(TAIL)
10379   NODE_NAME_CASE(MULHSU)
10380   NODE_NAME_CASE(SLLW)
10381   NODE_NAME_CASE(SRAW)
10382   NODE_NAME_CASE(SRLW)
10383   NODE_NAME_CASE(DIVW)
10384   NODE_NAME_CASE(DIVUW)
10385   NODE_NAME_CASE(REMUW)
10386   NODE_NAME_CASE(ROLW)
10387   NODE_NAME_CASE(RORW)
10388   NODE_NAME_CASE(CLZW)
10389   NODE_NAME_CASE(CTZW)
10390   NODE_NAME_CASE(FSLW)
10391   NODE_NAME_CASE(FSRW)
10392   NODE_NAME_CASE(FSL)
10393   NODE_NAME_CASE(FSR)
10394   NODE_NAME_CASE(FMV_H_X)
10395   NODE_NAME_CASE(FMV_X_ANYEXTH)
10396   NODE_NAME_CASE(FMV_W_X_RV64)
10397   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
10398   NODE_NAME_CASE(FCVT_X)
10399   NODE_NAME_CASE(FCVT_XU)
10400   NODE_NAME_CASE(FCVT_W_RV64)
10401   NODE_NAME_CASE(FCVT_WU_RV64)
10402   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
10403   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
10404   NODE_NAME_CASE(READ_CYCLE_WIDE)
10405   NODE_NAME_CASE(GREV)
10406   NODE_NAME_CASE(GREVW)
10407   NODE_NAME_CASE(GORC)
10408   NODE_NAME_CASE(GORCW)
10409   NODE_NAME_CASE(SHFL)
10410   NODE_NAME_CASE(SHFLW)
10411   NODE_NAME_CASE(UNSHFL)
10412   NODE_NAME_CASE(UNSHFLW)
10413   NODE_NAME_CASE(BFP)
10414   NODE_NAME_CASE(BFPW)
10415   NODE_NAME_CASE(BCOMPRESS)
10416   NODE_NAME_CASE(BCOMPRESSW)
10417   NODE_NAME_CASE(BDECOMPRESS)
10418   NODE_NAME_CASE(BDECOMPRESSW)
10419   NODE_NAME_CASE(VMV_V_X_VL)
10420   NODE_NAME_CASE(VFMV_V_F_VL)
10421   NODE_NAME_CASE(VMV_X_S)
10422   NODE_NAME_CASE(VMV_S_X_VL)
10423   NODE_NAME_CASE(VFMV_S_F_VL)
10424   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
10425   NODE_NAME_CASE(READ_VLENB)
10426   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
10427   NODE_NAME_CASE(VSLIDEUP_VL)
10428   NODE_NAME_CASE(VSLIDE1UP_VL)
10429   NODE_NAME_CASE(VSLIDEDOWN_VL)
10430   NODE_NAME_CASE(VSLIDE1DOWN_VL)
10431   NODE_NAME_CASE(VID_VL)
10432   NODE_NAME_CASE(VFNCVT_ROD_VL)
10433   NODE_NAME_CASE(VECREDUCE_ADD_VL)
10434   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
10435   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
10436   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
10437   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
10438   NODE_NAME_CASE(VECREDUCE_AND_VL)
10439   NODE_NAME_CASE(VECREDUCE_OR_VL)
10440   NODE_NAME_CASE(VECREDUCE_XOR_VL)
10441   NODE_NAME_CASE(VECREDUCE_FADD_VL)
10442   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
10443   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
10444   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
10445   NODE_NAME_CASE(ADD_VL)
10446   NODE_NAME_CASE(AND_VL)
10447   NODE_NAME_CASE(MUL_VL)
10448   NODE_NAME_CASE(OR_VL)
10449   NODE_NAME_CASE(SDIV_VL)
10450   NODE_NAME_CASE(SHL_VL)
10451   NODE_NAME_CASE(SREM_VL)
10452   NODE_NAME_CASE(SRA_VL)
10453   NODE_NAME_CASE(SRL_VL)
10454   NODE_NAME_CASE(SUB_VL)
10455   NODE_NAME_CASE(UDIV_VL)
10456   NODE_NAME_CASE(UREM_VL)
10457   NODE_NAME_CASE(XOR_VL)
10458   NODE_NAME_CASE(SADDSAT_VL)
10459   NODE_NAME_CASE(UADDSAT_VL)
10460   NODE_NAME_CASE(SSUBSAT_VL)
10461   NODE_NAME_CASE(USUBSAT_VL)
10462   NODE_NAME_CASE(FADD_VL)
10463   NODE_NAME_CASE(FSUB_VL)
10464   NODE_NAME_CASE(FMUL_VL)
10465   NODE_NAME_CASE(FDIV_VL)
10466   NODE_NAME_CASE(FNEG_VL)
10467   NODE_NAME_CASE(FABS_VL)
10468   NODE_NAME_CASE(FSQRT_VL)
10469   NODE_NAME_CASE(FMA_VL)
10470   NODE_NAME_CASE(FCOPYSIGN_VL)
10471   NODE_NAME_CASE(SMIN_VL)
10472   NODE_NAME_CASE(SMAX_VL)
10473   NODE_NAME_CASE(UMIN_VL)
10474   NODE_NAME_CASE(UMAX_VL)
10475   NODE_NAME_CASE(FMINNUM_VL)
10476   NODE_NAME_CASE(FMAXNUM_VL)
10477   NODE_NAME_CASE(MULHS_VL)
10478   NODE_NAME_CASE(MULHU_VL)
10479   NODE_NAME_CASE(FP_TO_SINT_VL)
10480   NODE_NAME_CASE(FP_TO_UINT_VL)
10481   NODE_NAME_CASE(SINT_TO_FP_VL)
10482   NODE_NAME_CASE(UINT_TO_FP_VL)
10483   NODE_NAME_CASE(FP_EXTEND_VL)
10484   NODE_NAME_CASE(FP_ROUND_VL)
10485   NODE_NAME_CASE(VWMUL_VL)
10486   NODE_NAME_CASE(VWMULU_VL)
10487   NODE_NAME_CASE(VWMULSU_VL)
10488   NODE_NAME_CASE(VWADD_VL)
10489   NODE_NAME_CASE(VWADDU_VL)
10490   NODE_NAME_CASE(VWSUB_VL)
10491   NODE_NAME_CASE(VWSUBU_VL)
10492   NODE_NAME_CASE(VWADD_W_VL)
10493   NODE_NAME_CASE(VWADDU_W_VL)
10494   NODE_NAME_CASE(VWSUB_W_VL)
10495   NODE_NAME_CASE(VWSUBU_W_VL)
10496   NODE_NAME_CASE(SETCC_VL)
10497   NODE_NAME_CASE(VSELECT_VL)
10498   NODE_NAME_CASE(VP_MERGE_VL)
10499   NODE_NAME_CASE(VMAND_VL)
10500   NODE_NAME_CASE(VMOR_VL)
10501   NODE_NAME_CASE(VMXOR_VL)
10502   NODE_NAME_CASE(VMCLR_VL)
10503   NODE_NAME_CASE(VMSET_VL)
10504   NODE_NAME_CASE(VRGATHER_VX_VL)
10505   NODE_NAME_CASE(VRGATHER_VV_VL)
10506   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
10507   NODE_NAME_CASE(VSEXT_VL)
10508   NODE_NAME_CASE(VZEXT_VL)
10509   NODE_NAME_CASE(VCPOP_VL)
10510   NODE_NAME_CASE(VLE_VL)
10511   NODE_NAME_CASE(VSE_VL)
10512   NODE_NAME_CASE(READ_CSR)
10513   NODE_NAME_CASE(WRITE_CSR)
10514   NODE_NAME_CASE(SWAP_CSR)
10515   }
10516   // clang-format on
10517   return nullptr;
10518 #undef NODE_NAME_CASE
10519 }
10520 
10521 /// getConstraintType - Given a constraint letter, return the type of
10522 /// constraint it is for this target.
10523 RISCVTargetLowering::ConstraintType
10524 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
10525   if (Constraint.size() == 1) {
10526     switch (Constraint[0]) {
10527     default:
10528       break;
10529     case 'f':
10530       return C_RegisterClass;
10531     case 'I':
10532     case 'J':
10533     case 'K':
10534       return C_Immediate;
10535     case 'A':
10536       return C_Memory;
10537     case 'S': // A symbolic address
10538       return C_Other;
10539     }
10540   } else {
10541     if (Constraint == "vr" || Constraint == "vm")
10542       return C_RegisterClass;
10543   }
10544   return TargetLowering::getConstraintType(Constraint);
10545 }
10546 
10547 std::pair<unsigned, const TargetRegisterClass *>
10548 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
10549                                                   StringRef Constraint,
10550                                                   MVT VT) const {
10551   // First, see if this is a constraint that directly corresponds to a
10552   // RISCV register class.
10553   if (Constraint.size() == 1) {
10554     switch (Constraint[0]) {
10555     case 'r':
10556       // TODO: Support fixed vectors up to XLen for P extension?
10557       if (VT.isVector())
10558         break;
10559       return std::make_pair(0U, &RISCV::GPRRegClass);
10560     case 'f':
10561       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
10562         return std::make_pair(0U, &RISCV::FPR16RegClass);
10563       if (Subtarget.hasStdExtF() && VT == MVT::f32)
10564         return std::make_pair(0U, &RISCV::FPR32RegClass);
10565       if (Subtarget.hasStdExtD() && VT == MVT::f64)
10566         return std::make_pair(0U, &RISCV::FPR64RegClass);
10567       break;
10568     default:
10569       break;
10570     }
10571   } else if (Constraint == "vr") {
10572     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
10573                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10574       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
10575         return std::make_pair(0U, RC);
10576     }
10577   } else if (Constraint == "vm") {
10578     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
10579       return std::make_pair(0U, &RISCV::VMV0RegClass);
10580   }
10581 
10582   // Clang will correctly decode the usage of register name aliases into their
10583   // official names. However, other frontends like `rustc` do not. This allows
10584   // users of these frontends to use the ABI names for registers in LLVM-style
10585   // register constraints.
10586   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
10587                                .Case("{zero}", RISCV::X0)
10588                                .Case("{ra}", RISCV::X1)
10589                                .Case("{sp}", RISCV::X2)
10590                                .Case("{gp}", RISCV::X3)
10591                                .Case("{tp}", RISCV::X4)
10592                                .Case("{t0}", RISCV::X5)
10593                                .Case("{t1}", RISCV::X6)
10594                                .Case("{t2}", RISCV::X7)
10595                                .Cases("{s0}", "{fp}", RISCV::X8)
10596                                .Case("{s1}", RISCV::X9)
10597                                .Case("{a0}", RISCV::X10)
10598                                .Case("{a1}", RISCV::X11)
10599                                .Case("{a2}", RISCV::X12)
10600                                .Case("{a3}", RISCV::X13)
10601                                .Case("{a4}", RISCV::X14)
10602                                .Case("{a5}", RISCV::X15)
10603                                .Case("{a6}", RISCV::X16)
10604                                .Case("{a7}", RISCV::X17)
10605                                .Case("{s2}", RISCV::X18)
10606                                .Case("{s3}", RISCV::X19)
10607                                .Case("{s4}", RISCV::X20)
10608                                .Case("{s5}", RISCV::X21)
10609                                .Case("{s6}", RISCV::X22)
10610                                .Case("{s7}", RISCV::X23)
10611                                .Case("{s8}", RISCV::X24)
10612                                .Case("{s9}", RISCV::X25)
10613                                .Case("{s10}", RISCV::X26)
10614                                .Case("{s11}", RISCV::X27)
10615                                .Case("{t3}", RISCV::X28)
10616                                .Case("{t4}", RISCV::X29)
10617                                .Case("{t5}", RISCV::X30)
10618                                .Case("{t6}", RISCV::X31)
10619                                .Default(RISCV::NoRegister);
10620   if (XRegFromAlias != RISCV::NoRegister)
10621     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
10622 
10623   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
10624   // TableGen record rather than the AsmName to choose registers for InlineAsm
10625   // constraints, plus we want to match those names to the widest floating point
10626   // register type available, manually select floating point registers here.
10627   //
10628   // The second case is the ABI name of the register, so that frontends can also
10629   // use the ABI names in register constraint lists.
10630   if (Subtarget.hasStdExtF()) {
10631     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
10632                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
10633                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
10634                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
10635                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
10636                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
10637                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
10638                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
10639                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
10640                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
10641                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
10642                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
10643                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
10644                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
10645                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
10646                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
10647                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
10648                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
10649                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
10650                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
10651                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
10652                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
10653                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
10654                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
10655                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
10656                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
10657                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
10658                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
10659                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
10660                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
10661                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
10662                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
10663                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
10664                         .Default(RISCV::NoRegister);
10665     if (FReg != RISCV::NoRegister) {
10666       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
10667       if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
10668         unsigned RegNo = FReg - RISCV::F0_F;
10669         unsigned DReg = RISCV::F0_D + RegNo;
10670         return std::make_pair(DReg, &RISCV::FPR64RegClass);
10671       }
10672       if (VT == MVT::f32 || VT == MVT::Other)
10673         return std::make_pair(FReg, &RISCV::FPR32RegClass);
10674       if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
10675         unsigned RegNo = FReg - RISCV::F0_F;
10676         unsigned HReg = RISCV::F0_H + RegNo;
10677         return std::make_pair(HReg, &RISCV::FPR16RegClass);
10678       }
10679     }
10680   }
10681 
10682   if (Subtarget.hasVInstructions()) {
10683     Register VReg = StringSwitch<Register>(Constraint.lower())
10684                         .Case("{v0}", RISCV::V0)
10685                         .Case("{v1}", RISCV::V1)
10686                         .Case("{v2}", RISCV::V2)
10687                         .Case("{v3}", RISCV::V3)
10688                         .Case("{v4}", RISCV::V4)
10689                         .Case("{v5}", RISCV::V5)
10690                         .Case("{v6}", RISCV::V6)
10691                         .Case("{v7}", RISCV::V7)
10692                         .Case("{v8}", RISCV::V8)
10693                         .Case("{v9}", RISCV::V9)
10694                         .Case("{v10}", RISCV::V10)
10695                         .Case("{v11}", RISCV::V11)
10696                         .Case("{v12}", RISCV::V12)
10697                         .Case("{v13}", RISCV::V13)
10698                         .Case("{v14}", RISCV::V14)
10699                         .Case("{v15}", RISCV::V15)
10700                         .Case("{v16}", RISCV::V16)
10701                         .Case("{v17}", RISCV::V17)
10702                         .Case("{v18}", RISCV::V18)
10703                         .Case("{v19}", RISCV::V19)
10704                         .Case("{v20}", RISCV::V20)
10705                         .Case("{v21}", RISCV::V21)
10706                         .Case("{v22}", RISCV::V22)
10707                         .Case("{v23}", RISCV::V23)
10708                         .Case("{v24}", RISCV::V24)
10709                         .Case("{v25}", RISCV::V25)
10710                         .Case("{v26}", RISCV::V26)
10711                         .Case("{v27}", RISCV::V27)
10712                         .Case("{v28}", RISCV::V28)
10713                         .Case("{v29}", RISCV::V29)
10714                         .Case("{v30}", RISCV::V30)
10715                         .Case("{v31}", RISCV::V31)
10716                         .Default(RISCV::NoRegister);
10717     if (VReg != RISCV::NoRegister) {
10718       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
10719         return std::make_pair(VReg, &RISCV::VMRegClass);
10720       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
10721         return std::make_pair(VReg, &RISCV::VRRegClass);
10722       for (const auto *RC :
10723            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
10724         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
10725           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
10726           return std::make_pair(VReg, RC);
10727         }
10728       }
10729     }
10730   }
10731 
10732   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
10733 }
10734 
10735 unsigned
10736 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
10737   // Currently only support length 1 constraints.
10738   if (ConstraintCode.size() == 1) {
10739     switch (ConstraintCode[0]) {
10740     case 'A':
10741       return InlineAsm::Constraint_A;
10742     default:
10743       break;
10744     }
10745   }
10746 
10747   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
10748 }
10749 
10750 void RISCVTargetLowering::LowerAsmOperandForConstraint(
10751     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
10752     SelectionDAG &DAG) const {
10753   // Currently only support length 1 constraints.
10754   if (Constraint.length() == 1) {
10755     switch (Constraint[0]) {
10756     case 'I':
10757       // Validate & create a 12-bit signed immediate operand.
10758       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
10759         uint64_t CVal = C->getSExtValue();
10760         if (isInt<12>(CVal))
10761           Ops.push_back(
10762               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
10763       }
10764       return;
10765     case 'J':
10766       // Validate & create an integer zero operand.
10767       if (auto *C = dyn_cast<ConstantSDNode>(Op))
10768         if (C->getZExtValue() == 0)
10769           Ops.push_back(
10770               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
10771       return;
10772     case 'K':
10773       // Validate & create a 5-bit unsigned immediate operand.
10774       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
10775         uint64_t CVal = C->getZExtValue();
10776         if (isUInt<5>(CVal))
10777           Ops.push_back(
10778               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
10779       }
10780       return;
10781     case 'S':
10782       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
10783         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
10784                                                  GA->getValueType(0)));
10785       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
10786         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
10787                                                 BA->getValueType(0)));
10788       }
10789       return;
10790     default:
10791       break;
10792     }
10793   }
10794   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
10795 }
10796 
10797 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
10798                                                    Instruction *Inst,
10799                                                    AtomicOrdering Ord) const {
10800   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
10801     return Builder.CreateFence(Ord);
10802   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
10803     return Builder.CreateFence(AtomicOrdering::Release);
10804   return nullptr;
10805 }
10806 
10807 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
10808                                                     Instruction *Inst,
10809                                                     AtomicOrdering Ord) const {
10810   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
10811     return Builder.CreateFence(AtomicOrdering::Acquire);
10812   return nullptr;
10813 }
10814 
10815 TargetLowering::AtomicExpansionKind
10816 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
10817   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
10818   // point operations can't be used in an lr/sc sequence without breaking the
10819   // forward-progress guarantee.
10820   if (AI->isFloatingPointOperation())
10821     return AtomicExpansionKind::CmpXChg;
10822 
10823   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
10824   if (Size == 8 || Size == 16)
10825     return AtomicExpansionKind::MaskedIntrinsic;
10826   return AtomicExpansionKind::None;
10827 }
10828 
10829 static Intrinsic::ID
10830 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
10831   if (XLen == 32) {
10832     switch (BinOp) {
10833     default:
10834       llvm_unreachable("Unexpected AtomicRMW BinOp");
10835     case AtomicRMWInst::Xchg:
10836       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
10837     case AtomicRMWInst::Add:
10838       return Intrinsic::riscv_masked_atomicrmw_add_i32;
10839     case AtomicRMWInst::Sub:
10840       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
10841     case AtomicRMWInst::Nand:
10842       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
10843     case AtomicRMWInst::Max:
10844       return Intrinsic::riscv_masked_atomicrmw_max_i32;
10845     case AtomicRMWInst::Min:
10846       return Intrinsic::riscv_masked_atomicrmw_min_i32;
10847     case AtomicRMWInst::UMax:
10848       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
10849     case AtomicRMWInst::UMin:
10850       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
10851     }
10852   }
10853 
10854   if (XLen == 64) {
10855     switch (BinOp) {
10856     default:
10857       llvm_unreachable("Unexpected AtomicRMW BinOp");
10858     case AtomicRMWInst::Xchg:
10859       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
10860     case AtomicRMWInst::Add:
10861       return Intrinsic::riscv_masked_atomicrmw_add_i64;
10862     case AtomicRMWInst::Sub:
10863       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
10864     case AtomicRMWInst::Nand:
10865       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
10866     case AtomicRMWInst::Max:
10867       return Intrinsic::riscv_masked_atomicrmw_max_i64;
10868     case AtomicRMWInst::Min:
10869       return Intrinsic::riscv_masked_atomicrmw_min_i64;
10870     case AtomicRMWInst::UMax:
10871       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
10872     case AtomicRMWInst::UMin:
10873       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
10874     }
10875   }
10876 
10877   llvm_unreachable("Unexpected XLen\n");
10878 }
10879 
10880 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
10881     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
10882     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
10883   unsigned XLen = Subtarget.getXLen();
10884   Value *Ordering =
10885       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
10886   Type *Tys[] = {AlignedAddr->getType()};
10887   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
10888       AI->getModule(),
10889       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
10890 
10891   if (XLen == 64) {
10892     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
10893     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
10894     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
10895   }
10896 
10897   Value *Result;
10898 
10899   // Must pass the shift amount needed to sign extend the loaded value prior
10900   // to performing a signed comparison for min/max. ShiftAmt is the number of
10901   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
10902   // is the number of bits to left+right shift the value in order to
10903   // sign-extend.
10904   if (AI->getOperation() == AtomicRMWInst::Min ||
10905       AI->getOperation() == AtomicRMWInst::Max) {
10906     const DataLayout &DL = AI->getModule()->getDataLayout();
10907     unsigned ValWidth =
10908         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
10909     Value *SextShamt =
10910         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
10911     Result = Builder.CreateCall(LrwOpScwLoop,
10912                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
10913   } else {
10914     Result =
10915         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
10916   }
10917 
10918   if (XLen == 64)
10919     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
10920   return Result;
10921 }
10922 
10923 TargetLowering::AtomicExpansionKind
10924 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
10925     AtomicCmpXchgInst *CI) const {
10926   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
10927   if (Size == 8 || Size == 16)
10928     return AtomicExpansionKind::MaskedIntrinsic;
10929   return AtomicExpansionKind::None;
10930 }
10931 
10932 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
10933     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
10934     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
10935   unsigned XLen = Subtarget.getXLen();
10936   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
10937   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
10938   if (XLen == 64) {
10939     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
10940     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
10941     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
10942     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
10943   }
10944   Type *Tys[] = {AlignedAddr->getType()};
10945   Function *MaskedCmpXchg =
10946       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
10947   Value *Result = Builder.CreateCall(
10948       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
10949   if (XLen == 64)
10950     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
10951   return Result;
10952 }
10953 
10954 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
10955   return false;
10956 }
10957 
10958 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
10959                                                EVT VT) const {
10960   if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
10961     return false;
10962 
10963   switch (FPVT.getSimpleVT().SimpleTy) {
10964   case MVT::f16:
10965     return Subtarget.hasStdExtZfh();
10966   case MVT::f32:
10967     return Subtarget.hasStdExtF();
10968   case MVT::f64:
10969     return Subtarget.hasStdExtD();
10970   default:
10971     return false;
10972   }
10973 }
10974 
10975 unsigned RISCVTargetLowering::getJumpTableEncoding() const {
10976   // If we are using the small code model, we can reduce size of jump table
10977   // entry to 4 bytes.
10978   if (Subtarget.is64Bit() && !isPositionIndependent() &&
10979       getTargetMachine().getCodeModel() == CodeModel::Small) {
10980     return MachineJumpTableInfo::EK_Custom32;
10981   }
10982   return TargetLowering::getJumpTableEncoding();
10983 }
10984 
10985 const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
10986     const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
10987     unsigned uid, MCContext &Ctx) const {
10988   assert(Subtarget.is64Bit() && !isPositionIndependent() &&
10989          getTargetMachine().getCodeModel() == CodeModel::Small);
10990   return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
10991 }
10992 
10993 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
10994                                                      EVT VT) const {
10995   VT = VT.getScalarType();
10996 
10997   if (!VT.isSimple())
10998     return false;
10999 
11000   switch (VT.getSimpleVT().SimpleTy) {
11001   case MVT::f16:
11002     return Subtarget.hasStdExtZfh();
11003   case MVT::f32:
11004     return Subtarget.hasStdExtF();
11005   case MVT::f64:
11006     return Subtarget.hasStdExtD();
11007   default:
11008     break;
11009   }
11010 
11011   return false;
11012 }
11013 
11014 Register RISCVTargetLowering::getExceptionPointerRegister(
11015     const Constant *PersonalityFn) const {
11016   return RISCV::X10;
11017 }
11018 
11019 Register RISCVTargetLowering::getExceptionSelectorRegister(
11020     const Constant *PersonalityFn) const {
11021   return RISCV::X11;
11022 }
11023 
11024 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
11025   // Return false to suppress the unnecessary extensions if the LibCall
11026   // arguments or return value is f32 type for LP64 ABI.
11027   RISCVABI::ABI ABI = Subtarget.getTargetABI();
11028   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
11029     return false;
11030 
11031   return true;
11032 }
11033 
11034 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
11035   if (Subtarget.is64Bit() && Type == MVT::i32)
11036     return true;
11037 
11038   return IsSigned;
11039 }
11040 
11041 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
11042                                                  SDValue C) const {
11043   // Check integral scalar types.
11044   if (VT.isScalarInteger()) {
11045     // Omit the optimization if the sub target has the M extension and the data
11046     // size exceeds XLen.
11047     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
11048       return false;
11049     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
11050       // Break the MUL to a SLLI and an ADD/SUB.
11051       const APInt &Imm = ConstNode->getAPIntValue();
11052       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
11053           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
11054         return true;
11055       // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
11056       if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
11057           ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
11058            (Imm - 8).isPowerOf2()))
11059         return true;
11060       // Omit the following optimization if the sub target has the M extension
11061       // and the data size >= XLen.
11062       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
11063         return false;
11064       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
11065       // a pair of LUI/ADDI.
11066       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
11067         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
11068         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
11069             (1 - ImmS).isPowerOf2())
11070         return true;
11071       }
11072     }
11073   }
11074 
11075   return false;
11076 }
11077 
11078 bool RISCVTargetLowering::isMulAddWithConstProfitable(
11079     const SDValue &AddNode, const SDValue &ConstNode) const {
11080   // Let the DAGCombiner decide for vectors.
11081   EVT VT = AddNode.getValueType();
11082   if (VT.isVector())
11083     return true;
11084 
11085   // Let the DAGCombiner decide for larger types.
11086   if (VT.getScalarSizeInBits() > Subtarget.getXLen())
11087     return true;
11088 
11089   // It is worse if c1 is simm12 while c1*c2 is not.
11090   ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
11091   ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
11092   const APInt &C1 = C1Node->getAPIntValue();
11093   const APInt &C2 = C2Node->getAPIntValue();
11094   if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
11095     return false;
11096 
11097   // Default to true and let the DAGCombiner decide.
11098   return true;
11099 }
11100 
11101 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
11102     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
11103     bool *Fast) const {
11104   if (!VT.isVector())
11105     return false;
11106 
11107   EVT ElemVT = VT.getVectorElementType();
11108   if (Alignment >= ElemVT.getStoreSize()) {
11109     if (Fast)
11110       *Fast = true;
11111     return true;
11112   }
11113 
11114   return false;
11115 }
11116 
11117 bool RISCVTargetLowering::splitValueIntoRegisterParts(
11118     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
11119     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
11120   bool IsABIRegCopy = CC.hasValue();
11121   EVT ValueVT = Val.getValueType();
11122   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11123     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
11124     // and cast to f32.
11125     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
11126     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
11127     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
11128                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
11129     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
11130     Parts[0] = Val;
11131     return true;
11132   }
11133 
11134   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11135     LLVMContext &Context = *DAG.getContext();
11136     EVT ValueEltVT = ValueVT.getVectorElementType();
11137     EVT PartEltVT = PartVT.getVectorElementType();
11138     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11139     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11140     if (PartVTBitSize % ValueVTBitSize == 0) {
11141       assert(PartVTBitSize >= ValueVTBitSize);
11142       // If the element types are different, bitcast to the same element type of
11143       // PartVT first.
11144       // Give an example here, we want copy a <vscale x 1 x i8> value to
11145       // <vscale x 4 x i16>.
11146       // We need to convert <vscale x 1 x i8> to <vscale x 8 x i8> by insert
11147       // subvector, then we can bitcast to <vscale x 4 x i16>.
11148       if (ValueEltVT != PartEltVT) {
11149         if (PartVTBitSize > ValueVTBitSize) {
11150           unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11151           assert(Count != 0 && "The number of element should not be zero.");
11152           EVT SameEltTypeVT =
11153               EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11154           Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
11155                             DAG.getUNDEF(SameEltTypeVT), Val,
11156                             DAG.getVectorIdxConstant(0, DL));
11157         }
11158         Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
11159       } else {
11160         Val =
11161             DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
11162                         Val, DAG.getVectorIdxConstant(0, DL));
11163       }
11164       Parts[0] = Val;
11165       return true;
11166     }
11167   }
11168   return false;
11169 }
11170 
11171 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
11172     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
11173     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
11174   bool IsABIRegCopy = CC.hasValue();
11175   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
11176     SDValue Val = Parts[0];
11177 
11178     // Cast the f32 to i32, truncate to i16, and cast back to f16.
11179     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
11180     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
11181     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
11182     return Val;
11183   }
11184 
11185   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
11186     LLVMContext &Context = *DAG.getContext();
11187     SDValue Val = Parts[0];
11188     EVT ValueEltVT = ValueVT.getVectorElementType();
11189     EVT PartEltVT = PartVT.getVectorElementType();
11190     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
11191     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
11192     if (PartVTBitSize % ValueVTBitSize == 0) {
11193       assert(PartVTBitSize >= ValueVTBitSize);
11194       EVT SameEltTypeVT = ValueVT;
11195       // If the element types are different, convert it to the same element type
11196       // of PartVT.
11197       // Give an example here, we want copy a <vscale x 1 x i8> value from
11198       // <vscale x 4 x i16>.
11199       // We need to convert <vscale x 4 x i16> to <vscale x 8 x i8> first,
11200       // then we can extract <vscale x 1 x i8>.
11201       if (ValueEltVT != PartEltVT) {
11202         unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
11203         assert(Count != 0 && "The number of element should not be zero.");
11204         SameEltTypeVT =
11205             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
11206         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
11207       }
11208       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
11209                         DAG.getVectorIdxConstant(0, DL));
11210       return Val;
11211     }
11212   }
11213   return SDValue();
11214 }
11215 
11216 SDValue
11217 RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
11218                                    SelectionDAG &DAG,
11219                                    SmallVectorImpl<SDNode *> &Created) const {
11220   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
11221   if (isIntDivCheap(N->getValueType(0), Attr))
11222     return SDValue(N, 0); // Lower SDIV as SDIV
11223 
11224   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
11225          "Unexpected divisor!");
11226 
11227   // Conditional move is needed, so do the transformation iff Zbt is enabled.
11228   if (!Subtarget.hasStdExtZbt())
11229     return SDValue();
11230 
11231   // When |Divisor| >= 2 ^ 12, it isn't profitable to do such transformation.
11232   // Besides, more critical path instructions will be generated when dividing
11233   // by 2. So we keep using the original DAGs for these cases.
11234   unsigned Lg2 = Divisor.countTrailingZeros();
11235   if (Lg2 == 1 || Lg2 >= 12)
11236     return SDValue();
11237 
11238   // fold (sdiv X, pow2)
11239   EVT VT = N->getValueType(0);
11240   if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
11241     return SDValue();
11242 
11243   SDLoc DL(N);
11244   SDValue N0 = N->getOperand(0);
11245   SDValue Zero = DAG.getConstant(0, DL, VT);
11246   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
11247 
11248   // Add (N0 < 0) ? Pow2 - 1 : 0;
11249   SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
11250   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
11251   SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
11252 
11253   Created.push_back(Cmp.getNode());
11254   Created.push_back(Add.getNode());
11255   Created.push_back(Sel.getNode());
11256 
11257   // Divide by pow2.
11258   SDValue SRA =
11259       DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
11260 
11261   // If we're dividing by a positive value, we're done.  Otherwise, we must
11262   // negate the result.
11263   if (Divisor.isNonNegative())
11264     return SRA;
11265 
11266   Created.push_back(SRA.getNode());
11267   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
11268 }
11269 
11270 #define GET_REGISTER_MATCHER
11271 #include "RISCVGenAsmMatcher.inc"
11272 
11273 Register
11274 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
11275                                        const MachineFunction &MF) const {
11276   Register Reg = MatchRegisterAltName(RegName);
11277   if (Reg == RISCV::NoRegister)
11278     Reg = MatchRegisterName(RegName);
11279   if (Reg == RISCV::NoRegister)
11280     report_fatal_error(
11281         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
11282   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
11283   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
11284     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
11285                              StringRef(RegName) + "\"."));
11286   return Reg;
11287 }
11288 
11289 namespace llvm {
11290 namespace RISCVVIntrinsicsTable {
11291 
11292 #define GET_RISCVVIntrinsicsTable_IMPL
11293 #include "RISCVGenSearchableTables.inc"
11294 
11295 } // namespace RISCVVIntrinsicsTable
11296 
11297 } // namespace llvm
11298