//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtZfh())
    addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  static const MVT::SimpleValueType BoolVecVTs[] = {
      MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
      MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
  static const MVT::SimpleValueType IntVecVTs[] = {
      MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
      MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
      MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
      MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
      MVT::nxv4i64, MVT::nxv8i64};
  static const MVT::SimpleValueType F16VecVTs[] = {
      MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
      MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
  static const MVT::SimpleValueType F32VecVTs[] = {
      MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
  static const MVT::SimpleValueType F64VecVTs[] = {
      MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};

  if (Subtarget.hasVInstructions()) {
    auto addRegClassForRVV = [this](MVT VT) {
      unsigned Size = VT.getSizeInBits().getKnownMinValue();
      assert(Size <= 512 && isPowerOf2_32(Size));
      const TargetRegisterClass *RC;
      if (Size <= 64)
        RC = &RISCV::VRRegClass;
      else if (Size == 128)
        RC = &RISCV::VRM2RegClass;
      else if (Size == 256)
        RC = &RISCV::VRM4RegClass;
      else
        RC = &RISCV::VRM8RegClass;

      addRegisterClass(VT, RC);
    };

    for (MVT VT : BoolVecVTs)
      addRegClassForRVV(VT);
    for (MVT VT : IntVecVTs) {
      if (VT.getVectorElementType() == MVT::i64 &&
          !Subtarget.hasVInstructionsI64())
        continue;
      addRegClassForRVV(VT);
    }

    if (Subtarget.hasVInstructionsF16())
      for (MVT VT : F16VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasVInstructionsF32())
      for (MVT VT : F32VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasVInstructionsF64())
      for (MVT VT : F64VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.useRVVForFixedLengthVectors()) {
      auto addRegClassForFixedVectors = [this](MVT VT) {
        MVT ContainerVT = getContainerForFixedLengthVector(VT);
        unsigned RCID = getRegClassIDForVecVT(ContainerVT);
        const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
        addRegisterClass(VT, TRI.getRegClass(RCID));
      };
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);

    setOperationAction(ISD::UADDO, MVT::i32, Custom);
    setOperationAction(ISD::USUBO, MVT::i32, Custom);
    setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
    setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
  } else {
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
    setLibcallName(RTLIB::MUL_I128, nullptr);
    setLibcallName(RTLIB::MULO_I64, nullptr);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  } else {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::MUL, MVT::i32, Custom);
      setOperationAction(ISD::MUL, MVT::i128, Custom);

      setOperationAction(ISD::SDIV, MVT::i8, Custom);
      setOperationAction(ISD::UDIV, MVT::i8, Custom);
      setOperationAction(ISD::UREM, MVT::i8, Custom);
      setOperationAction(ISD::SDIV, MVT::i16, Custom);
      setOperationAction(ISD::UDIV, MVT::i16, Custom);
      setOperationAction(ISD::UREM, MVT::i16, Custom);
      setOperationAction(ISD::SDIV, MVT::i32, Custom);
      setOperationAction(ISD::UDIV, MVT::i32, Custom);
      setOperationAction(ISD::UREM, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::MUL, MVT::i64, Custom);
    }
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
      Subtarget.hasStdExtZbkb()) {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::ROTL, MVT::i32, Custom);
      setOperationAction(ISD::ROTR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::ROTL, XLenVT, Expand);
    setOperationAction(ISD::ROTR, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbp()) {
    // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
    // more combining.
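    // For example (illustrative), on RV32 this lets bswap lower to GREVI with
    // shamt 24 (reverse the four bytes of the word) and bitreverse to GREVI
    // with shamt 31 (reverse all 32 bits), which the GREVI combines can then
    // fold further.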
    setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
    setOperationAction(ISD::BSWAP, XLenVT, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::i8, Custom);
    // BSWAP i8 doesn't exist.
    setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
    setOperationAction(ISD::BSWAP, MVT::i16, Custom);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
      setOperationAction(ISD::BSWAP, MVT::i32, Custom);
    }
  } else {
    // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
    // pattern match it directly in isel.
    setOperationAction(ISD::BSWAP, XLenVT,
                       (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
                           ? Legal
                           : Expand);
    // Zbkb can use rev8+brev8 to implement bitreverse.
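    // (rev8 reverses the byte order and brev8 reverses the bits within each
    // byte, so composing the two reverses all XLen bits.)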
    setOperationAction(ISD::BITREVERSE, XLenVT,
                       Subtarget.hasStdExtZbkb() ? Custom : Expand);
  }

  if (Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SMIN, XLenVT, Legal);
    setOperationAction(ISD::SMAX, XLenVT, Legal);
    setOperationAction(ISD::UMIN, XLenVT, Legal);
    setOperationAction(ISD::UMAX, XLenVT, Legal);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ, MVT::i32, Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
      setOperationAction(ISD::CTLZ, MVT::i32, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::CTTZ, XLenVT, Expand);
    setOperationAction(ISD::CTLZ, XLenVT, Expand);
    setOperationAction(ISD::CTPOP, XLenVT, Expand);

    if (Subtarget.is64Bit())
      setOperationAction(ISD::ABS, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtZbt()) {
    setOperationAction(ISD::FSHL, XLenVT, Custom);
    setOperationAction(ISD::FSHR, XLenVT, Custom);
    setOperationAction(ISD::SELECT, XLenVT, Legal);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::FSHL, MVT::i32, Custom);
      setOperationAction(ISD::FSHR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SELECT, XLenVT, Custom);
  }

  static constexpr ISD::NodeType FPLegalNodeTypes[] = {
      ISD::FMINNUM,        ISD::FMAXNUM,       ISD::LRINT,
      ISD::LLRINT,         ISD::LROUND,        ISD::LLROUND,
      ISD::STRICT_LRINT,   ISD::STRICT_LLRINT, ISD::STRICT_LROUND,
      ISD::STRICT_LLROUND, ISD::STRICT_FMA,    ISD::STRICT_FADD,
      ISD::STRICT_FSUB,    ISD::STRICT_FMUL,   ISD::STRICT_FDIV,
      ISD::STRICT_FSQRT,   ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS};

  static const ISD::CondCode FPCCToExpand[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};

  static const ISD::NodeType FPOpToExpand[] = {
      ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
      ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtZfh())
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);

  if (Subtarget.hasStdExtZfh()) {
    for (auto NT : FPLegalNodeTypes)
      setOperationAction(NT, MVT::f16, Legal);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);

    setOperationAction(ISD::FREM, MVT::f16, Promote);
    setOperationAction(ISD::FCEIL, MVT::f16, Promote);
    setOperationAction(ISD::FFLOOR, MVT::f16, Promote);
    setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
    setOperationAction(ISD::FRINT, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Promote);
    setOperationAction(ISD::FROUNDEVEN, MVT::f16, Promote);
    setOperationAction(ISD::FTRUNC, MVT::f16, Promote);
    setOperationAction(ISD::FPOW, MVT::f16, Promote);
    setOperationAction(ISD::FPOWI, MVT::f16, Promote);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FSINCOS, MVT::f16, Promote);
    setOperationAction(ISD::FEXP, MVT::f16, Promote);
    setOperationAction(ISD::FEXP2, MVT::f16, Promote);
    setOperationAction(ISD::FLOG, MVT::f16, Promote);
    setOperationAction(ISD::FLOG2, MVT::f16, Promote);
    setOperationAction(ISD::FLOG10, MVT::f16, Promote);

    // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
    // complete support for all operations in LegalizeDAG.

    // We need to custom promote this.
    if (Subtarget.is64Bit())
      setOperationAction(ISD::FPOWI, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtF()) {
    for (auto NT : FPLegalNodeTypes)
      setOperationAction(NT, MVT::f32, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    for (auto NT : FPLegalNodeTypes)
      setOperationAction(NT, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
    setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);

    setOperationAction(ISD::STRICT_FP_TO_UINT, XLenVT, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, XLenVT, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, XLenVT, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, XLenVT, Legal);

    setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
    setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
  }

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);
  setOperationAction(ISD::JumpTable, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasVInstructions()) {
    setBooleanVectorContents(ZeroOrOneBooleanContent);

    setOperationAction(ISD::VSCALE, XLenVT, Custom);

    // RVV intrinsics may have illegal operands.
    // We also need to custom legalize vmv.x.s.
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
    }

    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
    setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

    static const unsigned IntegerVPOps[] = {
        ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
        ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
        ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
        ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
        ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
        ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
        ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
        ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FPTOSI,
        ISD::VP_FPTOUI,      ISD::VP_SETCC,       ISD::VP_SEXT,
        ISD::VP_ZEXT};

    static const unsigned FloatingPointVPOps[] = {
        ISD::VP_FADD,        ISD::VP_FSUB,
        ISD::VP_FMUL,        ISD::VP_FDIV,
        ISD::VP_FNEG,        ISD::VP_FMA,
        ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
        ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX,
        ISD::VP_MERGE,       ISD::VP_SELECT,
        ISD::VP_SITOFP,      ISD::VP_UITOFP,
        ISD::VP_SETCC};

    if (!Subtarget.is64Bit()) {
      // We must custom-lower certain vXi64 operations on RV32 due to the
      // vector element type being illegal.
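      // (For instance, extracting an i64 element on RV32 needs a custom
      // sequence that returns the value as two i32 halves.)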
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);

      setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);

      setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom);
    }

    for (MVT VT : BoolVecVTs) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);

      // Mask VTs are custom-expanded into a series of standard nodes.
      setOperationAction(ISD::TRUNCATE, VT, Custom);
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);
      setOperationAction(ISD::VP_MERGE, VT, Expand);
      setOperationAction(ISD::VP_SELECT, VT, Expand);

      setOperationAction(ISD::VP_AND, VT, Custom);
      setOperationAction(ISD::VP_OR, VT, Custom);
      setOperationAction(ISD::VP_XOR, VT, Custom);

      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

      setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
      setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
      setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(ISD::SINT_TO_FP, VT, Custom);
      setOperationAction(ISD::UINT_TO_FP, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);

      // Expand all extending loads to types larger than this, and truncating
      // stores from types larger than this.
      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(OtherVT, VT, Expand);
        setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
      }

      setOperationAction(ISD::VP_FPTOSI, VT, Custom);
      setOperationAction(ISD::VP_FPTOUI, VT, Custom);
    }

    for (MVT VT : IntVecVTs) {
      if (VT.getVectorElementType() == MVT::i64 &&
          !Subtarget.hasVInstructionsI64())
        continue;

      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);

      // Vectors implement MULHS/MULHU.
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);

      // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
      if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV()) {
        setOperationAction(ISD::MULHU, VT, Expand);
        setOperationAction(ISD::MULHS, VT, Expand);
      }

      setOperationAction(ISD::SMIN, VT, Legal);
      setOperationAction(ISD::SMAX, VT, Legal);
      setOperationAction(ISD::UMIN, VT, Legal);
      setOperationAction(ISD::UMAX, VT, Legal);

      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      setOperationAction(ISD::CTTZ, VT, Expand);
      setOperationAction(ISD::CTLZ, VT, Expand);
      setOperationAction(ISD::CTPOP, VT, Expand);

      setOperationAction(ISD::BSWAP, VT, Expand);

      // Custom-lower extensions and truncations from/to mask types.
      setOperationAction(ISD::ANY_EXTEND, VT, Custom);
      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
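      // For example (illustrative), an i8 -> f64 conversion is emitted as an
      // i8 -> i32 extension followed by a native widening i32 -> f64 convert.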
      setOperationAction(ISD::SINT_TO_FP, VT, Custom);
      setOperationAction(ISD::UINT_TO_FP, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);

      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);

      // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
      // nodes which truncate by one power of two at a time.
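      // For example, truncating nxv1i64 to nxv1i8 becomes a chain of three
      // such nodes: i64 -> i32, i32 -> i16, i16 -> i8.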
      setOperationAction(ISD::TRUNCATE, VT, Custom);

      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

      // Custom-lower reduction operations to set up the corresponding custom
      // nodes' operands.
      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);

      for (unsigned VPOpc : IntegerVPOps)
        setOperationAction(VPOpc, VT, Custom);

      setOperationAction(ISD::LOAD, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);

      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::MSTORE, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);

      setOperationAction(ISD::VP_LOAD, VT, Custom);
      setOperationAction(ISD::VP_STORE, VT, Custom);
      setOperationAction(ISD::VP_GATHER, VT, Custom);
      setOperationAction(ISD::VP_SCATTER, VT, Custom);

      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction(ISD::STEP_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);

      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(VT, OtherVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
      }

      // Splice
      setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);

      // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
      // type that can represent the value exactly.
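      // (The trick: after an int -> float convert, floor(log2(x)) can be read
      // out of the exponent field, and e.g. ctlz(x) == EltBits - 1 -
      // floor(log2(x)) for non-zero x.)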
      if (VT.getVectorElementType() != MVT::i64) {
        MVT FloatEltVT =
            VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
        EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
        if (isTypeLegal(FloatVT)) {
          setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
          setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
        }
      }
    }

    // Expand various CCs to best match the RVV ISA, which natively supports
    // UNE but no other unordered comparisons, and supports all ordered
    // comparisons except ONE. Additionally, we expand GT, OGT, GE, OGE for
    // optimization purposes; they are expanded to their swapped-operand CCs
    // (LT, OLT, LE, OLE), and we pattern-match those back to the "original",
    // swapping operands once more. This way we catch both operations and both
    // "vf" and "fv" forms with fewer patterns.
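    // For example, (setogt x, y) is expanded to (setolt y, x), and isel then
    // swaps the operands once more when matching the native greater-than
    // pattern.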
    static const ISD::CondCode VFPCCToExpand[] = {
        ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
        ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
        ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
    };

    // Sets common operation actions on RVV floating-point vector types.
    const auto SetCommonVFPActions = [&](MVT VT) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
      // sizes are within one power-of-two of each other. Therefore conversions
      // between vXf16 and vXf64 must be lowered as sequences which convert via
      // vXf32.
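      // For example, nxv1f16 -> nxv1f64 is lowered as nxv1f16 -> nxv1f32
      // followed by nxv1f32 -> nxv1f64.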
      setOperationAction(ISD::FP_ROUND, VT, Custom);
      setOperationAction(ISD::FP_EXTEND, VT, Custom);
      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      // Expand various condition codes (explained above).
      for (auto CC : VFPCCToExpand)
        setCondCodeAction(CC, VT, Expand);

      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);

      setOperationAction(ISD::FTRUNC, VT, Custom);
      setOperationAction(ISD::FCEIL, VT, Custom);
      setOperationAction(ISD::FFLOOR, VT, Custom);
      setOperationAction(ISD::FROUND, VT, Custom);

      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

      setOperationAction(ISD::FCOPYSIGN, VT, Legal);

      setOperationAction(ISD::LOAD, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);

      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::MSTORE, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);

      setOperationAction(ISD::VP_LOAD, VT, Custom);
      setOperationAction(ISD::VP_STORE, VT, Custom);
      setOperationAction(ISD::VP_GATHER, VT, Custom);
      setOperationAction(ISD::VP_SCATTER, VT, Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
      setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);

      for (unsigned VPOpc : FloatingPointVPOps)
        setOperationAction(VPOpc, VT, Custom);
    };

    // Sets common extload/truncstore actions on RVV floating-point vector
    // types.
    const auto SetCommonVFPExtLoadTruncStoreActions =
        [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
          for (auto SmallVT : SmallerVTs) {
            setTruncStoreAction(VT, SmallVT, Expand);
            setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
          }
        };

    if (Subtarget.hasVInstructionsF16())
      for (MVT VT : F16VecVTs)
        SetCommonVFPActions(VT);

    for (MVT VT : F32VecVTs) {
      if (Subtarget.hasVInstructionsF32())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
    }

    for (MVT VT : F64VecVTs) {
      if (Subtarget.hasVInstructionsF64())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
      SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
    }

    if (Subtarget.useRVVForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
          setTruncStoreAction(VT, OtherVT, Expand);
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
          setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
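        // (That is, a fixed-length operation is performed on its scalable
        // container type and the fixed-length result is read back with an
        // EXTRACT_SUBVECTOR at index 0.)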
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);

        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);

        setOperationAction(ISD::SETCC, VT, Custom);

        setOperationAction(ISD::SELECT, VT, Custom);

        setOperationAction(ISD::TRUNCATE, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
        setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
        setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

        setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
        setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
        setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);

        setOperationAction(ISD::SINT_TO_FP, VT, Custom);
        setOperationAction(ISD::UINT_TO_FP, VT, Custom);
        setOperationAction(ISD::FP_TO_SINT, VT, Custom);
        setOperationAction(ISD::FP_TO_UINT, VT, Custom);

        // Operations below differ between mask vectors and other vectors.
        if (VT.getVectorElementType() == MVT::i1) {
          setOperationAction(ISD::VP_AND, VT, Custom);
          setOperationAction(ISD::VP_OR, VT, Custom);
          setOperationAction(ISD::VP_XOR, VT, Custom);
          setOperationAction(ISD::AND, VT, Custom);
          setOperationAction(ISD::OR, VT, Custom);
          setOperationAction(ISD::XOR, VT, Custom);

          setOperationAction(ISD::VP_FPTOSI, VT, Custom);
          setOperationAction(ISD::VP_FPTOUI, VT, Custom);
          setOperationAction(ISD::VP_SETCC, VT, Custom);
          continue;
        }

        // Make SPLAT_VECTOR Legal so DAGCombine will convert splat vectors to
        // it before type legalization for i64 vectors on RV32. It will then be
        // type legalized to SPLAT_VECTOR_PARTS which we need to Custom handle.
        // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
        // improvements first.
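        // (For example, a v2i64 splat on RV32 is type legalized to a
        // SPLAT_VECTOR_PARTS node taking the two i32 halves as operands.)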
        if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
          setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
          setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
        }

        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::MLOAD, VT, Custom);
        setOperationAction(ISD::MSTORE, VT, Custom);
        setOperationAction(ISD::MGATHER, VT, Custom);
        setOperationAction(ISD::MSCATTER, VT, Custom);

        setOperationAction(ISD::VP_LOAD, VT, Custom);
        setOperationAction(ISD::VP_STORE, VT, Custom);
        setOperationAction(ISD::VP_GATHER, VT, Custom);
        setOperationAction(ISD::VP_SCATTER, VT, Custom);

        setOperationAction(ISD::ADD, VT, Custom);
        setOperationAction(ISD::MUL, VT, Custom);
        setOperationAction(ISD::SUB, VT, Custom);
        setOperationAction(ISD::AND, VT, Custom);
        setOperationAction(ISD::OR, VT, Custom);
        setOperationAction(ISD::XOR, VT, Custom);
        setOperationAction(ISD::SDIV, VT, Custom);
        setOperationAction(ISD::SREM, VT, Custom);
        setOperationAction(ISD::UDIV, VT, Custom);
        setOperationAction(ISD::UREM, VT, Custom);
        setOperationAction(ISD::SHL, VT, Custom);
        setOperationAction(ISD::SRA, VT, Custom);
        setOperationAction(ISD::SRL, VT, Custom);

        setOperationAction(ISD::SMIN, VT, Custom);
        setOperationAction(ISD::SMAX, VT, Custom);
        setOperationAction(ISD::UMIN, VT, Custom);
        setOperationAction(ISD::UMAX, VT, Custom);
        setOperationAction(ISD::ABS, VT, Custom);

        // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
        if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV()) {
          setOperationAction(ISD::MULHS, VT, Custom);
          setOperationAction(ISD::MULHU, VT, Custom);
        }

        setOperationAction(ISD::SADDSAT, VT, Custom);
        setOperationAction(ISD::UADDSAT, VT, Custom);
        setOperationAction(ISD::SSUBSAT, VT, Custom);
        setOperationAction(ISD::USUBSAT, VT, Custom);

        setOperationAction(ISD::VSELECT, VT, Custom);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(ISD::ANY_EXTEND, VT, Custom);
        setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
        setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

        // Custom-lower reduction operations to set up the corresponding custom
        // nodes' operands.
        setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
        setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
        setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);

        for (unsigned VPOpc : IntegerVPOps)
          setOperationAction(VPOpc, VT, Custom);

        // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating
        // point type that can represent the value exactly.
        if (VT.getVectorElementType() != MVT::i64) {
          MVT FloatEltVT =
              VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
          EVT FloatVT =
              MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
          if (isTypeLegal(FloatVT)) {
            setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
            setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
          }
        }
      }

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setTruncStoreAction(VT, OtherVT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);
        setOperationAction(ISD::MLOAD, VT, Custom);
        setOperationAction(ISD::MSTORE, VT, Custom);
        setOperationAction(ISD::MGATHER, VT, Custom);
        setOperationAction(ISD::MSCATTER, VT, Custom);

        setOperationAction(ISD::VP_LOAD, VT, Custom);
        setOperationAction(ISD::VP_STORE, VT, Custom);
        setOperationAction(ISD::VP_GATHER, VT, Custom);
        setOperationAction(ISD::VP_SCATTER, VT, Custom);

        setOperationAction(ISD::FADD, VT, Custom);
        setOperationAction(ISD::FSUB, VT, Custom);
        setOperationAction(ISD::FMUL, VT, Custom);
        setOperationAction(ISD::FDIV, VT, Custom);
        setOperationAction(ISD::FNEG, VT, Custom);
        setOperationAction(ISD::FABS, VT, Custom);
        setOperationAction(ISD::FCOPYSIGN, VT, Custom);
        setOperationAction(ISD::FSQRT, VT, Custom);
        setOperationAction(ISD::FMA, VT, Custom);
        setOperationAction(ISD::FMINNUM, VT, Custom);
        setOperationAction(ISD::FMAXNUM, VT, Custom);

        setOperationAction(ISD::FP_ROUND, VT, Custom);
        setOperationAction(ISD::FP_EXTEND, VT, Custom);

        setOperationAction(ISD::FTRUNC, VT, Custom);
        setOperationAction(ISD::FCEIL, VT, Custom);
        setOperationAction(ISD::FFLOOR, VT, Custom);
        setOperationAction(ISD::FROUND, VT, Custom);

        for (auto CC : VFPCCToExpand)
          setCondCodeAction(CC, VT, Expand);

        setOperationAction(ISD::VSELECT, VT, Custom);
        setOperationAction(ISD::SELECT, VT, Custom);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
        setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

        for (unsigned VPOpc : FloatingPointVPOps)
          setOperationAction(VPOpc, VT, Custom);
      }

      // Custom-legalize bitcasts from fixed-length vectors to scalar types.
      setOperationAction(ISD::BITCAST, MVT::i8, Custom);
      setOperationAction(ISD::BITCAST, MVT::i16, Custom);
      setOperationAction(ISD::BITCAST, MVT::i32, Custom);
      setOperationAction(ISD::BITCAST, MVT::i64, Custom);
      if (Subtarget.hasStdExtZfh())
        setOperationAction(ISD::BITCAST, MVT::f16, Custom);
      if (Subtarget.hasStdExtF())
        setOperationAction(ISD::BITCAST, MVT::f32, Custom);
      if (Subtarget.hasStdExtD())
        setOperationAction(ISD::BITCAST, MVT::f64, Custom);
    }
  }

  // Function alignments.
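  // With the C extension, instructions may be 2 bytes wide, so 2-byte
  // function alignment suffices; otherwise instructions are 4 bytes.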
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  setMinimumJumpTableEntries(5);

  // Jumps are expensive compared to logic.
  setJumpIsExpensive();

  setTargetDAGCombine({ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND,
                       ISD::OR, ISD::XOR});

  if (Subtarget.hasStdExtZbp())
    setTargetDAGCombine({ISD::ROTL, ISD::ROTR});
  if (Subtarget.hasStdExtZbkb())
    setTargetDAGCombine(ISD::BITREVERSE);
  if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZbb())
    setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  if (Subtarget.hasStdExtF())
    setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
                         ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
  if (Subtarget.hasVInstructions())
    setTargetDAGCombine({ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
                         ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL,
                         ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR});

  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
                                            LLVMContext &Context,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  if (Subtarget.hasVInstructions() &&
      (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
    return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}

MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
  return Subtarget.getXLenVT();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  auto &DL = I.getModule()->getDataLayout();
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  case Intrinsic::riscv_masked_strided_load:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = I.getArgOperand(1);
    Info.memVT = getValueType(DL, I.getType()->getScalarType());
    Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::riscv_masked_strided_store:
    Info.opc = ISD::INTRINSIC_VOID;
    Info.ptrVal = I.getArgOperand(1);
    Info.memVT =
        getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
    Info.align = Align(
        DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
        8);
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOStore;
    return true;
  case Intrinsic::riscv_seg2_load:
  case Intrinsic::riscv_seg3_load:
  case Intrinsic::riscv_seg4_load:
  case Intrinsic::riscv_seg5_load:
  case Intrinsic::riscv_seg6_load:
  case Intrinsic::riscv_seg7_load:
  case Intrinsic::riscv_seg8_load:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = I.getArgOperand(0);
    Info.memVT =
        getValueType(DL, I.getType()->getStructElementType(0)->getScalarType());
    Info.align =
        Align(DL.getTypeSizeInBits(
                  I.getType()->getStructElementType(0)->getScalarType()) /
              8);
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOLoad;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  // Don't advertise i32->i64 zextload as being free for RV64. It interacts
  // poorly with type legalization of compares preferring sext.
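  // (For example, (zext (i8 load X)) is selected as a single LBU, which
  // zero-extends for free.)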
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
  EVT VT = Y.getValueType();

  // FIXME: Support vectors once we have tests.
  if (VT.isVector())
    return false;

  return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
          Subtarget.hasStdExtZbkb()) &&
         !isa<ConstantSDNode>(Y);
}

bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
  // We can use ANDI+SEQZ/SNEZ as a bit test. Y contains the bit position.
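  // The mask (1 << Y) must fit ANDI's 12-bit signed immediate, which limits
  // the testable bit positions to 0 through 10 (1 << 11 == 2048 is out of
  // range).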
  auto *C = dyn_cast<ConstantSDNode>(Y);
  return C && C->getAPIntValue().ule(10);
}

/// Check if sinking \p I's operands to I's basic block is profitable, because
/// the operands can be folded into a target instruction, e.g.
/// splats of scalars can fold into vector instructions.
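/// For example, a scalar broadcast (insertelement + shufflevector splat)
/// feeding a vector add can then be selected as a single vadd.vx instead of
/// materializing the splat in a vector register.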
bool RISCVTargetLowering::shouldSinkOperands(
    Instruction *I, SmallVectorImpl<Use *> &Ops) const {
  using namespace llvm::PatternMatch;

  if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
    return false;

  auto IsSinker = [&](Instruction *I, int Operand) {
    switch (I->getOpcode()) {
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::ICmp:
    case Instruction::FCmp:
      return true;
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      return Operand == 1;
    case Instruction::Call:
      if (auto *II = dyn_cast<IntrinsicInst>(I)) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::fma:
        case Intrinsic::vp_fma:
          return Operand == 0 || Operand == 1;
        // FIXME: Our patterns can only match vx/vf instructions when the splat
        // is on the RHS, because TableGen doesn't recognize our VP operations
        // as commutative.
1289         case Intrinsic::vp_add:
1290         case Intrinsic::vp_mul:
1291         case Intrinsic::vp_and:
1292         case Intrinsic::vp_or:
1293         case Intrinsic::vp_xor:
1294         case Intrinsic::vp_fadd:
1295         case Intrinsic::vp_fmul:
1296         case Intrinsic::vp_shl:
1297         case Intrinsic::vp_lshr:
1298         case Intrinsic::vp_ashr:
1299         case Intrinsic::vp_udiv:
1300         case Intrinsic::vp_sdiv:
1301         case Intrinsic::vp_urem:
1302         case Intrinsic::vp_srem:
1303           return Operand == 1;
1304         // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
1305         // explicit patterns for both LHS and RHS (as 'vr' versions).
1306         case Intrinsic::vp_sub:
1307         case Intrinsic::vp_fsub:
1308         case Intrinsic::vp_fdiv:
1309           return Operand == 0 || Operand == 1;
1310         default:
1311           return false;
1312         }
1313       }
1314       return false;
1315     default:
1316       return false;
1317     }
1318   };
1319 
1320   for (auto OpIdx : enumerate(I->operands())) {
1321     if (!IsSinker(I, OpIdx.index()))
1322       continue;
1323 
1324     Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
    // Make sure we are not already sinking this operand.
1326     if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
1327       continue;
1328 
1329     // We are looking for a splat that can be sunk.
1330     if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
1331                              m_Undef(), m_ZeroMask())))
1332       continue;
1333 
    // All uses of the shuffle should be sunk to avoid duplicating it across
    // GPR and vector registers.
1336     for (Use &U : Op->uses()) {
1337       Instruction *Insn = cast<Instruction>(U.getUser());
1338       if (!IsSinker(Insn, U.getOperandNo()))
1339         return false;
1340     }
1341 
1342     Ops.push_back(&Op->getOperandUse(0));
1343     Ops.push_back(&OpIdx.value());
1344   }
1345   return true;
1346 }
1347 
1348 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
1349                                        bool ForCodeSize) const {
1350   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1351   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
1352     return false;
1353   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
1354     return false;
1355   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
1356     return false;
1357   return Imm.isZero();
1358 }
1359 
1360 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1361   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1362          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1363          (VT == MVT::f64 && Subtarget.hasStdExtD());
1364 }
1365 
MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                       CallingConv::ID CC,
                                                       EVT VT) const {
1369   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1370   // We might still end up using a GPR but that will be decided based on ABI.
1371   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1372   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1373     return MVT::f32;
1374 
1375   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1376 }
1377 
unsigned RISCVTargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
1381   // Use f32 to pass f16 if it is legal and Zfh is not enabled.
1382   // We might still end up using a GPR but that will be decided based on ABI.
1383   // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
1384   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1385     return 1;
1386 
1387   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1388 }
1389 
1390 // Changes the condition code and swaps operands if necessary, so the SetCC
1391 // operation matches one of the comparisons supported directly by branches
1392 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1393 // with 1/-1.
1394 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1395                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1396   // Convert X > -1 to X >= 0.
1397   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1398     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1399     CC = ISD::SETGE;
1400     return;
1401   }
1402   // Convert X < 1 to 0 >= X.
1403   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1404     RHS = LHS;
1405     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1406     CC = ISD::SETGE;
1407     return;
1408   }
1409 
1410   switch (CC) {
1411   default:
1412     break;
1413   case ISD::SETGT:
1414   case ISD::SETLE:
1415   case ISD::SETUGT:
1416   case ISD::SETULE:
1417     CC = ISD::getSetCCSwappedOperands(CC);
1418     std::swap(LHS, RHS);
1419     break;
1420   }
1421 }
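
// As an example of the rewrite above, (setcc X, 5, setgt) has no direct
// branch equivalent (the ISA only provides eq/ne/lt/ge and their unsigned
// forms), so it becomes (setcc 5, X, setlt), which maps directly onto BLT.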
1422 
1423 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1424   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1425   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1426   if (VT.getVectorElementType() == MVT::i1)
1427     KnownSize *= 8;
1428 
1429   switch (KnownSize) {
1430   default:
1431     llvm_unreachable("Invalid LMUL.");
1432   case 8:
1433     return RISCVII::VLMUL::LMUL_F8;
1434   case 16:
1435     return RISCVII::VLMUL::LMUL_F4;
1436   case 32:
1437     return RISCVII::VLMUL::LMUL_F2;
1438   case 64:
1439     return RISCVII::VLMUL::LMUL_1;
1440   case 128:
1441     return RISCVII::VLMUL::LMUL_2;
1442   case 256:
1443     return RISCVII::VLMUL::LMUL_4;
1444   case 512:
1445     return RISCVII::VLMUL::LMUL_8;
1446   }
1447 }
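
// To illustrate the mapping above: nxv1i32 has a known minimum size of 32
// bits and maps to LMUL_F2, nxv2i32 (64 bits) maps to LMUL_1, and nxv4i32
// (128 bits) maps to LMUL_2. A mask type such as nxv8i1 is first scaled by 8
// (8 * 8 = 64 bits) and therefore also maps to LMUL_1.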
1448 
1449 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1450   switch (LMul) {
1451   default:
1452     llvm_unreachable("Invalid LMUL.");
1453   case RISCVII::VLMUL::LMUL_F8:
1454   case RISCVII::VLMUL::LMUL_F4:
1455   case RISCVII::VLMUL::LMUL_F2:
1456   case RISCVII::VLMUL::LMUL_1:
1457     return RISCV::VRRegClassID;
1458   case RISCVII::VLMUL::LMUL_2:
1459     return RISCV::VRM2RegClassID;
1460   case RISCVII::VLMUL::LMUL_4:
1461     return RISCV::VRM4RegClassID;
1462   case RISCVII::VLMUL::LMUL_8:
1463     return RISCV::VRM8RegClassID;
1464   }
1465 }
1466 
1467 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1468   RISCVII::VLMUL LMUL = getLMUL(VT);
1469   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1470       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1471       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1472       LMUL == RISCVII::VLMUL::LMUL_1) {
1473     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1474                   "Unexpected subreg numbering");
1475     return RISCV::sub_vrm1_0 + Index;
1476   }
1477   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1478     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1479                   "Unexpected subreg numbering");
1480     return RISCV::sub_vrm2_0 + Index;
1481   }
1482   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1483     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1484                   "Unexpected subreg numbering");
1485     return RISCV::sub_vrm4_0 + Index;
1486   }
1487   llvm_unreachable("Invalid vector type.");
1488 }
1489 
1490 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1491   if (VT.getVectorElementType() == MVT::i1)
1492     return RISCV::VRRegClassID;
1493   return getRegClassIDForLMUL(getLMUL(VT));
1494 }
1495 
1496 // Attempt to decompose a subvector insert/extract between VecVT and
1497 // SubVecVT via subregister indices. Returns the subregister index that
1498 // can perform the subvector insert/extract with the given element index, as
1499 // well as the index corresponding to any leftover subvectors that must be
1500 // further inserted/extracted within the register class for SubVecVT.
1501 std::pair<unsigned, unsigned>
1502 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1503     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1504     const RISCVRegisterInfo *TRI) {
1505   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1506                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1507                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1508                 "Register classes not ordered");
1509   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1510   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
  // Try to compose a subregister index that takes us from the incoming
  // LMUL>1 register class down to the outgoing one. At each step we halve
  // the LMUL:
1514   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1515   // Note that this is not guaranteed to find a subregister index, such as
1516   // when we are extracting from one VR type to another.
1517   unsigned SubRegIdx = RISCV::NoSubRegister;
1518   for (const unsigned RCID :
1519        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1520     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1521       VecVT = VecVT.getHalfNumVectorElementsVT();
1522       bool IsHi =
1523           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1524       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1525                                             getSubregIndexByMVT(VecVT, IsHi));
1526       if (IsHi)
1527         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1528     }
1529   return {SubRegIdx, InsertExtractIdx};
1530 }
1531 
1532 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1533 // stores for those types.
1534 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1535   return !Subtarget.useRVVForFixedLengthVectors() ||
1536          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1537 }
1538 
1539 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1540   if (ScalarTy->isPointerTy())
1541     return true;
1542 
1543   if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1544       ScalarTy->isIntegerTy(32))
1545     return true;
1546 
1547   if (ScalarTy->isIntegerTy(64))
1548     return Subtarget.hasVInstructionsI64();
1549 
1550   if (ScalarTy->isHalfTy())
1551     return Subtarget.hasVInstructionsF16();
1552   if (ScalarTy->isFloatTy())
1553     return Subtarget.hasVInstructionsF32();
1554   if (ScalarTy->isDoubleTy())
1555     return Subtarget.hasVInstructionsF64();
1556 
1557   return false;
1558 }
1559 
1560 static SDValue getVLOperand(SDValue Op) {
1561   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
1562           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
1563          "Unexpected opcode");
1564   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
1565   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
1566   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
1567       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
1568   if (!II)
1569     return SDValue();
1570   return Op.getOperand(II->VLOperand + 1 + HasChain);
1571 }
1572 
1573 static bool useRVVForFixedLengthVectorVT(MVT VT,
1574                                          const RISCVSubtarget &Subtarget) {
1575   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1576   if (!Subtarget.useRVVForFixedLengthVectors())
1577     return false;
1578 
1579   // We only support a set of vector types with a consistent maximum fixed size
1580   // across all supported vector element types to avoid legalization issues.
1581   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1582   // fixed-length vector type we support is 1024 bytes.
1583   if (VT.getFixedSizeInBits() > 1024 * 8)
1584     return false;
1585 
1586   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1587 
1588   MVT EltVT = VT.getVectorElementType();
1589 
1590   // Don't use RVV for vectors we cannot scalarize if required.
1591   switch (EltVT.SimpleTy) {
1592   // i1 is supported but has different rules.
1593   default:
1594     return false;
1595   case MVT::i1:
1596     // Masks can only use a single register.
1597     if (VT.getVectorNumElements() > MinVLen)
1598       return false;
1599     MinVLen /= 8;
1600     break;
1601   case MVT::i8:
1602   case MVT::i16:
1603   case MVT::i32:
1604     break;
1605   case MVT::i64:
1606     if (!Subtarget.hasVInstructionsI64())
1607       return false;
1608     break;
1609   case MVT::f16:
1610     if (!Subtarget.hasVInstructionsF16())
1611       return false;
1612     break;
1613   case MVT::f32:
1614     if (!Subtarget.hasVInstructionsF32())
1615       return false;
1616     break;
1617   case MVT::f64:
1618     if (!Subtarget.hasVInstructionsF64())
1619       return false;
1620     break;
1621   }
1622 
1623   // Reject elements larger than ELEN.
1624   if (EltVT.getSizeInBits() > Subtarget.getELEN())
1625     return false;
1626 
1627   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1628   // Don't use RVV for types that don't fit.
1629   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1630     return false;
1631 
1632   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1633   // the base fixed length RVV support in place.
1634   if (!VT.isPow2VectorType())
1635     return false;
1636 
1637   return true;
1638 }
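
// As a worked example of the above (assuming a minimum RVV vector length of
// 128 bits): v8i32 is 256 bits, so LMul = ceil(256 / 128) = 2 and the type is
// only used when the maximum LMUL for fixed-length vectors is at least 2,
// while a v3i32 would be rejected by the power-of-two check regardless.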
1639 
1640 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1641   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1642 }
1643 
1644 // Return the largest legal scalable vector type that matches VT's element type.
1645 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1646                                             const RISCVSubtarget &Subtarget) {
  // This may be called before legal types are set up.
1648   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1649           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1650          "Expected legal fixed length vector!");
1651 
1652   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1653   unsigned MaxELen = Subtarget.getELEN();
1654 
1655   MVT EltVT = VT.getVectorElementType();
1656   switch (EltVT.SimpleTy) {
1657   default:
1658     llvm_unreachable("unexpected element type for RVV container");
1659   case MVT::i1:
1660   case MVT::i8:
1661   case MVT::i16:
1662   case MVT::i32:
1663   case MVT::i64:
1664   case MVT::f16:
1665   case MVT::f32:
1666   case MVT::f64: {
1667     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1668     // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
1669     // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
1670     unsigned NumElts =
1671         (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
1672     NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
1673     assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
1674     return MVT::getScalableVectorVT(EltVT, NumElts);
1675   }
1676   }
1677 }
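
// As a worked example with MinVLen = 128: v4i32 gives NumElts =
// (4 * 64) / 128 = 2, i.e. the container nxv2i32 (LMUL=1 at VLEN=128), while
// v2i32 gives NumElts = 1, i.e. the fractional-LMUL container nxv1i32.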
1678 
1679 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1680                                             const RISCVSubtarget &Subtarget) {
1681   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1682                                           Subtarget);
1683 }
1684 
1685 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1686   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1687 }
1688 
1689 // Grow V to consume an entire RVV register.
1690 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1691                                        const RISCVSubtarget &Subtarget) {
1692   assert(VT.isScalableVector() &&
1693          "Expected to convert into a scalable vector!");
1694   assert(V.getValueType().isFixedLengthVector() &&
1695          "Expected a fixed length vector operand!");
1696   SDLoc DL(V);
1697   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1698   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1699 }
1700 
1701 // Shrink V so it's just big enough to maintain a VT's worth of data.
1702 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1703                                          const RISCVSubtarget &Subtarget) {
1704   assert(VT.isFixedLengthVector() &&
1705          "Expected to convert into a fixed length vector!");
1706   assert(V.getValueType().isScalableVector() &&
1707          "Expected a scalable vector operand!");
1708   SDLoc DL(V);
1709   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1710   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1711 }
1712 
1713 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1714 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1715 // the vector type that it is contained in.
1716 static std::pair<SDValue, SDValue>
1717 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1718                 const RISCVSubtarget &Subtarget) {
1719   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1720   MVT XLenVT = Subtarget.getXLenVT();
1721   SDValue VL = VecVT.isFixedLengthVector()
1722                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1723                    : DAG.getRegister(RISCV::X0, XLenVT);
1724   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1725   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1726   return {Mask, VL};
1727 }
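
// For a fixed-length type such as v4i32 the above yields VL = 4 and an
// all-ones mask of type nxvXi1; for a scalable type it instead uses X0 as the
// sentinel value requesting VLMAX.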
1728 
1729 // As above but assuming the given type is a scalable vector type.
1730 static std::pair<SDValue, SDValue>
1731 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1732                         const RISCVSubtarget &Subtarget) {
1733   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1734   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1735 }
1736 
1737 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1738 // of either is (currently) supported. This can get us into an infinite loop
1739 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1740 // as a ..., etc.
1741 // Until either (or both) of these can reliably lower any node, reporting that
1742 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1743 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1744 // which is not desirable.
1745 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1746     EVT VT, unsigned DefinedValues) const {
1747   return false;
1748 }
1749 
1750 static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
1751                                   const RISCVSubtarget &Subtarget) {
1752   // RISCV FP-to-int conversions saturate to the destination register size, but
1753   // don't produce 0 for nan. We can use a conversion instruction and fix the
1754   // nan case with a compare and a select.
1755   SDValue Src = Op.getOperand(0);
1756 
1757   EVT DstVT = Op.getValueType();
1758   EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1759 
1760   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
1761   unsigned Opc;
1762   if (SatVT == DstVT)
1763     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
1764   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
1765     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
1766   else
1767     return SDValue();
1768   // FIXME: Support other SatVTs by clamping before or after the conversion.
1769 
1770   SDLoc DL(Op);
1771   SDValue FpToInt = DAG.getNode(
1772       Opc, DL, DstVT, Src,
1773       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));
1774 
1775   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
1776   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
1777 }
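
// The DAG produced above for, e.g., a saturating f32 -> i32 fptosi is roughly
//   (select (setcc Src, Src, setuo), 0, (FCVT_X Src, RTZ))
// where the unordered self-compare is true only when Src is nan, selecting
// the zero result that the saturating semantics require in that case.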
1778 
1779 // Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
1780 // and back. Taking care to avoid converting values that are nan or already
1781 // correct.
1782 // TODO: Floor and ceil could be shorter by changing rounding mode, but we don't
1783 // have FRM dependencies modeled yet.
1784 static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
1785   MVT VT = Op.getSimpleValueType();
1786   assert(VT.isVector() && "Unexpected type");
1787 
1788   SDLoc DL(Op);
1789 
1790   // Freeze the source since we are increasing the number of uses.
1791   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1792 
1793   // Truncate to integer and convert back to FP.
1794   MVT IntVT = VT.changeVectorElementTypeToInteger();
1795   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
1796   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1797 
1798   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1799 
1800   if (Op.getOpcode() == ISD::FCEIL) {
    // If the truncated value is greater than or equal to the original
    // value, we've computed the ceil. Otherwise, we went the wrong way and
    // need to increase by 1.
1804     // FIXME: This should use a masked operation. Handle here or in isel?
1805     SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
1806                                  DAG.getConstantFP(1.0, DL, VT));
1807     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
1808     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1809   } else if (Op.getOpcode() == ISD::FFLOOR) {
    // If the truncated value is less than or equal to the original value,
    // we've computed the floor. Otherwise, we went the wrong way and need to
    // decrease by 1.
1813     // FIXME: This should use a masked operation. Handle here or in isel?
1814     SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
1815                                  DAG.getConstantFP(1.0, DL, VT));
1816     SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
1817     Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
1818   }
1819 
1820   // Restore the original sign so that -0.0 is preserved.
1821   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1822 
1823   // Determine the largest integer that can be represented exactly. This and
1824   // values larger than it don't have any fractional bits so don't need to
1825   // be converted.
1826   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1827   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1828   APFloat MaxVal = APFloat(FltSem);
1829   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1830                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1831   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1832 
1833   // If abs(Src) was larger than MaxVal or nan, keep it.
1834   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1835   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1836   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1837 }
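
// A quick numeric check of the above: fceil(2.3) truncates to 2.0, and since
// 2.0 < 2.3 the value is adjusted up to 3.0; ffloor(-2.3) truncates to -2.0,
// and since -2.0 > -2.3 the value is adjusted down to -3.0. For f32 the
// MaxVal cutoff is 2^23, above which every representable value is already an
// integer.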
1838 
1839 // ISD::FROUND is defined to round to nearest with ties rounding away from 0.
1840 // This mode isn't supported in vector hardware on RISCV. But as long as we
1841 // aren't compiling with trapping math, we can emulate this with
1842 // floor(X + copysign(nextafter(0.5, 0.0), X)).
1843 // FIXME: Could be shorter by changing rounding mode, but we don't have FRM
1844 // dependencies modeled yet.
1845 // FIXME: Use masked operations to avoid final merge.
1846 static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
1847   MVT VT = Op.getSimpleValueType();
1848   assert(VT.isVector() && "Unexpected type");
1849 
1850   SDLoc DL(Op);
1851 
1852   // Freeze the source since we are increasing the number of uses.
1853   SDValue Src = DAG.getFreeze(Op.getOperand(0));
1854 
1855   // We do the conversion on the absolute value and fix the sign at the end.
1856   SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
1857 
1858   const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
1859   bool Ignored;
1860   APFloat Point5Pred = APFloat(0.5f);
1861   Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
1862   Point5Pred.next(/*nextDown*/ true);
1863 
1864   // Add the adjustment.
1865   SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
1866                                DAG.getConstantFP(Point5Pred, DL, VT));
1867 
1868   // Truncate to integer and convert back to fp.
1869   MVT IntVT = VT.changeVectorElementTypeToInteger();
1870   SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
1871   Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);
1872 
1873   // Restore the original sign.
1874   Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);
1875 
1876   // Determine the largest integer that can be represented exactly. This and
1877   // values larger than it don't have any fractional bits so don't need to
1878   // be converted.
1879   unsigned Precision = APFloat::semanticsPrecision(FltSem);
1880   APFloat MaxVal = APFloat(FltSem);
1881   MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
1882                           /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
1883   SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);
1884 
1885   // If abs(Src) was larger than MaxVal or nan, keep it.
1886   MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1887   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
1888   return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
1889 }
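
// The nextafter adjustment above matters for inputs just below a tie point:
// with a plain 0.5 addend, rounding the f32 value 0.49999997 (the largest
// float below 0.5) would compute 0.49999997 + 0.5, which rounds up to exactly
// 1.0 and would truncate to 1 rather than 0. Using nextafter(0.5, 0.0) =
// 0.49999997 as the addend keeps that sum at 0.99999994, which correctly
// truncates to 0, while an input of 0.5 itself still rounds up to 1 as
// required.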
1890 
1891 struct VIDSequence {
1892   int64_t StepNumerator;
1893   unsigned StepDenominator;
1894   int64_t Addend;
1895 };
1896 
1897 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
// to the (non-zero) step S and start value X. This can then be lowered as the
// RVV sequence (VID * S) + X, for example.
1900 // The step S is represented as an integer numerator divided by a positive
1901 // denominator. Note that the implementation currently only identifies
1902 // sequences in which either the numerator is +/- 1 or the denominator is 1. It
1903 // cannot detect 2/3, for example.
// Note that this method will also match potentially unappealing index
// sequences, like <i32 0, i32 50939494>; it is left to the caller to
// determine whether such sequences are worth generating code for.
1907 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1908   unsigned NumElts = Op.getNumOperands();
1909   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1910   if (!Op.getValueType().isInteger())
1911     return None;
1912 
1913   Optional<unsigned> SeqStepDenom;
1914   Optional<int64_t> SeqStepNum, SeqAddend;
1915   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1916   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1917   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1918     // Assume undef elements match the sequence; we just have to be careful
1919     // when interpolating across them.
1920     if (Op.getOperand(Idx).isUndef())
1921       continue;
1922     // The BUILD_VECTOR must be all constants.
1923     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1924       return None;
1925 
1926     uint64_t Val = Op.getConstantOperandVal(Idx) &
1927                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1928 
1929     if (PrevElt) {
1930       // Calculate the step since the last non-undef element, and ensure
1931       // it's consistent across the entire sequence.
1932       unsigned IdxDiff = Idx - PrevElt->second;
1933       int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1934 
      // A value difference of zero means that we're somewhere in the middle
      // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
      // step change before evaluating the sequence.
1938       if (ValDiff != 0) {
1939         int64_t Remainder = ValDiff % IdxDiff;
1940         // Normalize the step if it's greater than 1.
1941         if (Remainder != ValDiff) {
1942           // The difference must cleanly divide the element span.
1943           if (Remainder != 0)
1944             return None;
1945           ValDiff /= IdxDiff;
1946           IdxDiff = 1;
1947         }
1948 
1949         if (!SeqStepNum)
1950           SeqStepNum = ValDiff;
1951         else if (ValDiff != SeqStepNum)
1952           return None;
1953 
1954         if (!SeqStepDenom)
1955           SeqStepDenom = IdxDiff;
1956         else if (IdxDiff != *SeqStepDenom)
1957           return None;
1958       }
1959     }
1960 
1961     // Record and/or check any addend.
1962     if (SeqStepNum && SeqStepDenom) {
1963       uint64_t ExpectedVal =
1964           (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
1965       int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
1966       if (!SeqAddend)
1967         SeqAddend = Addend;
1968       else if (SeqAddend != Addend)
1969         return None;
1970     }
1971 
1972     // Record this non-undef element for later.
1973     if (!PrevElt || PrevElt->first != Val)
1974       PrevElt = std::make_pair(Val, Idx);
1975   }
1976   // We need to have logged both a step and an addend for this to count as
1977   // a legal index sequence.
1978   if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
1979     return None;
1980 
1981   return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
1982 }
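
// Examples of sequences matched above: <0,2,4,6> yields StepNumerator=2,
// StepDenominator=1, Addend=0; <0,0,1,1> yields StepNumerator=1,
// StepDenominator=2, Addend=0; and <5,6,7,8> yields StepNumerator=1,
// StepDenominator=1, Addend=5.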
1983 
1984 // Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
1985 // and lower it as a VRGATHER_VX_VL from the source vector.
1986 static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
1987                                   SelectionDAG &DAG,
1988                                   const RISCVSubtarget &Subtarget) {
1989   if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
1990     return SDValue();
1991   SDValue Vec = SplatVal.getOperand(0);
1992   // Only perform this optimization on vectors of the same size for simplicity.
1993   if (Vec.getValueType() != VT)
1994     return SDValue();
1995   SDValue Idx = SplatVal.getOperand(1);
1996   // The index must be a legal type.
1997   if (Idx.getValueType() != Subtarget.getXLenVT())
1998     return SDValue();
1999 
2000   MVT ContainerVT = VT;
2001   if (VT.isFixedLengthVector()) {
2002     ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2003     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2004   }
2005 
2006   SDValue Mask, VL;
2007   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2008 
2009   SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
2010                                Idx, Mask, VL);
2011 
2012   if (!VT.isFixedLengthVector())
2013     return Gather;
2014 
2015   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2016 }
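
// For example, a splat of (extractelement %v, %i) taken from a vector of the
// same type becomes a single vrgather.vx of %v with scalar index %i, rather
// than moving the element out to a scalar register and splatting it back.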
2017 
2018 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
2019                                  const RISCVSubtarget &Subtarget) {
2020   MVT VT = Op.getSimpleValueType();
2021   assert(VT.isFixedLengthVector() && "Unexpected vector!");
2022 
2023   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2024 
2025   SDLoc DL(Op);
2026   SDValue Mask, VL;
2027   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2028 
2029   MVT XLenVT = Subtarget.getXLenVT();
2030   unsigned NumElts = Op.getNumOperands();
2031 
2032   if (VT.getVectorElementType() == MVT::i1) {
2033     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
2034       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
2035       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
2036     }
2037 
2038     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
2039       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
2040       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
2041     }
2042 
2043     // Lower constant mask BUILD_VECTORs via an integer vector type, in
2044     // scalar integer chunks whose bit-width depends on the number of mask
2045     // bits and XLEN.
2046     // First, determine the most appropriate scalar integer type to use. This
2047     // is at most XLenVT, but may be shrunk to a smaller vector element type
2048     // according to the size of the final vector - use i8 chunks rather than
2049     // XLenVT if we're producing a v8i1. This results in more consistent
2050     // codegen across RV32 and RV64.
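    // As a concrete example, the v8i1 constant <1,0,1,1,0,0,0,1> is
    // accumulated LSB-first into the i8 value 0b10001101 below, materialized
    // as a one-element v1i8 build vector, and bitcast back to v8i1.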
2051     unsigned NumViaIntegerBits =
2052         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
2053     NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELEN());
2054     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
      // If we have to use more than one INSERT_VECTOR_ELT then this
      // optimization is likely to increase code size; avoid performing it in
      // such a case. A load from a constant pool can be used instead.
2058       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
2059         return SDValue();
2060       // Now we can create our integer vector type. Note that it may be larger
2061       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
2062       MVT IntegerViaVecVT =
2063           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
2064                            divideCeil(NumElts, NumViaIntegerBits));
2065 
2066       uint64_t Bits = 0;
2067       unsigned BitPos = 0, IntegerEltIdx = 0;
2068       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
2069 
2070       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
2071         // Once we accumulate enough bits to fill our scalar type, insert into
2072         // our vector and clear our accumulated data.
2073         if (I != 0 && I % NumViaIntegerBits == 0) {
2074           if (NumViaIntegerBits <= 32)
2075             Bits = SignExtend64(Bits, 32);
2076           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2077           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
2078                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2079           Bits = 0;
2080           BitPos = 0;
2081           IntegerEltIdx++;
2082         }
2083         SDValue V = Op.getOperand(I);
2084         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
2085         Bits |= ((uint64_t)BitValue << BitPos);
2086       }
2087 
2088       // Insert the (remaining) scalar value into position in our integer
2089       // vector type.
2090       if (NumViaIntegerBits <= 32)
2091         Bits = SignExtend64(Bits, 32);
2092       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
2093       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
2094                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
2095 
2096       if (NumElts < NumViaIntegerBits) {
2097         // If we're producing a smaller vector than our minimum legal integer
2098         // type, bitcast to the equivalent (known-legal) mask type, and extract
2099         // our final mask.
2100         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
2101         Vec = DAG.getBitcast(MVT::v8i1, Vec);
2102         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
2103                           DAG.getConstant(0, DL, XLenVT));
2104       } else {
2105         // Else we must have produced an integer type with the same size as the
2106         // mask type; bitcast for the final result.
2107         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
2108         Vec = DAG.getBitcast(VT, Vec);
2109       }
2110 
2111       return Vec;
2112     }
2113 
2114     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
2115     // vector type, we have a legal equivalently-sized i8 type, so we can use
2116     // that.
2117     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
2118     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
2119 
2120     SDValue WideVec;
2121     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2122       // For a splat, perform a scalar truncate before creating the wider
2123       // vector.
2124       assert(Splat.getValueType() == XLenVT &&
2125              "Unexpected type for i1 splat value");
2126       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
2127                           DAG.getConstant(1, DL, XLenVT));
2128       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
2129     } else {
2130       SmallVector<SDValue, 8> Ops(Op->op_values());
2131       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
2132       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
2133       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
2134     }
2135 
2136     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
2137   }
2138 
2139   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
2140     if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
2141       return Gather;
2142     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
2143                                         : RISCVISD::VMV_V_X_VL;
2144     Splat =
2145         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
2146     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2147   }
2148 
2149   // Try and match index sequences, which we can lower to the vid instruction
2150   // with optional modifications. An all-undef vector is matched by
2151   // getSplatValue, above.
2152   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
2153     int64_t StepNumerator = SimpleVID->StepNumerator;
2154     unsigned StepDenominator = SimpleVID->StepDenominator;
2155     int64_t Addend = SimpleVID->Addend;
2156 
2157     assert(StepNumerator != 0 && "Invalid step");
2158     bool Negate = false;
2159     int64_t SplatStepVal = StepNumerator;
2160     unsigned StepOpcode = ISD::MUL;
2161     if (StepNumerator != 1) {
2162       if (isPowerOf2_64(std::abs(StepNumerator))) {
2163         Negate = StepNumerator < 0;
2164         StepOpcode = ISD::SHL;
2165         SplatStepVal = Log2_64(std::abs(StepNumerator));
2166       }
2167     }
2168 
    // Only emit VIDs with suitably-small steps/addends. We use imm5 as a
    // threshold since it's the immediate value many RVV instructions accept.
    // There is no vmul.vi instruction so ensure the multiply constant can fit
    // in a single addi instruction.
2173     if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
2174          (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
2175         isPowerOf2_32(StepDenominator) && isInt<5>(Addend)) {
2176       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
2177       // Convert right out of the scalable type so we can use standard ISD
2178       // nodes for the rest of the computation. If we used scalable types with
2179       // these, we'd lose the fixed-length vector info and generate worse
2180       // vsetvli code.
2181       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
2182       if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
2183           (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
2184         SDValue SplatStep = DAG.getSplatBuildVector(
2185             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
2186         VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
2187       }
2188       if (StepDenominator != 1) {
2189         SDValue SplatStep = DAG.getSplatBuildVector(
2190             VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
2191         VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
2192       }
2193       if (Addend != 0 || Negate) {
2194         SDValue SplatAddend = DAG.getSplatBuildVector(
2195             VT, DL, DAG.getConstant(Addend, DL, XLenVT));
        VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend,
                          VID);
2197       }
2198       return VID;
2199     }
2200   }
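  // Putting the above together: <0,2,4,6> is matched with StepNumerator=2 and
  // lowered as (shl (vid), 1), while <3,4,5,6> is lowered as (add (vid), 3),
  // with each splat operand fitting a .vi immediate form where possible.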
2201 
2202   // Attempt to detect "hidden" splats, which only reveal themselves as splats
2203   // when re-interpreted as a vector with a larger element type. For example,
2204   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
2205   // could be instead splat as
2206   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
2207   // TODO: This optimization could also work on non-constant splats, but it
2208   // would require bit-manipulation instructions to construct the splat value.
2209   SmallVector<SDValue> Sequence;
2210   unsigned EltBitSize = VT.getScalarSizeInBits();
2211   const auto *BV = cast<BuildVectorSDNode>(Op);
2212   if (VT.isInteger() && EltBitSize < 64 &&
2213       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
2214       BV->getRepeatedSequence(Sequence) &&
2215       (Sequence.size() * EltBitSize) <= 64) {
2216     unsigned SeqLen = Sequence.size();
2217     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
2218     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
2219     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
2220             ViaIntVT == MVT::i64) &&
2221            "Unexpected sequence type");
2222 
2223     unsigned EltIdx = 0;
2224     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
2225     uint64_t SplatValue = 0;
2226     // Construct the amalgamated value which can be splatted as this larger
2227     // vector type.
2228     for (const auto &SeqV : Sequence) {
2229       if (!SeqV.isUndef())
2230         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
2231                        << (EltIdx * EltBitSize));
2232       EltIdx++;
2233     }
2234 
    // On RV64, sign-extend from 32 to 64 bits where possible in order to
    // achieve better constant materialization.
2237     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
2238       SplatValue = SignExtend64(SplatValue, 32);
2239 
2240     // Since we can't introduce illegal i64 types at this stage, we can only
2241     // perform an i64 splat on RV32 if it is its own sign-extended value. That
2242     // way we can use RVV instructions to splat.
2243     assert((ViaIntVT.bitsLE(XLenVT) ||
2244             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
2245            "Unexpected bitcast sequence");
2246     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
2247       SDValue ViaVL =
2248           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
2249       MVT ViaContainerVT =
2250           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
2251       SDValue Splat =
2252           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
2253                       DAG.getUNDEF(ViaContainerVT),
2254                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
2255       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
2256       return DAG.getBitcast(VT, Splat);
2257     }
2258   }
2259 
2260   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
2261   // which constitute a large proportion of the elements. In such cases we can
2262   // splat a vector with the dominant element and make up the shortfall with
2263   // INSERT_VECTOR_ELTs.
2264   // Note that this includes vectors of 2 elements by association. The
2265   // upper-most element is the "dominant" one, allowing us to use a splat to
2266   // "insert" the upper element, and an insert of the lower element at position
2267   // 0, which improves codegen.
2268   SDValue DominantValue;
2269   unsigned MostCommonCount = 0;
2270   DenseMap<SDValue, unsigned> ValueCounts;
2271   unsigned NumUndefElts =
2272       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
2273 
  // Track the number of scalar loads we know we'd be inserting, estimated as
  // one per non-zero floating-point constant. Other kinds of element are
  // either already in registers or are materialized on demand. The threshold
  // at which a vector load is more desirable than several scalar
  // materialization and vector-insertion instructions is not known.
2279   unsigned NumScalarLoads = 0;
2280 
2281   for (SDValue V : Op->op_values()) {
2282     if (V.isUndef())
2283       continue;
2284 
2285     ValueCounts.insert(std::make_pair(V, 0));
2286     unsigned &Count = ValueCounts[V];
2287 
2288     if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
2289       NumScalarLoads += !CFP->isExactlyValue(+0.0);
2290 
2291     // Is this value dominant? In case of a tie, prefer the highest element as
2292     // it's cheaper to insert near the beginning of a vector than it is at the
2293     // end.
2294     if (++Count >= MostCommonCount) {
2295       DominantValue = V;
2296       MostCommonCount = Count;
2297     }
2298   }
2299 
2300   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
2301   unsigned NumDefElts = NumElts - NumUndefElts;
2302   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
2303 
2304   // Don't perform this optimization when optimizing for size, since
2305   // materializing elements and inserting them tends to cause code bloat.
2306   if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
2307       ((MostCommonCount > DominantValueCountThreshold) ||
2308        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
2309     // Start by splatting the most common element.
2310     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
2311 
2312     DenseSet<SDValue> Processed{DominantValue};
2313     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
2314     for (const auto &OpIdx : enumerate(Op->ops())) {
2315       const SDValue &V = OpIdx.value();
2316       if (V.isUndef() || !Processed.insert(V).second)
2317         continue;
2318       if (ValueCounts[V] == 1) {
2319         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
2320                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
2321       } else {
2322         // Blend in all instances of this value using a VSELECT, using a
2323         // mask where each bit signals whether that element is the one
2324         // we're after.
2325         SmallVector<SDValue> Ops;
2326         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
2327           return DAG.getConstant(V == V1, DL, XLenVT);
2328         });
2329         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
2330                           DAG.getBuildVector(SelMaskTy, DL, Ops),
2331                           DAG.getSplatBuildVector(VT, DL, V), Vec);
2332       }
2333     }
2334 
2335     return Vec;
2336   }
2337 
2338   return SDValue();
2339 }
2340 
2341 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2342                                    SDValue Lo, SDValue Hi, SDValue VL,
2343                                    SelectionDAG &DAG) {
2344   if (!Passthru)
2345     Passthru = DAG.getUNDEF(VT);
2346   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2347     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2348     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If the Hi constant is simply the sign-extension of Lo (all of Hi's bits
    // equal Lo's sign bit), lower this as a custom node in order to try and
    // match RVV vector/scalar instructions.
2351     if ((LoC >> 31) == HiC)
2352       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);
2353 
    // If VL is all ones (the sentinel requesting VLMAX) and the Hi constant is
    // equal to Lo, we can use a vmv.v.x with EEW=32 to lower it.
    auto *Const = dyn_cast<ConstantSDNode>(VL);
    if (LoC == HiC && Const && Const->isAllOnesValue()) {
      MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
      // TODO: We could also do this when vl <= min(VLMAX), but we don't have
      // access to the subtarget here.
      auto InterVec =
          DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT),
                      Lo, DAG.getRegister(RISCV::X0, MVT::i32));
2364       return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
2365     }
2366   }
2367 
2368   // Fall back to a stack store and stride x0 vector load.
2369   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
2370                      Hi, VL);
2371 }
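
// For instance, splatting the i64 constant -1 on RV32 arrives here with
// Lo = Hi = -1, so (LoC >> 31) == HiC holds and a single vmv.v.x of -1
// suffices; an arbitrary pair of halves falls through to the stack-based
// splat above.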
2372 
2373 // Called by type legalization to handle splat of i64 on RV32.
2374 // FIXME: We can optimize this when the type has sign or zero bits in one
2375 // of the halves.
2376 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
2377                                    SDValue Scalar, SDValue VL,
2378                                    SelectionDAG &DAG) {
2379   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
2380   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2381                            DAG.getConstant(0, DL, MVT::i32));
2382   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
2383                            DAG.getConstant(1, DL, MVT::i32));
2384   return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
2385 }
2386 
2387 // This function lowers a splat of a scalar operand Splat with the vector
2388 // length VL. It ensures the final sequence is type legal, which is useful when
2389 // lowering a splat after type legalization.
2390 static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
2391                                 MVT VT, SDLoc DL, SelectionDAG &DAG,
2392                                 const RISCVSubtarget &Subtarget) {
2393   bool HasPassthru = Passthru && !Passthru.isUndef();
2394   if (!HasPassthru && !Passthru)
2395     Passthru = DAG.getUNDEF(VT);
2396   if (VT.isFloatingPoint()) {
2397     // If VL is 1, we could use vfmv.s.f.
2398     if (isOneConstant(VL))
2399       return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
2400     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
2401   }
2402 
2403   MVT XLenVT = Subtarget.getXLenVT();
2404 
  // The simplest case is that the operand needs to be promoted to XLenVT.
2406   if (Scalar.getValueType().bitsLE(XLenVT)) {
    // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
2410     // FIXME: Should we ignore the upper bits in isel instead?
2411     unsigned ExtOpc =
2412         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
2413     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
2414     ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
2415     // If VL is 1 and the scalar value won't benefit from immediate, we could
2416     // use vmv.s.x.
2417     if (isOneConstant(VL) &&
2418         (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
2419       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
2420     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
2421   }
2422 
2423   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
2424          "Unexpected scalar for splat lowering!");
2425 
2426   if (isOneConstant(VL) && isNullConstant(Scalar))
2427     return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
2428                        DAG.getConstant(0, DL, XLenVT), VL);
2429 
2430   // Otherwise use the more complicated splatting algorithm.
2431   return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
2432 }
2433 
2434 static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
2435                                 const RISCVSubtarget &Subtarget) {
2436   // We need to be able to widen elements to the next larger integer type.
2437   if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
2438     return false;
2439 
2440   int Size = Mask.size();
2441   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
2442 
2443   int Srcs[] = {-1, -1};
2444   for (int i = 0; i != Size; ++i) {
2445     // Ignore undef elements.
2446     if (Mask[i] < 0)
2447       continue;
2448 
    // Is this an even or odd element?
2450     int Pol = i % 2;
2451 
2452     // Ensure we consistently use the same source for this element polarity.
2453     int Src = Mask[i] / Size;
2454     if (Srcs[Pol] < 0)
2455       Srcs[Pol] = Src;
2456     if (Srcs[Pol] != Src)
2457       return false;
2458 
2459     // Make sure the element within the source is appropriate for this element
2460     // in the destination.
2461     int Elt = Mask[i] % Size;
2462     if (Elt != i / 2)
2463       return false;
2464   }
2465 
2466   // We need to find a source for each polarity and they can't be the same.
2467   if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
2468     return false;
2469 
2470   // Swap the sources if the second source was in the even polarity.
2471   SwapSources = Srcs[0] > Srcs[1];
2472 
2473   return true;
2474 }
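
// For example, with two v8i16 sources the mask <0,8,1,9,2,10,3,11> is
// recognized: even destination elements come from source 0 and odd ones from
// source 1, advancing by one source element every two destination elements,
// i.e. the classic interleave of the two low halves.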
2475 
2476 /// Match shuffles that concatenate two vectors, rotate the concatenation,
2477 /// and then extract the original number of elements from the rotated result.
2478 /// This is equivalent to vector.splice or X86's PALIGNR instruction. The
2479 /// returned rotation amount is for a rotate right, where elements move from
2480 /// higher elements to lower elements. \p LoSrc indicates the first source
2481 /// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
2482 /// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
2483 /// 0 or 1 if a rotation is found.
2484 ///
2485 /// NOTE: We talk about rotate to the right which matches how bit shift and
2486 /// rotate instructions are described where LSBs are on the right, but LLVM IR
2487 /// and the table below write vectors with the lowest elements on the left.
2488 static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
2489   int Size = Mask.size();
2490 
2491   // We need to detect various ways of spelling a rotation:
2492   //   [11, 12, 13, 14, 15,  0,  1,  2]
2493   //   [-1, 12, 13, 14, -1, -1,  1, -1]
2494   //   [-1, -1, -1, -1, -1, -1,  1,  2]
2495   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
2496   //   [-1,  4,  5,  6, -1, -1,  9, -1]
2497   //   [-1,  4,  5,  6, -1, -1, -1, -1]
2498   int Rotation = 0;
2499   LoSrc = -1;
2500   HiSrc = -1;
2501   for (int i = 0; i != Size; ++i) {
2502     int M = Mask[i];
2503     if (M < 0)
2504       continue;
2505 
2506     // Determine where a rotate vector would have started.
2507     int StartIdx = i - (M % Size);
2508     // The identity rotation isn't interesting, stop.
2509     if (StartIdx == 0)
2510       return -1;
2511 
2512     // If we found the tail of a vector, the rotation must be the missing
2513     // front. If we found the head of a vector, the rotation must be how many
2514     // of its leading elements are present.
2515     int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
2516 
2517     if (Rotation == 0)
2518       Rotation = CandidateRotation;
2519     else if (Rotation != CandidateRotation)
2520       // The rotations don't match, so we can't match this mask.
2521       return -1;
2522 
2523     // Compute which value this mask is pointing at.
2524     int MaskSrc = M < Size ? 0 : 1;
2525 
2526     // Compute which of the two target values this index should be assigned to.
2527     // This reflects whether the high elements are remaining or the low elements
2528     // are remaining.
2529     int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;
2530 
2531     // Either set up this value if we've not encountered it before, or check
2532     // that it remains consistent.
2533     if (TargetSrc < 0)
2534       TargetSrc = MaskSrc;
2535     else if (TargetSrc != MaskSrc)
2536       // This may be a rotation, but it pulls from the inputs in some
2537       // unsupported interleaving.
2538       return -1;
2539   }
2540 
2541   // Check that we successfully analyzed the mask.
2542   assert(Rotation != 0 && "Failed to locate a viable rotation!");
2543   assert((LoSrc >= 0 || HiSrc >= 0) &&
2544          "Failed to find a rotated input vector!");
2545 
2546   return Rotation;
2547 }
2548 
2549 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
2550                                    const RISCVSubtarget &Subtarget) {
2551   SDValue V1 = Op.getOperand(0);
2552   SDValue V2 = Op.getOperand(1);
2553   SDLoc DL(Op);
2554   MVT XLenVT = Subtarget.getXLenVT();
2555   MVT VT = Op.getSimpleValueType();
2556   unsigned NumElts = VT.getVectorNumElements();
2557   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2558 
2559   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
2560 
2561   SDValue TrueMask, VL;
2562   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2563 
2564   if (SVN->isSplat()) {
2565     const int Lane = SVN->getSplatIndex();
2566     if (Lane >= 0) {
2567       MVT SVT = VT.getVectorElementType();
2568 
2569       // Turn splatted vector load into a strided load with an X0 stride.
2570       SDValue V = V1;
2571       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
2572       // with undef.
2573       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
2574       int Offset = Lane;
2575       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
2576         int OpElements =
2577             V.getOperand(0).getSimpleValueType().getVectorNumElements();
2578         V = V.getOperand(Offset / OpElements);
2579         Offset %= OpElements;
2580       }
2581 
2582       // We need to ensure the load isn't atomic or volatile.
2583       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
2584         auto *Ld = cast<LoadSDNode>(V);
2585         Offset *= SVT.getStoreSize();
2586         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
2587                                                    TypeSize::Fixed(Offset), DL);
2588 
2589         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
2590         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
2591           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
2592           SDValue IntID =
2593               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
2594           SDValue Ops[] = {Ld->getChain(),
2595                            IntID,
2596                            DAG.getUNDEF(ContainerVT),
2597                            NewAddr,
2598                            DAG.getRegister(RISCV::X0, XLenVT),
2599                            VL};
2600           SDValue NewLoad = DAG.getMemIntrinsicNode(
2601               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
2602               DAG.getMachineFunction().getMachineMemOperand(
2603                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
2604           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
2605           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
2606         }
2607 
2608         // Otherwise use a scalar load and splat. This will give the best
2609         // opportunity to fold a splat into the operation. ISel can turn it into
2610         // the x0 strided load if we aren't able to fold away the select.
2611         if (SVT.isFloatingPoint())
2612           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
2613                           Ld->getPointerInfo().getWithOffset(Offset),
2614                           Ld->getOriginalAlign(),
2615                           Ld->getMemOperand()->getFlags());
2616         else
2617           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
2618                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
2619                              Ld->getOriginalAlign(),
2620                              Ld->getMemOperand()->getFlags());
2621         DAG.makeEquivalentMemoryOrdering(Ld, V);
2622 
2623         unsigned Opc =
2624             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
2625         SDValue Splat =
2626             DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
2627         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
2628       }
2629 
2630       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2631       assert(Lane < (int)NumElts && "Unexpected lane!");
2632       SDValue Gather =
2633           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
2634                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
2635       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2636     }
2637   }
2638 
2639   ArrayRef<int> Mask = SVN->getMask();
2640 
2641   // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors may
2642   // be undef, in which case it can be handled with a single SLIDEDOWN/UP.
2643   int LoSrc, HiSrc;
2644   int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
2645   if (Rotation > 0) {
2646     SDValue LoV, HiV;
2647     if (LoSrc >= 0) {
2648       LoV = LoSrc == 0 ? V1 : V2;
2649       LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
2650     }
2651     if (HiSrc >= 0) {
2652       HiV = HiSrc == 0 ? V1 : V2;
2653       HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
2654     }
2655 
2656     // We found a rotation. We need to slide HiV down by Rotation. Then we need
2657     // to slide LoV up by (NumElts - Rotation).
2658     unsigned InvRotate = NumElts - Rotation;
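    // For example, with NumElts == 8 and Rotation == 3, the SLIDEDOWN places
    // HiV[3..7] at result positions 0..4 and the SLIDEUP then places
    // LoV[0..2] at positions 5..7.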
2659 
2660     SDValue Res = DAG.getUNDEF(ContainerVT);
2661     if (HiV) {
2662       // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
2663       // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
2664       // causes multiple vsetvlis in some test cases such as lowering
2665       // reduce.mul
2666       SDValue DownVL = VL;
2667       if (LoV)
2668         DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
2669       Res =
2670           DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, Res, HiV,
2671                       DAG.getConstant(Rotation, DL, XLenVT), TrueMask, DownVL);
2672     }
2673     if (LoV)
2674       Res = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Res, LoV,
2675                         DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL);
2676 
2677     return convertFromScalableVector(VT, Res, DAG, Subtarget);
2678   }
2679 
2680   // Detect an interleave shuffle and lower to
2681   // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
2682   bool SwapSources;
2683   if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
2684     // Swap sources if needed.
2685     if (SwapSources)
2686       std::swap(V1, V2);
2687 
2688     // Extract the lower half of the vectors.
2689     MVT HalfVT = VT.getHalfNumVectorElementsVT();
2690     V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
2691                      DAG.getConstant(0, DL, XLenVT));
2692     V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
2693                      DAG.getConstant(0, DL, XLenVT));
2694 
2695     // Double the element width and halve the number of elements in an int type.
2696     unsigned EltBits = VT.getScalarSizeInBits();
2697     MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
2698     MVT WideIntVT =
2699         MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
2700     // Convert this to a scalable vector. We need to base this on the
2701     // destination size to ensure there's always a type with a smaller LMUL.
2702     MVT WideIntContainerVT =
2703         getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);
2704 
2705     // Convert sources to scalable vectors with the same element count as the
2706     // larger type.
2707     MVT HalfContainerVT = MVT::getVectorVT(
2708         VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
2709     V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
2710     V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);
2711 
2712     // Cast sources to integer.
2713     MVT IntEltVT = MVT::getIntegerVT(EltBits);
2714     MVT IntHalfVT =
2715         MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
2716     V1 = DAG.getBitcast(IntHalfVT, V1);
2717     V2 = DAG.getBitcast(IntHalfVT, V2);
2718 
2719     // Freeze V2 since we use it twice and we need to be sure that the add and
2720     // multiply see the same value.
2721     V2 = DAG.getFreeze(V2);
2722 
2723     // Recreate TrueMask using the widened type's element count.
2724     MVT MaskVT =
2725         MVT::getVectorVT(MVT::i1, HalfContainerVT.getVectorElementCount());
2726     TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2727 
2728     // Widen V1 and V2 with 0s and add one copy of V2 to V1.
2729     SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
2730                               V2, TrueMask, VL);
2731     // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
2732     SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
2733                                      DAG.getUNDEF(IntHalfVT),
2734                                      DAG.getAllOnesConstant(DL, XLenVT));
2735     SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
2736                                    V2, Multiplier, TrueMask, VL);
2737     // Add the new copies to our previous addition giving us 2^eltbits copies of
2738     // V2. This is equivalent to shifting V2 left by eltbits. This should
2739     // combine with the vwmulu.vv above to form vwmaccu.vv.
2740     Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
2741                       TrueMask, VL);
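    // For 8-bit elements this computes
    //   zext(V1) + zext(V2) + 255 * zext(V2) == zext(V1) + 256 * zext(V2),
    // so each i16 lane holds (V2[i] << 8) | V1[i] and reads back as the
    // interleaved pair (V1[i], V2[i]) once bitcast to i8 lanes below.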
2742     // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
2743     // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
2744     // vector VT.
2745     ContainerVT =
2746         MVT::getVectorVT(VT.getVectorElementType(),
2747                          WideIntContainerVT.getVectorElementCount() * 2);
2748     Add = DAG.getBitcast(ContainerVT, Add);
2749     return convertFromScalableVector(VT, Add, DAG, Subtarget);
2750   }
2751 
2752   // Detect shuffles which can be re-expressed as vector selects; these are
2753   // shuffles in which each element in the destination is taken from an element
2754   // at the corresponding index in either source vectors.
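  // For example, with NumElts == 4 the mask [0, 5, 2, 7] is such a select,
  // taking lanes 0 and 2 from V1 and lanes 1 and 3 from V2.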
2755   bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
2756     int MaskIndex = MaskIdx.value();
2757     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
2758   });
2759 
2760   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
2761 
2762   SmallVector<SDValue> MaskVals;
2763   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
2764   // merged with a second vrgather.
2765   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
2766 
2767   // By default we preserve the original operand order, and use a mask to
2768   // select LHS as true and RHS as false. However, since RVV vector selects may
2769   // feature splats but only on the LHS, we may choose to invert our mask and
2770   // instead select between RHS and LHS.
2771   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
2772   bool InvertMask = IsSelect == SwapOps;
2773 
2774   // Keep track of which non-undef indices are used by each LHS/RHS shuffle
2775   // half.
2776   DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;
2777 
2778   // Now construct the mask that will be used by the vselect or blended
2779   // vrgather operation. For vrgathers, construct the appropriate indices into
2780   // each vector.
2781   for (int MaskIndex : Mask) {
2782     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
2783     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
2784     if (!IsSelect) {
2785       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
2786       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
2787                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
2788                                      : DAG.getUNDEF(XLenVT));
2789       GatherIndicesRHS.push_back(
2790           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
2791                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
2792       if (IsLHSOrUndefIndex && MaskIndex >= 0)
2793         ++LHSIndexCounts[MaskIndex];
2794       if (!IsLHSOrUndefIndex)
2795         ++RHSIndexCounts[MaskIndex - NumElts];
2796     }
2797   }
2798 
2799   if (SwapOps) {
2800     std::swap(V1, V2);
2801     std::swap(GatherIndicesLHS, GatherIndicesRHS);
2802   }
2803 
2804   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
2805   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
2806   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
2807 
2808   if (IsSelect)
2809     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
2810 
2811   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
2812     // On such a large vector we're unable to use i8 as the index type.
2813     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
2814     // may involve vector splitting if we're already at LMUL=8, or our
2815     // user-supplied maximum fixed-length LMUL.
2816     return SDValue();
2817   }
2818 
2819   unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
2820   unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
2821   MVT IndexVT = VT.changeTypeToInteger();
2822   // Since we can't introduce illegal index types at this stage, use i16 and
2823   // vrgatherei16 if the corresponding index type for plain vrgather is greater
2824   // than XLenVT.
2825   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
2826     GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
2827     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
2828   }
2829 
2830   MVT IndexContainerVT =
2831       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
2832 
2833   SDValue Gather;
2834   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
2835   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
2836   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
2837     Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
2838                               Subtarget);
2839   } else {
2840     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
2841     // If only one index is used, we can use a "splat" vrgather.
2842     // TODO: We can splat the most-common index and fix-up any stragglers, if
2843     // that's beneficial.
2844     if (LHSIndexCounts.size() == 1) {
2845       int SplatIndex = LHSIndexCounts.begin()->getFirst();
2846       Gather =
2847           DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
2848                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2849     } else {
2850       SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
2851       LHSIndices =
2852           convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
2853 
2854       Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
2855                            TrueMask, VL);
2856     }
2857   }
2858 
2859   // If a second vector operand is used by this shuffle, blend it in with an
2860   // additional vrgather.
2861   if (!V2.isUndef()) {
2862     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
2863     // If only one index is used, we can use a "splat" vrgather.
2864     // TODO: We can splat the most-common index and fix-up any stragglers, if
2865     // that's beneficial.
2866     if (RHSIndexCounts.size() == 1) {
2867       int SplatIndex = RHSIndexCounts.begin()->getFirst();
2868       V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
2869                        DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
2870     } else {
2871       SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
2872       RHSIndices =
2873           convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
2874       V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
2875                        VL);
2876     }
2877 
2878     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2879     SelectMask =
2880         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
2881 
2882     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
2883                          Gather, VL);
2884   }
2885 
2886   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2887 }
2888 
2889 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
2890   // Support splats for any type. These should type legalize well.
2891   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
2892     return true;
2893 
2894   // Only support legal VTs for other shuffles for now.
2895   if (!isTypeLegal(VT))
2896     return false;
2897 
2898   MVT SVT = VT.getSimpleVT();
2899 
2900   bool SwapSources;
2901   int LoSrc, HiSrc;
2902   return (isElementRotate(LoSrc, HiSrc, M) > 0) ||
2903          isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
2904 }
2905 
2906 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2907                                      SDLoc DL, SelectionDAG &DAG,
2908                                      const RISCVSubtarget &Subtarget) {
2909   if (VT.isScalableVector())
2910     return DAG.getFPExtendOrRound(Op, DL, VT);
2911   assert(VT.isFixedLengthVector() &&
2912          "Unexpected value type for RVV FP extend/round lowering");
2913   SDValue Mask, VL;
2914   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2915   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2916                         ? RISCVISD::FP_EXTEND_VL
2917                         : RISCVISD::FP_ROUND_VL;
2918   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2919 }
2920 
2921 // Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
2922 // the exponent.
2923 static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
2924   MVT VT = Op.getSimpleValueType();
2925   unsigned EltSize = VT.getScalarSizeInBits();
2926   SDValue Src = Op.getOperand(0);
2927   SDLoc DL(Op);
2928 
2929   // We need an FP type that can represent the value.
2930   // TODO: Use f16 for i8 when possible?
2931   MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
2932   MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
2933 
2934   // Legal types should have been checked in the RISCVTargetLowering
2935   // constructor.
2936   // TODO: Splitting may make sense in some cases.
2937   assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
2938          "Expected legal float type!");
2939 
2940   // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
2941   // The trailing zero count is equal to log2 of this single bit value.
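  // For example, X = 0b01100 gives X & -X == 0b00100, and log2(0b00100) == 2,
  // which is cttz(0b01100).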
2942   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
2943     SDValue Neg =
2944         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
2945     Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
2946   }
2947 
2948   // We have a legal FP type, convert to it.
2949   SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
2950   // Bitcast to integer and shift the exponent to the LSB.
2951   EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
2952   SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
2953   unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
2954   SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
2955                               DAG.getConstant(ShiftAmt, DL, IntVT));
2956   // Truncate back to original type to allow vnsrl.
2957   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
2958   // The exponent contains log2 of the value in biased form.
2959   unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;
2960 
2961   // For trailing zeros, we just need to subtract the bias.
2962   if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
2963     return DAG.getNode(ISD::SUB, DL, VT, Trunc,
2964                        DAG.getConstant(ExponentBias, DL, VT));
2965 
2966   // For leading zeros, we need to remove the bias and convert from log2 to
2967   // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
2968   unsigned Adjust = ExponentBias + (EltSize - 1);
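  // For example, a 32-bit input of 16 converts to 2^4, whose biased f64
  // exponent field is 1023 + 4; subtracting that from 1023 + 31 yields 27,
  // the correct leading zero count.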
2969   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
2970 }
2971 
2972 // While RVV has alignment restrictions, we should always be able to load as a
2973 // legal equivalently-sized byte-typed vector instead. This method is
2974 // responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
2975 // the load is already correctly-aligned, it returns SDValue().
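// For example, a v8i32 load with only 1-byte alignment is re-expressed as a
// v32i8 load from the same address, and the result is bitcast back to v8i32.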
2976 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2977                                                     SelectionDAG &DAG) const {
2978   auto *Load = cast<LoadSDNode>(Op);
2979   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2980 
2981   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2982                                      Load->getMemoryVT(),
2983                                      *Load->getMemOperand()))
2984     return SDValue();
2985 
2986   SDLoc DL(Op);
2987   MVT VT = Op.getSimpleValueType();
2988   unsigned EltSizeBits = VT.getScalarSizeInBits();
2989   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2990          "Unexpected unaligned RVV load type");
2991   MVT NewVT =
2992       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2993   assert(NewVT.isValid() &&
2994          "Expecting equally-sized RVV vector types to be legal");
2995   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2996                           Load->getPointerInfo(), Load->getOriginalAlign(),
2997                           Load->getMemOperand()->getFlags());
2998   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2999 }
3000 
3001 // While RVV has alignment restrictions, we should always be able to store as a
3002 // legal equivalently-sized byte-typed vector instead. This method is
3003 // responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
3004 // returns SDValue() if the store is already correctly aligned.
3005 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
3006                                                      SelectionDAG &DAG) const {
3007   auto *Store = cast<StoreSDNode>(Op);
3008   assert(Store && Store->getValue().getValueType().isVector() &&
3009          "Expected vector store");
3010 
3011   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
3012                                      Store->getMemoryVT(),
3013                                      *Store->getMemOperand()))
3014     return SDValue();
3015 
3016   SDLoc DL(Op);
3017   SDValue StoredVal = Store->getValue();
3018   MVT VT = StoredVal.getSimpleValueType();
3019   unsigned EltSizeBits = VT.getScalarSizeInBits();
3020   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
3021          "Unexpected unaligned RVV store type");
3022   MVT NewVT =
3023       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
3024   assert(NewVT.isValid() &&
3025          "Expecting equally-sized RVV vector types to be legal");
3026   StoredVal = DAG.getBitcast(NewVT, StoredVal);
3027   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
3028                       Store->getPointerInfo(), Store->getOriginalAlign(),
3029                       Store->getMemOperand()->getFlags());
3030 }
3031 
3032 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
3033                                             SelectionDAG &DAG) const {
3034   switch (Op.getOpcode()) {
3035   default:
3036     report_fatal_error("unimplemented operand");
3037   case ISD::GlobalAddress:
3038     return lowerGlobalAddress(Op, DAG);
3039   case ISD::BlockAddress:
3040     return lowerBlockAddress(Op, DAG);
3041   case ISD::ConstantPool:
3042     return lowerConstantPool(Op, DAG);
3043   case ISD::JumpTable:
3044     return lowerJumpTable(Op, DAG);
3045   case ISD::GlobalTLSAddress:
3046     return lowerGlobalTLSAddress(Op, DAG);
3047   case ISD::SELECT:
3048     return lowerSELECT(Op, DAG);
3049   case ISD::BRCOND:
3050     return lowerBRCOND(Op, DAG);
3051   case ISD::VASTART:
3052     return lowerVASTART(Op, DAG);
3053   case ISD::FRAMEADDR:
3054     return lowerFRAMEADDR(Op, DAG);
3055   case ISD::RETURNADDR:
3056     return lowerRETURNADDR(Op, DAG);
3057   case ISD::SHL_PARTS:
3058     return lowerShiftLeftParts(Op, DAG);
3059   case ISD::SRA_PARTS:
3060     return lowerShiftRightParts(Op, DAG, true);
3061   case ISD::SRL_PARTS:
3062     return lowerShiftRightParts(Op, DAG, false);
3063   case ISD::BITCAST: {
3064     SDLoc DL(Op);
3065     EVT VT = Op.getValueType();
3066     SDValue Op0 = Op.getOperand(0);
3067     EVT Op0VT = Op0.getValueType();
3068     MVT XLenVT = Subtarget.getXLenVT();
3069     if (VT.isFixedLengthVector()) {
3070       // We can handle fixed length vector bitcasts with a simple replacement
3071       // in isel.
3072       if (Op0VT.isFixedLengthVector())
3073         return Op;
3074       // When bitcasting from scalar to fixed-length vector, insert the scalar
3075       // into a one-element vector of the result type, and perform a vector
3076       // bitcast.
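      // For example, an i64 -> v4i16 bitcast becomes an INSERT_VECTOR_ELT of
      // the i64 into an undef v1i64, followed by a v1i64 -> v4i16 bitcast.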
3077       if (!Op0VT.isVector()) {
3078         EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
3079         if (!isTypeLegal(BVT))
3080           return SDValue();
3081         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
3082                                               DAG.getUNDEF(BVT), Op0,
3083                                               DAG.getConstant(0, DL, XLenVT)));
3084       }
3085       return SDValue();
3086     }
3087     // Custom-legalize bitcasts from fixed-length vector types to scalar types
3088     // thus: bitcast the vector to a one-element vector type whose element type
3089     // is the same as the result type, and extract the first element.
3090     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
3091       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
3092       if (!isTypeLegal(BVT))
3093         return SDValue();
3094       SDValue BVec = DAG.getBitcast(BVT, Op0);
3095       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
3096                          DAG.getConstant(0, DL, XLenVT));
3097     }
3098     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
3099       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
3100       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
3101       return FPConv;
3102     }
3103     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
3104         Subtarget.hasStdExtF()) {
3105       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
3106       SDValue FPConv =
3107           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
3108       return FPConv;
3109     }
3110     return SDValue();
3111   }
3112   case ISD::INTRINSIC_WO_CHAIN:
3113     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
3114   case ISD::INTRINSIC_W_CHAIN:
3115     return LowerINTRINSIC_W_CHAIN(Op, DAG);
3116   case ISD::INTRINSIC_VOID:
3117     return LowerINTRINSIC_VOID(Op, DAG);
3118   case ISD::BSWAP:
3119   case ISD::BITREVERSE: {
3120     MVT VT = Op.getSimpleValueType();
3121     SDLoc DL(Op);
3122     if (Subtarget.hasStdExtZbp()) {
3123       // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
3124       // Start with the maximum immediate value which is the bitwidth - 1.
3125       unsigned Imm = VT.getSizeInBits() - 1;
3126       // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
3127       if (Op.getOpcode() == ISD::BSWAP)
3128         Imm &= ~0x7U;
3129       return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
3130                          DAG.getConstant(Imm, DL, VT));
3131     }
3132     assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization");
3133     assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode");
3134     // Expand bitreverse to a bswap(rev8) followed by brev8.
3135     SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0));
3136     // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized
3137     // as brev8 by an isel pattern.
3138     return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
3139                        DAG.getConstant(7, DL, VT));
3140   }
3141   case ISD::FSHL:
3142   case ISD::FSHR: {
3143     MVT VT = Op.getSimpleValueType();
3144     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
3145     SDLoc DL(Op);
3146     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
3147     // use log2(XLen) bits. Mask the shift amount accordingly to prevent
3148     // accidentally setting the extra bit.
3149     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
3150     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
3151                                 DAG.getConstant(ShAmtWidth, DL, VT));
3152     // fshl and fshr concatenate their operands in the same order. fsr and fsl
3153     // instructions use different orders. fshl will return its first operand for
3154     // a shift of zero, fshr will return its second operand. fsl and fsr both
3155     // return rs1 so the ISD nodes need to have different operand orders.
3156     // Shift amount is in rs2.
3157     SDValue Op0 = Op.getOperand(0);
3158     SDValue Op1 = Op.getOperand(1);
3159     unsigned Opc = RISCVISD::FSL;
3160     if (Op.getOpcode() == ISD::FSHR) {
3161       std::swap(Op0, Op1);
3162       Opc = RISCVISD::FSR;
3163     }
3164     return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
3165   }
3166   case ISD::TRUNCATE: {
3167     SDLoc DL(Op);
3168     MVT VT = Op.getSimpleValueType();
3169     // Only custom-lower vector truncates
3170     if (!VT.isVector())
3171       return Op;
3172 
3173     // Truncates to mask types are handled differently
3174     if (VT.getVectorElementType() == MVT::i1)
3175       return lowerVectorMaskTrunc(Op, DAG);
3176 
3177     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
3178     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
3179     // truncate by one power of two at a time.
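    // For example, a v4i64 -> v4i8 truncate is emitted as three such nodes:
    // i64 -> i32, then i32 -> i16, then i16 -> i8.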
3180     MVT DstEltVT = VT.getVectorElementType();
3181 
3182     SDValue Src = Op.getOperand(0);
3183     MVT SrcVT = Src.getSimpleValueType();
3184     MVT SrcEltVT = SrcVT.getVectorElementType();
3185 
3186     assert(DstEltVT.bitsLT(SrcEltVT) &&
3187            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
3188            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
3189            "Unexpected vector truncate lowering");
3190 
3191     MVT ContainerVT = SrcVT;
3192     if (SrcVT.isFixedLengthVector()) {
3193       ContainerVT = getContainerForFixedLengthVector(SrcVT);
3194       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3195     }
3196 
3197     SDValue Result = Src;
3198     SDValue Mask, VL;
3199     std::tie(Mask, VL) =
3200         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
3201     LLVMContext &Context = *DAG.getContext();
3202     const ElementCount Count = ContainerVT.getVectorElementCount();
3203     do {
3204       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
3205       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
3206       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
3207                            Mask, VL);
3208     } while (SrcEltVT != DstEltVT);
3209 
3210     if (SrcVT.isFixedLengthVector())
3211       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
3212 
3213     return Result;
3214   }
3215   case ISD::ANY_EXTEND:
3216   case ISD::ZERO_EXTEND:
3217     if (Op.getOperand(0).getValueType().isVector() &&
3218         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3219       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
3220     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
3221   case ISD::SIGN_EXTEND:
3222     if (Op.getOperand(0).getValueType().isVector() &&
3223         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3224       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
3225     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
3226   case ISD::SPLAT_VECTOR_PARTS:
3227     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
3228   case ISD::INSERT_VECTOR_ELT:
3229     return lowerINSERT_VECTOR_ELT(Op, DAG);
3230   case ISD::EXTRACT_VECTOR_ELT:
3231     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
3232   case ISD::VSCALE: {
3233     MVT VT = Op.getSimpleValueType();
3234     SDLoc DL(Op);
3235     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
3236     // We define our scalable vector types for lmul=1 to use a 64-bit known
3237     // minimum size, e.g. <vscale x 2 x i32>. VLENB is in bytes, so we calculate
3238     // vscale as VLENB / 8.
3239     static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
3240     if (Subtarget.getMinVLen() < RISCV::RVVBitsPerBlock)
3241       report_fatal_error("Support for VLEN==32 is incomplete.");
3242     if (isa<ConstantSDNode>(Op.getOperand(0))) {
3243       // We assume VLENB is a multiple of 8. We manually choose the best shift
3244       // here because SimplifyDemandedBits isn't always able to simplify it.
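      // For example, vscale * 2 becomes VLENB >> 2, vscale * 8 is VLENB
      // itself, and vscale * 24 becomes VLENB * 3.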
3245       uint64_t Val = Op.getConstantOperandVal(0);
3246       if (isPowerOf2_64(Val)) {
3247         uint64_t Log2 = Log2_64(Val);
3248         if (Log2 < 3)
3249           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
3250                              DAG.getConstant(3 - Log2, DL, VT));
3251         if (Log2 > 3)
3252           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
3253                              DAG.getConstant(Log2 - 3, DL, VT));
3254         return VLENB;
3255       }
3256       // If the multiplier is a multiple of 8, scale it down to avoid needing
3257       // to shift the VLENB value.
3258       if ((Val % 8) == 0)
3259         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
3260                            DAG.getConstant(Val / 8, DL, VT));
3261     }
3262 
3263     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
3264                                  DAG.getConstant(3, DL, VT));
3265     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
3266   }
3267   case ISD::FPOWI: {
3268     // Custom promote f16 powi with illegal i32 integer type on RV64. Once
3269     // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
3270     if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
3271         Op.getOperand(1).getValueType() == MVT::i32) {
3272       SDLoc DL(Op);
3273       SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
3274       SDValue Powi =
3275           DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
3276       return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
3277                          DAG.getIntPtrConstant(0, DL));
3278     }
3279     return SDValue();
3280   }
3281   case ISD::FP_EXTEND: {
3282     // RVV can only do fp_extend to types twice the size of the source. We
3283     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
3284     // via f32.
3285     SDLoc DL(Op);
3286     MVT VT = Op.getSimpleValueType();
3287     SDValue Src = Op.getOperand(0);
3288     MVT SrcVT = Src.getSimpleValueType();
3289 
3290     // Prepare any fixed-length vector operands.
3291     MVT ContainerVT = VT;
3292     if (SrcVT.isFixedLengthVector()) {
3293       ContainerVT = getContainerForFixedLengthVector(VT);
3294       MVT SrcContainerVT =
3295           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
3296       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3297     }
3298 
3299     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
3300         SrcVT.getVectorElementType() != MVT::f16) {
3301       // For scalable vectors, we only need to close the gap between
3302       // vXf16->vXf64.
3303       if (!VT.isFixedLengthVector())
3304         return Op;
3305       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
3306       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3307       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3308     }
3309 
3310     MVT InterVT = VT.changeVectorElementType(MVT::f32);
3311     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
3312     SDValue IntermediateExtend = getRVVFPExtendOrRound(
3313         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
3314 
3315     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
3316                                            DL, DAG, Subtarget);
3317     if (VT.isFixedLengthVector())
3318       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
3319     return Extend;
3320   }
3321   case ISD::FP_ROUND: {
3322     // RVV can only do fp_round to types half the size of the source. We
3323     // custom-lower f64->f16 rounds via RVV's round-to-odd float
3324     // conversion instruction.
3325     SDLoc DL(Op);
3326     MVT VT = Op.getSimpleValueType();
3327     SDValue Src = Op.getOperand(0);
3328     MVT SrcVT = Src.getSimpleValueType();
3329 
3330     // Prepare any fixed-length vector operands.
3331     MVT ContainerVT = VT;
3332     if (VT.isFixedLengthVector()) {
3333       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3334       ContainerVT =
3335           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3336       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3337     }
3338 
3339     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
3340         SrcVT.getVectorElementType() != MVT::f64) {
3341       // For scalable vectors, we only need to close the gap between
3342       // vXf64<->vXf16.
3343       if (!VT.isFixedLengthVector())
3344         return Op;
3345       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
3346       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
3347       return convertFromScalableVector(VT, Src, DAG, Subtarget);
3348     }
3349 
3350     SDValue Mask, VL;
3351     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3352 
3353     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
3354     SDValue IntermediateRound =
3355         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
3356     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
3357                                           DL, DAG, Subtarget);
3358 
3359     if (VT.isFixedLengthVector())
3360       return convertFromScalableVector(VT, Round, DAG, Subtarget);
3361     return Round;
3362   }
3363   case ISD::FP_TO_SINT:
3364   case ISD::FP_TO_UINT:
3365   case ISD::SINT_TO_FP:
3366   case ISD::UINT_TO_FP: {
3367     // RVV can only do fp<->int conversions to types half/twice the size of
3368     // the source. We custom-lower any conversions that do two hops into
3369     // sequences.
3370     MVT VT = Op.getSimpleValueType();
3371     if (!VT.isVector())
3372       return Op;
3373     SDLoc DL(Op);
3374     SDValue Src = Op.getOperand(0);
3375     MVT EltVT = VT.getVectorElementType();
3376     MVT SrcVT = Src.getSimpleValueType();
3377     MVT SrcEltVT = SrcVT.getVectorElementType();
3378     unsigned EltSize = EltVT.getSizeInBits();
3379     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
3380     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
3381            "Unexpected vector element types");
3382 
3383     bool IsInt2FP = SrcEltVT.isInteger();
3384     // Widening conversions
3385     if (EltSize > (2 * SrcEltSize)) {
3386       if (IsInt2FP) {
3387         // Do a regular integer sign/zero extension then convert to float.
3388         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize),
3389                                       VT.getVectorElementCount());
3390         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
3391                                  ? ISD::ZERO_EXTEND
3392                                  : ISD::SIGN_EXTEND;
3393         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
3394         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
3395       }
3396       // FP2Int
3397       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
3398       // Do one doubling fp_extend then complete the operation by converting
3399       // to int.
3400       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3401       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
3402       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
3403     }
3404 
3405     // Narrowing conversions
3406     if (SrcEltSize > (2 * EltSize)) {
3407       if (IsInt2FP) {
3408         // One narrowing int_to_fp, then an fp_round.
3409         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
3410         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
3411         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
3412         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
3413       }
3414       // FP2Int
3415       // One narrowing fp_to_int, then truncate the integer. If the float isn't
3416       // representable by the integer, the result is poison.
3417       MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
3418                                     VT.getVectorElementCount());
3419       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
3420       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
3421     }
3422 
3423     // Scalable vectors can exit here; patterns will handle equally-sized
3424     // conversions as well as halving/doubling ones.
3425     if (!VT.isFixedLengthVector())
3426       return Op;
3427 
3428     // For fixed-length vectors we lower to a custom "VL" node.
3429     unsigned RVVOpc = 0;
3430     switch (Op.getOpcode()) {
3431     default:
3432       llvm_unreachable("Impossible opcode");
3433     case ISD::FP_TO_SINT:
3434       RVVOpc = RISCVISD::FP_TO_SINT_VL;
3435       break;
3436     case ISD::FP_TO_UINT:
3437       RVVOpc = RISCVISD::FP_TO_UINT_VL;
3438       break;
3439     case ISD::SINT_TO_FP:
3440       RVVOpc = RISCVISD::SINT_TO_FP_VL;
3441       break;
3442     case ISD::UINT_TO_FP:
3443       RVVOpc = RISCVISD::UINT_TO_FP_VL;
3444       break;
3445     }
3446 
3447     MVT ContainerVT, SrcContainerVT;
3448     // Derive the reference container type from the larger vector type.
3449     if (SrcEltSize > EltSize) {
3450       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
3451       ContainerVT =
3452           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
3453     } else {
3454       ContainerVT = getContainerForFixedLengthVector(VT);
3455       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
3456     }
3457 
3458     SDValue Mask, VL;
3459     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3460 
3461     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
3462     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
3463     return convertFromScalableVector(VT, Src, DAG, Subtarget);
3464   }
3465   case ISD::FP_TO_SINT_SAT:
3466   case ISD::FP_TO_UINT_SAT:
3467     return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
3468   case ISD::FTRUNC:
3469   case ISD::FCEIL:
3470   case ISD::FFLOOR:
3471     return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
3472   case ISD::FROUND:
3473     return lowerFROUND(Op, DAG);
3474   case ISD::VECREDUCE_ADD:
3475   case ISD::VECREDUCE_UMAX:
3476   case ISD::VECREDUCE_SMAX:
3477   case ISD::VECREDUCE_UMIN:
3478   case ISD::VECREDUCE_SMIN:
3479     return lowerVECREDUCE(Op, DAG);
3480   case ISD::VECREDUCE_AND:
3481   case ISD::VECREDUCE_OR:
3482   case ISD::VECREDUCE_XOR:
3483     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
3484       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
3485     return lowerVECREDUCE(Op, DAG);
3486   case ISD::VECREDUCE_FADD:
3487   case ISD::VECREDUCE_SEQ_FADD:
3488   case ISD::VECREDUCE_FMIN:
3489   case ISD::VECREDUCE_FMAX:
3490     return lowerFPVECREDUCE(Op, DAG);
3491   case ISD::VP_REDUCE_ADD:
3492   case ISD::VP_REDUCE_UMAX:
3493   case ISD::VP_REDUCE_SMAX:
3494   case ISD::VP_REDUCE_UMIN:
3495   case ISD::VP_REDUCE_SMIN:
3496   case ISD::VP_REDUCE_FADD:
3497   case ISD::VP_REDUCE_SEQ_FADD:
3498   case ISD::VP_REDUCE_FMIN:
3499   case ISD::VP_REDUCE_FMAX:
3500     return lowerVPREDUCE(Op, DAG);
3501   case ISD::VP_REDUCE_AND:
3502   case ISD::VP_REDUCE_OR:
3503   case ISD::VP_REDUCE_XOR:
3504     if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
3505       return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
3506     return lowerVPREDUCE(Op, DAG);
3507   case ISD::INSERT_SUBVECTOR:
3508     return lowerINSERT_SUBVECTOR(Op, DAG);
3509   case ISD::EXTRACT_SUBVECTOR:
3510     return lowerEXTRACT_SUBVECTOR(Op, DAG);
3511   case ISD::STEP_VECTOR:
3512     return lowerSTEP_VECTOR(Op, DAG);
3513   case ISD::VECTOR_REVERSE:
3514     return lowerVECTOR_REVERSE(Op, DAG);
3515   case ISD::VECTOR_SPLICE:
3516     return lowerVECTOR_SPLICE(Op, DAG);
3517   case ISD::BUILD_VECTOR:
3518     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
3519   case ISD::SPLAT_VECTOR:
3520     if (Op.getValueType().getVectorElementType() == MVT::i1)
3521       return lowerVectorMaskSplat(Op, DAG);
3522     return SDValue();
3523   case ISD::VECTOR_SHUFFLE:
3524     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
3525   case ISD::CONCAT_VECTORS: {
3526     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
3527     // better than going through the stack, as the default expansion does.
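    // For example, (concat_vectors v4i32:a, v4i32:b) becomes an
    // INSERT_SUBVECTOR of a at index 0 followed by one of b at index 4 into
    // an undef v8i32.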
3528     SDLoc DL(Op);
3529     MVT VT = Op.getSimpleValueType();
3530     unsigned NumOpElts =
3531         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
3532     SDValue Vec = DAG.getUNDEF(VT);
3533     for (const auto &OpIdx : enumerate(Op->ops())) {
3534       SDValue SubVec = OpIdx.value();
3535       // Don't insert undef subvectors.
3536       if (SubVec.isUndef())
3537         continue;
3538       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
3539                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
3540     }
3541     return Vec;
3542   }
3543   case ISD::LOAD:
3544     if (auto V = expandUnalignedRVVLoad(Op, DAG))
3545       return V;
3546     if (Op.getValueType().isFixedLengthVector())
3547       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
3548     return Op;
3549   case ISD::STORE:
3550     if (auto V = expandUnalignedRVVStore(Op, DAG))
3551       return V;
3552     if (Op.getOperand(1).getValueType().isFixedLengthVector())
3553       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
3554     return Op;
3555   case ISD::MLOAD:
3556   case ISD::VP_LOAD:
3557     return lowerMaskedLoad(Op, DAG);
3558   case ISD::MSTORE:
3559   case ISD::VP_STORE:
3560     return lowerMaskedStore(Op, DAG);
3561   case ISD::SETCC:
3562     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
3563   case ISD::ADD:
3564     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
3565   case ISD::SUB:
3566     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
3567   case ISD::MUL:
3568     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
3569   case ISD::MULHS:
3570     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
3571   case ISD::MULHU:
3572     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
3573   case ISD::AND:
3574     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
3575                                               RISCVISD::AND_VL);
3576   case ISD::OR:
3577     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
3578                                               RISCVISD::OR_VL);
3579   case ISD::XOR:
3580     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
3581                                               RISCVISD::XOR_VL);
3582   case ISD::SDIV:
3583     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
3584   case ISD::SREM:
3585     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
3586   case ISD::UDIV:
3587     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
3588   case ISD::UREM:
3589     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
3590   case ISD::SHL:
3591   case ISD::SRA:
3592   case ISD::SRL:
3593     if (Op.getSimpleValueType().isFixedLengthVector())
3594       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
3595     // This can be called for an i32 shift amount that needs to be promoted.
3596     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
3597            "Unexpected custom legalisation");
3598     return SDValue();
3599   case ISD::SADDSAT:
3600     return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
3601   case ISD::UADDSAT:
3602     return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
3603   case ISD::SSUBSAT:
3604     return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
3605   case ISD::USUBSAT:
3606     return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
3607   case ISD::FADD:
3608     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
3609   case ISD::FSUB:
3610     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
3611   case ISD::FMUL:
3612     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
3613   case ISD::FDIV:
3614     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
3615   case ISD::FNEG:
3616     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
3617   case ISD::FABS:
3618     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
3619   case ISD::FSQRT:
3620     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
3621   case ISD::FMA:
3622     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
3623   case ISD::SMIN:
3624     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
3625   case ISD::SMAX:
3626     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
3627   case ISD::UMIN:
3628     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
3629   case ISD::UMAX:
3630     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
3631   case ISD::FMINNUM:
3632     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
3633   case ISD::FMAXNUM:
3634     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
3635   case ISD::ABS:
3636     return lowerABS(Op, DAG);
3637   case ISD::CTLZ_ZERO_UNDEF:
3638   case ISD::CTTZ_ZERO_UNDEF:
3639     return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
3640   case ISD::VSELECT:
3641     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
3642   case ISD::FCOPYSIGN:
3643     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
3644   case ISD::MGATHER:
3645   case ISD::VP_GATHER:
3646     return lowerMaskedGather(Op, DAG);
3647   case ISD::MSCATTER:
3648   case ISD::VP_SCATTER:
3649     return lowerMaskedScatter(Op, DAG);
3650   case ISD::FLT_ROUNDS_:
3651     return lowerGET_ROUNDING(Op, DAG);
3652   case ISD::SET_ROUNDING:
3653     return lowerSET_ROUNDING(Op, DAG);
3654   case ISD::VP_SELECT:
3655     return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
3656   case ISD::VP_MERGE:
3657     return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
3658   case ISD::VP_ADD:
3659     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
3660   case ISD::VP_SUB:
3661     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
3662   case ISD::VP_MUL:
3663     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
3664   case ISD::VP_SDIV:
3665     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
3666   case ISD::VP_UDIV:
3667     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
3668   case ISD::VP_SREM:
3669     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
3670   case ISD::VP_UREM:
3671     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
3672   case ISD::VP_AND:
3673     return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
3674   case ISD::VP_OR:
3675     return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
3676   case ISD::VP_XOR:
3677     return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
3678   case ISD::VP_ASHR:
3679     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
3680   case ISD::VP_LSHR:
3681     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
3682   case ISD::VP_SHL:
3683     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
3684   case ISD::VP_FADD:
3685     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
3686   case ISD::VP_FSUB:
3687     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
3688   case ISD::VP_FMUL:
3689     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
3690   case ISD::VP_FDIV:
3691     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
3692   case ISD::VP_FNEG:
3693     return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
3694   case ISD::VP_FMA:
3695     return lowerVPOp(Op, DAG, RISCVISD::FMA_VL);
3696   case ISD::VP_SEXT:
3697   case ISD::VP_ZEXT:
3698     if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
3699       return lowerVPExtMaskOp(Op, DAG);
3700     return lowerVPOp(Op, DAG,
3701                      Op.getOpcode() == ISD::VP_SEXT ? RISCVISD::VSEXT_VL
3702                                                     : RISCVISD::VZEXT_VL);
3703   case ISD::VP_FPTOSI:
3704     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_SINT_VL);
3705   case ISD::VP_FPTOUI:
3706     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_UINT_VL);
3707   case ISD::VP_SITOFP:
3708     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::SINT_TO_FP_VL);
3709   case ISD::VP_UITOFP:
3710     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::UINT_TO_FP_VL);
3711   case ISD::VP_SETCC:
3712     return lowerVPOp(Op, DAG, RISCVISD::SETCC_VL);
3713   }
3714 }
3715 
3716 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
3717                              SelectionDAG &DAG, unsigned Flags) {
3718   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
3719 }
3720 
3721 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
3722                              SelectionDAG &DAG, unsigned Flags) {
3723   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
3724                                    Flags);
3725 }
3726 
3727 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
3728                              SelectionDAG &DAG, unsigned Flags) {
3729   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
3730                                    N->getOffset(), Flags);
3731 }
3732 
3733 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
3734                              SelectionDAG &DAG, unsigned Flags) {
3735   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
3736 }
3737 
3738 template <class NodeTy>
3739 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
3740                                      bool IsLocal) const {
3741   SDLoc DL(N);
3742   EVT Ty = getPointerTy(DAG.getDataLayout());
3743 
3744   if (isPositionIndependent()) {
3745     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3746     if (IsLocal)
3747       // Use PC-relative addressing to access the symbol. This generates the
3748       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
3749       // %pcrel_lo(auipc)).
3750       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3751 
3752     // Use PC-relative addressing to access the GOT for this symbol, then load
3753     // the address from the GOT. This generates the pattern (PseudoLA sym),
3754     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
3755     SDValue Load =
3756         SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
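    // Attach a memory operand describing the GOT slot: the load is marked
    // dereferenceable and invariant, so later passes can freely reuse or
    // hoist it.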
3757     MachineFunction &MF = DAG.getMachineFunction();
3758     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3759         MachinePointerInfo::getGOT(MF),
3760         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3761             MachineMemOperand::MOInvariant,
3762         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3763     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3764     return Load;
3765   }
3766 
3767   switch (getTargetMachine().getCodeModel()) {
3768   default:
3769     report_fatal_error("Unsupported code model for lowering");
3770   case CodeModel::Small: {
3771     // Generate a sequence for accessing addresses within the first 2 GiB of
3772     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
3773     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
3774     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
3775     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3776     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
3777   }
3778   case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2 GiB range within
3780     // the address space. This generates the pattern (PseudoLLA sym), which
3781     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3782     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3783     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3784   }
3785   }
3786 }
3787 
3788 template SDValue RISCVTargetLowering::getAddr<GlobalAddressSDNode>(
3789     GlobalAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3790 template SDValue RISCVTargetLowering::getAddr<BlockAddressSDNode>(
3791     BlockAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3792 template SDValue RISCVTargetLowering::getAddr<ConstantPoolSDNode>(
3793     ConstantPoolSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3794 template SDValue RISCVTargetLowering::getAddr<JumpTableSDNode>(
3795     JumpTableSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
3796 
3797 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3798                                                 SelectionDAG &DAG) const {
3799   SDLoc DL(Op);
3800   EVT Ty = Op.getValueType();
3801   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3802   int64_t Offset = N->getOffset();
3803   MVT XLenVT = Subtarget.getXLenVT();
3804 
3805   const GlobalValue *GV = N->getGlobal();
3806   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3807   SDValue Addr = getAddr(N, DAG, IsLocal);
3808 
  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it into the global address node. Later peephole optimisations may choose
  // to fold it back in when profitable.
3813   if (Offset != 0)
3814     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3815                        DAG.getConstant(Offset, DL, XLenVT));
3816   return Addr;
3817 }
3818 
3819 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3820                                                SelectionDAG &DAG) const {
3821   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3822 
3823   return getAddr(N, DAG);
3824 }
3825 
3826 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3827                                                SelectionDAG &DAG) const {
3828   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3829 
3830   return getAddr(N, DAG);
3831 }
3832 
3833 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3834                                             SelectionDAG &DAG) const {
3835   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3836 
3837   return getAddr(N, DAG);
3838 }
3839 
3840 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3841                                               SelectionDAG &DAG,
3842                                               bool UseGOT) const {
3843   SDLoc DL(N);
3844   EVT Ty = getPointerTy(DAG.getDataLayout());
3845   const GlobalValue *GV = N->getGlobal();
3846   MVT XLenVT = Subtarget.getXLenVT();
3847 
3848   if (UseGOT) {
3849     // Use PC-relative addressing to access the GOT for this TLS symbol, then
3850     // load the address from the GOT and add the thread pointer. This generates
3851     // the pattern (PseudoLA_TLS_IE sym), which expands to
3852     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3853     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3854     SDValue Load =
3855         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3856     MachineFunction &MF = DAG.getMachineFunction();
3857     MachineMemOperand *MemOp = MF.getMachineMemOperand(
3858         MachinePointerInfo::getGOT(MF),
3859         MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
3860             MachineMemOperand::MOInvariant,
3861         LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
3862     DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
3863 
3864     // Add the thread pointer.
3865     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3866     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3867   }
3868 
3869   // Generate a sequence for accessing the address relative to the thread
3870   // pointer, with the appropriate adjustment for the thread pointer offset.
3871   // This generates the pattern
3872   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
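  // which corresponds (roughly) to the assembly sequence:
  //   lui  rd, %tprel_hi(sym)
  //   add  rd, rd, tp, %tprel_add(sym)
  //   addi rd, rd, %tprel_lo(sym)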
3873   SDValue AddrHi =
3874       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3875   SDValue AddrAdd =
3876       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3877   SDValue AddrLo =
3878       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3879 
3880   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3881   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3882   SDValue MNAdd = SDValue(
3883       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3884       0);
3885   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3886 }
3887 
3888 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3889                                                SelectionDAG &DAG) const {
3890   SDLoc DL(N);
3891   EVT Ty = getPointerTy(DAG.getDataLayout());
3892   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3893   const GlobalValue *GV = N->getGlobal();
3894 
3895   // Use a PC-relative addressing mode to access the global dynamic GOT address.
3896   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3897   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3898   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3899   SDValue Load =
3900       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3901 
3902   // Prepare argument list to generate call.
3903   ArgListTy Args;
3904   ArgListEntry Entry;
3905   Entry.Node = Load;
3906   Entry.Ty = CallTy;
3907   Args.push_back(Entry);
3908 
3909   // Setup call to __tls_get_addr.
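  // __tls_get_addr takes a single pointer argument (here, the GOT address
  // computed above) and returns the address of the thread-local variable for
  // the current thread.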
3910   TargetLowering::CallLoweringInfo CLI(DAG);
3911   CLI.setDebugLoc(DL)
3912       .setChain(DAG.getEntryNode())
3913       .setLibCallee(CallingConv::C, CallTy,
3914                     DAG.getExternalSymbol("__tls_get_addr", Ty),
3915                     std::move(Args));
3916 
3917   return LowerCallTo(CLI).first;
3918 }
3919 
3920 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3921                                                    SelectionDAG &DAG) const {
3922   SDLoc DL(Op);
3923   EVT Ty = Op.getValueType();
3924   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3925   int64_t Offset = N->getOffset();
3926   MVT XLenVT = Subtarget.getXLenVT();
3927 
3928   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3929 
3930   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3931       CallingConv::GHC)
3932     report_fatal_error("In GHC calling convention TLS is not supported");
3933 
3934   SDValue Addr;
3935   switch (Model) {
3936   case TLSModel::LocalExec:
3937     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3938     break;
3939   case TLSModel::InitialExec:
3940     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3941     break;
3942   case TLSModel::LocalDynamic:
3943   case TLSModel::GeneralDynamic:
3944     Addr = getDynamicTLSAddr(N, DAG);
3945     break;
3946   }
3947 
  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it into the global address node. Later peephole optimisations may choose
  // to fold it back in when profitable.
3952   if (Offset != 0)
3953     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3954                        DAG.getConstant(Offset, DL, XLenVT));
3955   return Addr;
3956 }
3957 
3958 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3959   SDValue CondV = Op.getOperand(0);
3960   SDValue TrueV = Op.getOperand(1);
3961   SDValue FalseV = Op.getOperand(2);
3962   SDLoc DL(Op);
3963   MVT VT = Op.getSimpleValueType();
3964   MVT XLenVT = Subtarget.getXLenVT();
3965 
3966   // Lower vector SELECTs to VSELECTs by splatting the condition.
3967   if (VT.isVector()) {
3968     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3969     SDValue CondSplat = VT.isScalableVector()
3970                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3971                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3972     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3973   }
3974 
3975   // If the result type is XLenVT and CondV is the output of a SETCC node
3976   // which also operated on XLenVT inputs, then merge the SETCC node into the
3977   // lowered RISCVISD::SELECT_CC to take advantage of the integer
3978   // compare+branch instructions. i.e.:
3979   // (select (setcc lhs, rhs, cc), truev, falsev)
3980   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3981   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3982       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3983     SDValue LHS = CondV.getOperand(0);
3984     SDValue RHS = CondV.getOperand(1);
3985     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3986     ISD::CondCode CCVal = CC->get();
3987 
    // Special case for a select of 2 constants that have a difference of 1.
    // Normally this is done by DAGCombine, but if the select is introduced by
    // type legalization or op legalization, we miss it. Restricting to the
    // SETLT case for now because that is what signed saturating add/sub need.
3992     // FIXME: We don't need the condition to be SETLT or even a SETCC,
3993     // but we would probably want to swap the true/false values if the condition
3994     // is SETGE/SETLE to avoid an XORI.
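    // For example, with an XLenVT result, (select (setlt a, b), 4, 3) becomes
    // (add (setlt a, b), 3) and (select (setlt a, b), 3, 4) becomes
    // (sub 4, (setlt a, b)), since the setcc produces 0 or 1.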
3995     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3996         CCVal == ISD::SETLT) {
3997       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3998       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3999       if (TrueVal - 1 == FalseVal)
4000         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
4001       if (TrueVal + 1 == FalseVal)
4002         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
4003     }
4004 
4005     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
4006 
4007     SDValue TargetCC = DAG.getCondCode(CCVal);
4008     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
4009     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
4010   }
4011 
4012   // Otherwise:
4013   // (select condv, truev, falsev)
4014   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
4015   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4016   SDValue SetNE = DAG.getCondCode(ISD::SETNE);
4017 
4018   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
4019 
4020   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
4021 }
4022 
4023 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
4024   SDValue CondV = Op.getOperand(1);
4025   SDLoc DL(Op);
4026   MVT XLenVT = Subtarget.getXLenVT();
4027 
4028   if (CondV.getOpcode() == ISD::SETCC &&
4029       CondV.getOperand(0).getValueType() == XLenVT) {
4030     SDValue LHS = CondV.getOperand(0);
4031     SDValue RHS = CondV.getOperand(1);
4032     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
4033 
4034     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
4035 
4036     SDValue TargetCC = DAG.getCondCode(CCVal);
4037     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
4038                        LHS, RHS, TargetCC, Op.getOperand(2));
4039   }
4040 
4041   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
4042                      CondV, DAG.getConstant(0, DL, XLenVT),
4043                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
4044 }
4045 
4046 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
4047   MachineFunction &MF = DAG.getMachineFunction();
4048   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
4049 
4050   SDLoc DL(Op);
4051   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
4052                                  getPointerTy(MF.getDataLayout()));
4053 
4054   // vastart just stores the address of the VarArgsFrameIndex slot into the
4055   // memory location argument.
4056   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
4057   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
4058                       MachinePointerInfo(SV));
4059 }
4060 
4061 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
4062                                             SelectionDAG &DAG) const {
4063   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
4064   MachineFunction &MF = DAG.getMachineFunction();
4065   MachineFrameInfo &MFI = MF.getFrameInfo();
4066   MFI.setFrameAddressIsTaken(true);
4067   Register FrameReg = RI.getFrameRegister(MF);
4068   int XLenInBytes = Subtarget.getXLen() / 8;
4069 
4070   EVT VT = Op.getValueType();
4071   SDLoc DL(Op);
4072   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
4073   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4074   while (Depth--) {
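    // Assuming the standard frame layout with a frame pointer, the caller's
    // frame address was spilled at fp - 2*XLenInBytes, so chase that slot for
    // each level of depth.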
4075     int Offset = -(XLenInBytes * 2);
4076     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
4077                               DAG.getIntPtrConstant(Offset, DL));
4078     FrameAddr =
4079         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
4080   }
4081   return FrameAddr;
4082 }
4083 
4084 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
4085                                              SelectionDAG &DAG) const {
4086   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
4087   MachineFunction &MF = DAG.getMachineFunction();
4088   MachineFrameInfo &MFI = MF.getFrameInfo();
4089   MFI.setReturnAddressIsTaken(true);
4090   MVT XLenVT = Subtarget.getXLenVT();
4091   int XLenInBytes = Subtarget.getXLen() / 8;
4092 
4093   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
4094     return SDValue();
4095 
4096   EVT VT = Op.getValueType();
4097   SDLoc DL(Op);
4098   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4099   if (Depth) {
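    // For a nonzero depth, load the return address from the caller's frame:
    // assuming the standard frame layout, it was spilled at the frame address
    // minus XLenInBytes.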
4100     int Off = -XLenInBytes;
4101     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
4102     SDValue Offset = DAG.getConstant(Off, DL, VT);
4103     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
4104                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
4105                        MachinePointerInfo());
4106   }
4107 
4108   // Return the value of the return address register, marking it an implicit
4109   // live-in.
4110   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
4111   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
4112 }
4113 
4114 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
4115                                                  SelectionDAG &DAG) const {
4116   SDLoc DL(Op);
4117   SDValue Lo = Op.getOperand(0);
4118   SDValue Hi = Op.getOperand(1);
4119   SDValue Shamt = Op.getOperand(2);
4120   EVT VT = Lo.getValueType();
4121 
4122   // if Shamt-XLEN < 0: // Shamt < XLEN
4123   //   Lo = Lo << Shamt
4124   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 ^ Shamt))
4125   // else:
4126   //   Lo = 0
4127   //   Hi = Lo << (Shamt-XLEN)
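  //
  // Note that XLEN-1 ^ Shamt equals XLEN-1 - Shamt for Shamt < XLEN, so the
  // two-step right shift computes Lo >>u (XLEN - Shamt) without ever using a
  // shift amount of XLEN (which would be out of range when Shamt == 0).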
4128 
4129   SDValue Zero = DAG.getConstant(0, DL, VT);
4130   SDValue One = DAG.getConstant(1, DL, VT);
4131   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4132   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4133   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4134   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
4135 
4136   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
4137   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
4138   SDValue ShiftRightLo =
4139       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
4140   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
4141   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
4142   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
4143 
4144   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4145 
4146   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
4147   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4148 
4149   SDValue Parts[2] = {Lo, Hi};
4150   return DAG.getMergeValues(Parts, DL);
4151 }
4152 
4153 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
4154                                                   bool IsSRA) const {
4155   SDLoc DL(Op);
4156   SDValue Lo = Op.getOperand(0);
4157   SDValue Hi = Op.getOperand(1);
4158   SDValue Shamt = Op.getOperand(2);
4159   EVT VT = Lo.getValueType();
4160 
4161   // SRA expansion:
4162   //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (Shamt ^ XLEN-1))
4164   //     Hi = Hi >>s Shamt
4165   //   else:
4166   //     Lo = Hi >>s (Shamt-XLEN);
4167   //     Hi = Hi >>s (XLEN-1)
4168   //
4169   // SRL expansion:
4170   //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (Shamt ^ XLEN-1))
4172   //     Hi = Hi >>u Shamt
4173   //   else:
4174   //     Lo = Hi >>u (Shamt-XLEN);
4175   //     Hi = 0;
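  //
  // As with the left-shift expansion, (Hi << 1) << (Shamt ^ XLEN-1) computes
  // Hi << (XLEN - Shamt) while keeping each individual shift amount below
  // XLEN.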
4176 
4177   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
4178 
4179   SDValue Zero = DAG.getConstant(0, DL, VT);
4180   SDValue One = DAG.getConstant(1, DL, VT);
4181   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
4182   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
4183   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
4184   SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);
4185 
4186   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
4187   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
4188   SDValue ShiftLeftHi =
4189       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
4190   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
4191   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
4192   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
4193   SDValue HiFalse =
4194       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
4195 
4196   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
4197 
4198   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
4199   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
4200 
4201   SDValue Parts[2] = {Lo, Hi};
4202   return DAG.getMergeValues(Parts, DL);
4203 }
4204 
4205 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
4206 // legal equivalently-sized i8 type, so we can use that as a go-between.
4207 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
4208                                                   SelectionDAG &DAG) const {
4209   SDLoc DL(Op);
4210   MVT VT = Op.getSimpleValueType();
4211   SDValue SplatVal = Op.getOperand(0);
4212   // All-zeros or all-ones splats are handled specially.
4213   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
4214     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4215     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
4216   }
4217   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
4218     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
4219     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
4220   }
4221   MVT XLenVT = Subtarget.getXLenVT();
4222   assert(SplatVal.getValueType() == XLenVT &&
4223          "Unexpected type for i1 splat value");
4224   MVT InterVT = VT.changeVectorElementType(MVT::i8);
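  // Only the least-significant bit of the splat value is meaningful for an i1
  // splat, so mask it off before comparing against zero.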
4225   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
4226                          DAG.getConstant(1, DL, XLenVT));
4227   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
4228   SDValue Zero = DAG.getConstant(0, DL, InterVT);
4229   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
4230 }
4231 
// Custom-lower a SPLAT_VECTOR_PARTS where XLEN < SEW, as the SEW-sized element
// type is illegal (currently only vXi64 on RV32).
4234 // FIXME: We could also catch non-constant sign-extended i32 values and lower
4235 // them to VMV_V_X_VL.
4236 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
4237                                                      SelectionDAG &DAG) const {
4238   SDLoc DL(Op);
4239   MVT VecVT = Op.getSimpleValueType();
4240   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
4241          "Unexpected SPLAT_VECTOR_PARTS lowering");
4242 
4243   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
4244   SDValue Lo = Op.getOperand(0);
4245   SDValue Hi = Op.getOperand(1);
4246 
4247   if (VecVT.isFixedLengthVector()) {
4248     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4249     SDLoc DL(Op);
4250     SDValue Mask, VL;
4251     std::tie(Mask, VL) =
4252         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4253 
4254     SDValue Res =
4255         splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
4256     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
4257   }
4258 
4259   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
4260     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
4261     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If the Hi constant is simply the sign bit of Lo replicated (i.e. Hi is
    // the sign-extension of Lo), lower this as a custom node in order to try
    // and match RVV vector/scalar instructions.
4264     if ((LoC >> 31) == HiC)
4265       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
4266                          Lo, DAG.getRegister(RISCV::X0, MVT::i32));
4267   }
4268 
  // Detect cases where Hi is (SRA Lo, 31), which means Hi is the
  // sign-extension of Lo.
4270   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
4271       isa<ConstantSDNode>(Hi.getOperand(1)) &&
4272       Hi.getConstantOperandVal(1) == 31)
4273     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), Lo,
4274                        DAG.getRegister(RISCV::X0, MVT::i32));
4275 
  // Fall back to a stack store and a stride-x0 vector load. Use X0 as the
  // VL.
4277   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
4278                      DAG.getUNDEF(VecVT), Lo, Hi,
4279                      DAG.getRegister(RISCV::X0, MVT::i32));
4280 }
4281 
4282 // Custom-lower extensions from mask vectors by using a vselect either with 1
4283 // for zero/any-extension or -1 for sign-extension:
4284 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
4285 // Note that any-extension is lowered identically to zero-extension.
4286 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
4287                                                 int64_t ExtTrueVal) const {
4288   SDLoc DL(Op);
4289   MVT VecVT = Op.getSimpleValueType();
4290   SDValue Src = Op.getOperand(0);
4291   // Only custom-lower extensions from mask types
4292   assert(Src.getValueType().isVector() &&
4293          Src.getValueType().getVectorElementType() == MVT::i1);
4294 
4295   if (VecVT.isScalableVector()) {
4296     SDValue SplatZero = DAG.getConstant(0, DL, VecVT);
4297     SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, VecVT);
4298     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
4299   }
4300 
4301   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
4302   MVT I1ContainerVT =
4303       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4304 
4305   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
4306 
4307   SDValue Mask, VL;
4308   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4309 
4310   MVT XLenVT = Subtarget.getXLenVT();
4311   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
4312   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
4313 
4314   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4315                           DAG.getUNDEF(ContainerVT), SplatZero, VL);
4316   SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4317                              DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
4318   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
4319                                SplatTrueVal, SplatZero, VL);
4320 
4321   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
4322 }
4323 
4324 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
4325     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
4326   MVT ExtVT = Op.getSimpleValueType();
4327   // Only custom-lower extensions from fixed-length vector types.
4328   if (!ExtVT.isFixedLengthVector())
4329     return Op;
4330   MVT VT = Op.getOperand(0).getSimpleValueType();
4331   // Grab the canonical container type for the extended type. Infer the smaller
4332   // type from that to ensure the same number of vector elements, as we know
4333   // the LMUL will be sufficient to hold the smaller type.
4334   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
4335   // Get the extended container type manually to ensure the same number of
4336   // vector elements between source and dest.
4337   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4338                                      ContainerExtVT.getVectorElementCount());
4339 
4340   SDValue Op1 =
4341       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4342 
4343   SDLoc DL(Op);
4344   SDValue Mask, VL;
4345   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4346 
4347   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
4348 
4349   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
4350 }
4351 
4352 // Custom-lower truncations from vectors to mask vectors by using a mask and a
4353 // setcc operation:
4354 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
4355 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
4356                                                   SelectionDAG &DAG) const {
4357   SDLoc DL(Op);
4358   EVT MaskVT = Op.getValueType();
4359   // Only expect to custom-lower truncations to mask types
4360   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
4361          "Unexpected type for vector mask lowering");
4362   SDValue Src = Op.getOperand(0);
4363   MVT VecVT = Src.getSimpleValueType();
4364 
4365   // If this is a fixed vector, we need to convert it to a scalable vector.
4366   MVT ContainerVT = VecVT;
4367   if (VecVT.isFixedLengthVector()) {
4368     ContainerVT = getContainerForFixedLengthVector(VecVT);
4369     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
4370   }
4371 
4372   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
4373   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
4374 
4375   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4376                          DAG.getUNDEF(ContainerVT), SplatOne);
4377   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4378                           DAG.getUNDEF(ContainerVT), SplatZero);
4379 
4380   if (VecVT.isScalableVector()) {
4381     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
4382     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
4383   }
4384 
4385   SDValue Mask, VL;
4386   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4387 
4388   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
4389   SDValue Trunc =
4390       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
4391   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
4392                       DAG.getCondCode(ISD::SETNE), Mask, VL);
4393   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
4394 }
4395 
4396 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
4397 // first position of a vector, and that vector is slid up to the insert index.
4398 // By limiting the active vector length to index+1 and merging with the
4399 // original vector (with an undisturbed tail policy for elements >= VL), we
4400 // achieve the desired result of leaving all elements untouched except the one
4401 // at VL-1, which is replaced with the desired value.
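// For example, inserting Val at index 2 of a fixed-length vector uses
// (roughly) the sequence:
//   vsetivli    zero, 3, ...       ; VL = Idx + 1, tail undisturbed
//   vmv.s.x     vTmp, Val          ; place Val at element 0 of a temporary
//   vslideup.vi vVec, vTmp, 2      ; slide Val up to index 2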
4402 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4403                                                     SelectionDAG &DAG) const {
4404   SDLoc DL(Op);
4405   MVT VecVT = Op.getSimpleValueType();
4406   SDValue Vec = Op.getOperand(0);
4407   SDValue Val = Op.getOperand(1);
4408   SDValue Idx = Op.getOperand(2);
4409 
4410   if (VecVT.getVectorElementType() == MVT::i1) {
4411     // FIXME: For now we just promote to an i8 vector and insert into that,
4412     // but this is probably not optimal.
4413     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4414     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4415     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
4416     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
4417   }
4418 
4419   MVT ContainerVT = VecVT;
4420   // If the operand is a fixed-length vector, convert to a scalable one.
4421   if (VecVT.isFixedLengthVector()) {
4422     ContainerVT = getContainerForFixedLengthVector(VecVT);
4423     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4424   }
4425 
4426   MVT XLenVT = Subtarget.getXLenVT();
4427 
4428   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4429   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
  // Even i64-element vectors on RV32 can be lowered without scalar
  // legalization if the upper 32 bits of the value match the sign-extension
  // of the lower 32 bits (i.e. the value fits in a signed 32-bit immediate).
4433   // TODO: We could also catch sign extensions of a 32-bit value.
4434   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
4435     const auto *CVal = cast<ConstantSDNode>(Val);
4436     if (isInt<32>(CVal->getSExtValue())) {
4437       IsLegalInsert = true;
4438       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
4439     }
4440   }
4441 
4442   SDValue Mask, VL;
4443   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4444 
4445   SDValue ValInVec;
4446 
4447   if (IsLegalInsert) {
4448     unsigned Opc =
4449         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4450     if (isNullConstant(Idx)) {
4451       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4452       if (!VecVT.isFixedLengthVector())
4453         return Vec;
4454       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4455     }
4456     ValInVec =
4457         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4458   } else {
4459     // On RV32, i64-element vectors must be specially handled to place the
4460     // value at element 0, by using two vslide1up instructions in sequence on
4461     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4462     // this.
4463     SDValue One = DAG.getConstant(1, DL, XLenVT);
4464     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4465     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4466     MVT I32ContainerVT =
4467         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4468     SDValue I32Mask =
4469         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
    // Limit the active VL to two i32 elements, i.e. one i64 element.
4471     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
    // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
    // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4474     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT,
4475                            DAG.getUNDEF(I32ContainerVT), Zero, InsertI64VL);
4476     // First slide in the hi value, then the lo in underneath it.
4477     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4478                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
4479                            I32Mask, InsertI64VL);
4480     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4481                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo,
4482                            I32Mask, InsertI64VL);
4483     // Bitcast back to the right container type.
4484     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4485   }
4486 
4487   // Now that the value is in a vector, slide it into position.
4488   SDValue InsertVL =
4489       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4490   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4491                                 ValInVec, Idx, Mask, InsertVL);
4492   if (!VecVT.isFixedLengthVector())
4493     return Slideup;
4494   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4495 }
4496 
4497 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4498 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4499 // types this is done using VMV_X_S to allow us to glean information about the
4500 // sign bits of the result.
4501 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4502                                                      SelectionDAG &DAG) const {
4503   SDLoc DL(Op);
4504   SDValue Idx = Op.getOperand(1);
4505   SDValue Vec = Op.getOperand(0);
4506   EVT EltVT = Op.getValueType();
4507   MVT VecVT = Vec.getSimpleValueType();
4508   MVT XLenVT = Subtarget.getXLenVT();
4509 
4510   if (VecVT.getVectorElementType() == MVT::i1) {
4511     if (VecVT.isFixedLengthVector()) {
4512       unsigned NumElts = VecVT.getVectorNumElements();
4513       if (NumElts >= 8) {
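        // Reinterpret the mask as a vector of wider integer elements, extract
        // the element containing the requested bit into a GPR, then shift and
        // mask the bit out. For example, a v64i1 mask on RV64 is bitcast to
        // v1i64, and the result is computed as (elt >> idx) & 1.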
4514         MVT WideEltVT;
4515         unsigned WidenVecLen;
4516         SDValue ExtractElementIdx;
4517         SDValue ExtractBitIdx;
4518         unsigned MaxEEW = Subtarget.getELEN();
4519         MVT LargestEltVT = MVT::getIntegerVT(
4520             std::min(MaxEEW, unsigned(XLenVT.getSizeInBits())));
4521         if (NumElts <= LargestEltVT.getSizeInBits()) {
4522           assert(isPowerOf2_32(NumElts) &&
4523                  "the number of elements should be power of 2");
4524           WideEltVT = MVT::getIntegerVT(NumElts);
4525           WidenVecLen = 1;
4526           ExtractElementIdx = DAG.getConstant(0, DL, XLenVT);
4527           ExtractBitIdx = Idx;
4528         } else {
4529           WideEltVT = LargestEltVT;
4530           WidenVecLen = NumElts / WideEltVT.getSizeInBits();
4531           // extract element index = index / element width
4532           ExtractElementIdx = DAG.getNode(
4533               ISD::SRL, DL, XLenVT, Idx,
4534               DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT));
4535           // mask bit index = index % element width
4536           ExtractBitIdx = DAG.getNode(
4537               ISD::AND, DL, XLenVT, Idx,
4538               DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT));
4539         }
4540         MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen);
4541         Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec);
4542         SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT,
4543                                          Vec, ExtractElementIdx);
4544         // Extract the bit from GPR.
4545         SDValue ShiftRight =
4546             DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx);
4547         return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight,
4548                            DAG.getConstant(1, DL, XLenVT));
4549       }
4550     }
4551     // Otherwise, promote to an i8 vector and extract from that.
4552     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
4553     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
4554     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
4555   }
4556 
4557   // If this is a fixed vector, we need to convert it to a scalable vector.
4558   MVT ContainerVT = VecVT;
4559   if (VecVT.isFixedLengthVector()) {
4560     ContainerVT = getContainerForFixedLengthVector(VecVT);
4561     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4562   }
4563 
4564   // If the index is 0, the vector is already in the right position.
4565   if (!isNullConstant(Idx)) {
4566     // Use a VL of 1 to avoid processing more elements than we need.
4567     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4568     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4569     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4570     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4571                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4572   }
4573 
4574   if (!EltVT.isInteger()) {
4575     // Floating-point extracts are handled in TableGen.
4576     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
4577                        DAG.getConstant(0, DL, XLenVT));
4578   }
4579 
4580   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4581   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
4582 }
4583 
4584 // Some RVV intrinsics may claim that they want an integer operand to be
4585 // promoted or expanded.
4586 static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
4587                                            const RISCVSubtarget &Subtarget) {
4588   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4589           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4590          "Unexpected opcode");
4591 
4592   if (!Subtarget.hasVInstructions())
4593     return SDValue();
4594 
4595   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4596   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4597   SDLoc DL(Op);
4598 
4599   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4600       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4601   if (!II || !II->hasScalarOperand())
4602     return SDValue();
4603 
4604   unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
4605   assert(SplatOp < Op.getNumOperands());
4606 
4607   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4608   SDValue &ScalarOp = Operands[SplatOp];
4609   MVT OpVT = ScalarOp.getSimpleValueType();
4610   MVT XLenVT = Subtarget.getXLenVT();
4611 
  // If this isn't a scalar, or its type is already XLenVT, we're done.
4613   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4614     return SDValue();
4615 
4616   // Simplest case is that the operand needs to be promoted to XLenVT.
4617   if (OpVT.bitsLT(XLenVT)) {
    // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
    // FIXME: Should we ignore the upper bits in isel instead?
4622     unsigned ExtOpc =
4623         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4624     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4625     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4626   }
4627 
4628   // Use the previous operand to get the vXi64 VT. The result might be a mask
4629   // VT for compares. Using the previous operand assumes that the previous
4630   // operand will never have a smaller element size than a scalar operand and
4631   // that a widening operation never uses SEW=64.
  // NOTE: If this assumption is wrong and the assert below fires, we can
  // probably just find the element count from any operand or result and use
  // it to construct the VT.
4634   assert(II->ScalarOperand > 0 && "Unexpected splat operand!");
4635   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4636 
4637   // The more complex case is when the scalar is larger than XLenVT.
4638   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4639          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4640 
4641   // If this is a sign-extended 32-bit value, we can truncate it and rely on the
4642   // instruction to sign-extend since SEW>XLEN.
4643   if (DAG.ComputeNumSignBits(ScalarOp) > 32) {
4644     ScalarOp = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, ScalarOp);
4645     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4646   }
4647 
4648   switch (IntNo) {
4649   case Intrinsic::riscv_vslide1up:
4650   case Intrinsic::riscv_vslide1down:
4651   case Intrinsic::riscv_vslide1up_mask:
4652   case Intrinsic::riscv_vslide1down_mask: {
4653     // We need to special case these when the scalar is larger than XLen.
4654     unsigned NumOps = Op.getNumOperands();
4655     bool IsMasked = NumOps == 7;
4656 
4657     // Convert the vector source to the equivalent nxvXi32 vector.
4658     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4659     SDValue Vec = DAG.getBitcast(I32VT, Operands[2]);
4660 
4661     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4662                                    DAG.getConstant(0, DL, XLenVT));
4663     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4664                                    DAG.getConstant(1, DL, XLenVT));
4665 
4666     // Double the VL since we halved SEW.
4667     SDValue AVL = getVLOperand(Op);
4668     SDValue I32VL;
4669 
    // Optimize for a constant AVL.
4671     if (isa<ConstantSDNode>(AVL)) {
4672       unsigned EltSize = VT.getScalarSizeInBits();
4673       unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
4674 
4675       unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
4676       unsigned MaxVLMAX =
4677           RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
4678 
4679       unsigned VectorBitsMin = Subtarget.getRealMinVLen();
4680       unsigned MinVLMAX =
4681           RISCVTargetLowering::computeVLMAX(VectorBitsMin, EltSize, MinSize);
4682 
4683       uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
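      // If AVL is no larger than the smallest possible VLMAX, vsetvli would
      // return AVL unchanged on any implementation, so the doubled VL is just
      // the constant 2 * AVL.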
4684       if (AVLInt <= MinVLMAX) {
4685         I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
4686       } else if (AVLInt >= 2 * MaxVLMAX) {
        // Just set VL to VLMAX in this situation.
4688         RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(I32VT);
4689         SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4690         unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
4691         SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4692         SDValue SETVLMAX = DAG.getTargetConstant(
4693             Intrinsic::riscv_vsetvlimax_opt, DL, MVT::i32);
4694         I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
4695                             LMUL);
4696       } else {
        // For an AVL in the range (MinVLMAX, 2 * MaxVLMAX), the actual
        // working VL depends on the hardware implementation, so let the code
        // below compute it with vsetvli.
4700       }
4701     }
4702     if (!I32VL) {
4703       RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
4704       SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4705       unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
4706       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4707       SDValue SETVL =
4708           DAG.getTargetConstant(Intrinsic::riscv_vsetvli_opt, DL, MVT::i32);
      // Use a vsetvli instruction to get the actual VL used, which depends on
      // the hardware implementation.
4711       SDValue VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVL, AVL,
4712                                SEW, LMUL);
4713       I32VL =
4714           DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4715     }
4716 
4717     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
4718     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, I32VL);
4719 
4720     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4721     // instructions.
4722     SDValue Passthru;
4723     if (IsMasked)
4724       Passthru = DAG.getUNDEF(I32VT);
4725     else
4726       Passthru = DAG.getBitcast(I32VT, Operands[1]);
4727 
4728     if (IntNo == Intrinsic::riscv_vslide1up ||
4729         IntNo == Intrinsic::riscv_vslide1up_mask) {
4730       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4731                         ScalarHi, I32Mask, I32VL);
4732       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4733                         ScalarLo, I32Mask, I32VL);
4734     } else {
4735       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4736                         ScalarLo, I32Mask, I32VL);
4737       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4738                         ScalarHi, I32Mask, I32VL);
4739     }
4740 
4741     // Convert back to nxvXi64.
4742     Vec = DAG.getBitcast(VT, Vec);
4743 
4744     if (!IsMasked)
4745       return Vec;
4746     // Apply mask after the operation.
4747     SDValue Mask = Operands[NumOps - 3];
4748     SDValue MaskedOff = Operands[1];
    // Assume the policy operand is the last operand.
4750     uint64_t Policy =
4751         cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
4752     // We don't need to select maskedoff if it's undef.
4753     if (MaskedOff.isUndef())
4754       return Vec;
    // TAMU (tail agnostic, mask undisturbed).
4756     if (Policy == RISCVII::TAIL_AGNOSTIC)
4757       return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
4758                          AVL);
    // TUMA or TUMU: Currently we always emit the TUMU policy regardless of
    // TUMA. This is fine because vmerge does not care about mask policy.
4761     return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff,
4762                        AVL);
4763   }
4764   }
4765 
4766   // We need to convert the scalar to a splat vector.
4767   SDValue VL = getVLOperand(Op);
4768   assert(VL.getValueType() == XLenVT);
4769   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
4770   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4771 }
4772 
4773 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4774                                                      SelectionDAG &DAG) const {
4775   unsigned IntNo = Op.getConstantOperandVal(0);
4776   SDLoc DL(Op);
4777   MVT XLenVT = Subtarget.getXLenVT();
4778 
4779   switch (IntNo) {
4780   default:
4781     break; // Don't custom lower most intrinsics.
4782   case Intrinsic::thread_pointer: {
4783     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4784     return DAG.getRegister(RISCV::X4, PtrVT);
4785   }
4786   case Intrinsic::riscv_orc_b:
4787   case Intrinsic::riscv_brev8: {
4788     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
4789     unsigned Opc =
4790         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4791     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4792                        DAG.getConstant(7, DL, XLenVT));
4793   }
4794   case Intrinsic::riscv_grev:
4795   case Intrinsic::riscv_gorc: {
4796     unsigned Opc =
4797         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4798     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4799   }
4800   case Intrinsic::riscv_zip:
4801   case Intrinsic::riscv_unzip: {
4802     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4803     // For i32 the immediate is 15. For i64 the immediate is 31.
4804     unsigned Opc =
4805         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4806     unsigned BitWidth = Op.getValueSizeInBits();
4807     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4808     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4809                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4810   }
4811   case Intrinsic::riscv_shfl:
4812   case Intrinsic::riscv_unshfl: {
4813     unsigned Opc =
4814         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4815     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4816   }
4817   case Intrinsic::riscv_bcompress:
4818   case Intrinsic::riscv_bdecompress: {
4819     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
4820                                                        : RISCVISD::BDECOMPRESS;
4821     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4822   }
4823   case Intrinsic::riscv_bfp:
4824     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4825                        Op.getOperand(2));
4826   case Intrinsic::riscv_fsl:
4827     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4828                        Op.getOperand(2), Op.getOperand(3));
4829   case Intrinsic::riscv_fsr:
4830     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4831                        Op.getOperand(2), Op.getOperand(3));
4832   case Intrinsic::riscv_vmv_x_s:
4833     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4834     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4835                        Op.getOperand(1));
4836   case Intrinsic::riscv_vmv_v_x:
4837     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4838                             Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
4839                             Subtarget);
4840   case Intrinsic::riscv_vfmv_v_f:
4841     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4842                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4843   case Intrinsic::riscv_vmv_s_x: {
4844     SDValue Scalar = Op.getOperand(2);
4845 
4846     if (Scalar.getValueType().bitsLE(XLenVT)) {
4847       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4848       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4849                          Op.getOperand(1), Scalar, Op.getOperand(3));
4850     }
4851 
4852     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4853 
4854     // This is an i64 value that lives in two scalar registers. We have to
    // insert this in a convoluted way. First we build a vXi64 splat containing
4856     // the two values that we assemble using some bit math. Next we'll use
4857     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4858     // to merge element 0 from our splat into the source vector.
4859     // FIXME: This is probably not the best way to do this, but it is
4860     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4861     // point.
4862     //   sw lo, (a0)
4863     //   sw hi, 4(a0)
4864     //   vlse vX, (a0)
4865     //
4866     //   vid.v      vVid
4867     //   vmseq.vx   mMask, vVid, 0
4868     //   vmerge.vvm vDest, vSrc, vVal, mMask
4869     MVT VT = Op.getSimpleValueType();
4870     SDValue Vec = Op.getOperand(1);
4871     SDValue VL = getVLOperand(Op);
4872 
    SDValue SplattedVal =
        splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
4874     if (Op.getOperand(1).isUndef())
4875       return SplattedVal;
4876     SDValue SplattedIdx =
4877         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
4878                     DAG.getConstant(0, DL, MVT::i32), VL);
4879 
4880     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
4881     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4882     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4883     SDValue SelectCond =
4884         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4885                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4886     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4887                        Vec, VL);
4888   }
4889   }
4890 
4891   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4892 }
4893 
4894 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4895                                                     SelectionDAG &DAG) const {
4896   unsigned IntNo = Op.getConstantOperandVal(1);
4897   switch (IntNo) {
4898   default:
4899     break;
4900   case Intrinsic::riscv_masked_strided_load: {
4901     SDLoc DL(Op);
4902     MVT XLenVT = Subtarget.getXLenVT();
4903 
4904     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4905     // the selection of the masked intrinsics doesn't do this for us.
4906     SDValue Mask = Op.getOperand(5);
4907     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4908 
4909     MVT VT = Op->getSimpleValueType(0);
4910     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4911 
4912     SDValue PassThru = Op.getOperand(2);
4913     if (!IsUnmasked) {
4914       MVT MaskVT =
4915           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4916       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4917       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4918     }
4919 
4920     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4921 
4922     SDValue IntID = DAG.getTargetConstant(
4923         IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4924         XLenVT);
4925 
4926     auto *Load = cast<MemIntrinsicSDNode>(Op);
4927     SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
4928     if (IsUnmasked)
4929       Ops.push_back(DAG.getUNDEF(ContainerVT));
4930     else
4931       Ops.push_back(PassThru);
4932     Ops.push_back(Op.getOperand(3)); // Ptr
4933     Ops.push_back(Op.getOperand(4)); // Stride
4934     if (!IsUnmasked)
4935       Ops.push_back(Mask);
4936     Ops.push_back(VL);
4937     if (!IsUnmasked) {
      SDValue Policy =
          DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4939       Ops.push_back(Policy);
4940     }
4941 
4942     SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4943     SDValue Result =
4944         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4945                                 Load->getMemoryVT(), Load->getMemOperand());
4946     SDValue Chain = Result.getValue(1);
4947     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4948     return DAG.getMergeValues({Result, Chain}, DL);
4949   }
4950   case Intrinsic::riscv_seg2_load:
4951   case Intrinsic::riscv_seg3_load:
4952   case Intrinsic::riscv_seg4_load:
4953   case Intrinsic::riscv_seg5_load:
4954   case Intrinsic::riscv_seg6_load:
4955   case Intrinsic::riscv_seg7_load:
4956   case Intrinsic::riscv_seg8_load: {
4957     SDLoc DL(Op);
4958     static const Intrinsic::ID VlsegInts[7] = {
4959         Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3,
4960         Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5,
4961         Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7,
4962         Intrinsic::riscv_vlseg8};
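    // As an illustrative sketch: a riscv_seg2_load returning two fixed-length
    // vectors maps to riscv_vlseg2 on the scalable container type, and each
    // returned segment is converted back to the fixed-length type below.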
4963     unsigned NF = Op->getNumValues() - 1;
4964     assert(NF >= 2 && NF <= 8 && "Unexpected seg number");
4965     MVT XLenVT = Subtarget.getXLenVT();
4966     MVT VT = Op->getSimpleValueType(0);
4967     MVT ContainerVT = getContainerForFixedLengthVector(VT);
4968 
4969     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4970     SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT);
4971     auto *Load = cast<MemIntrinsicSDNode>(Op);
4972     SmallVector<EVT, 9> ContainerVTs(NF, ContainerVT);
4973     ContainerVTs.push_back(MVT::Other);
4974     SDVTList VTs = DAG.getVTList(ContainerVTs);
4975     SDValue Result =
4976         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs,
4977                                 {Load->getChain(), IntID, Op.getOperand(2), VL},
4978                                 Load->getMemoryVT(), Load->getMemOperand());
4979     SmallVector<SDValue, 9> Results;
4980     for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++)
4981       Results.push_back(convertFromScalableVector(VT, Result.getValue(RetIdx),
4982                                                   DAG, Subtarget));
4983     Results.push_back(Result.getValue(NF));
4984     return DAG.getMergeValues(Results, DL);
4985   }
4986   }
4987 
4988   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4989 }
4990 
4991 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4992                                                  SelectionDAG &DAG) const {
4993   unsigned IntNo = Op.getConstantOperandVal(1);
4994   switch (IntNo) {
4995   default:
4996     break;
4997   case Intrinsic::riscv_masked_strided_store: {
4998     SDLoc DL(Op);
4999     MVT XLenVT = Subtarget.getXLenVT();
5000 
5001     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5002     // the selection of the masked intrinsics doesn't do this for us.
5003     SDValue Mask = Op.getOperand(5);
5004     bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5005 
5006     SDValue Val = Op.getOperand(2);
5007     MVT VT = Val.getSimpleValueType();
5008     MVT ContainerVT = getContainerForFixedLengthVector(VT);
5009 
5010     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5011     if (!IsUnmasked) {
5012       MVT MaskVT =
5013           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5014       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5015     }
5016 
5017     SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5018 
5019     SDValue IntID = DAG.getTargetConstant(
5020         IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
5021         XLenVT);
5022 
5023     auto *Store = cast<MemIntrinsicSDNode>(Op);
5024     SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
5025     Ops.push_back(Val);
5026     Ops.push_back(Op.getOperand(3)); // Ptr
5027     Ops.push_back(Op.getOperand(4)); // Stride
5028     if (!IsUnmasked)
5029       Ops.push_back(Mask);
5030     Ops.push_back(VL);
5031 
5032     return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
5033                                    Ops, Store->getMemoryVT(),
5034                                    Store->getMemOperand());
5035   }
5036   }
5037 
5038   return SDValue();
5039 }
5040 
5041 static MVT getLMUL1VT(MVT VT) {
5042   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
5043          "Unexpected vector MVT");
5044   return MVT::getScalableVectorVT(
5045       VT.getVectorElementType(),
5046       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
5047 }
5048 
5049 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
5050   switch (ISDOpcode) {
5051   default:
5052     llvm_unreachable("Unhandled reduction");
5053   case ISD::VECREDUCE_ADD:
5054     return RISCVISD::VECREDUCE_ADD_VL;
5055   case ISD::VECREDUCE_UMAX:
5056     return RISCVISD::VECREDUCE_UMAX_VL;
5057   case ISD::VECREDUCE_SMAX:
5058     return RISCVISD::VECREDUCE_SMAX_VL;
5059   case ISD::VECREDUCE_UMIN:
5060     return RISCVISD::VECREDUCE_UMIN_VL;
5061   case ISD::VECREDUCE_SMIN:
5062     return RISCVISD::VECREDUCE_SMIN_VL;
5063   case ISD::VECREDUCE_AND:
5064     return RISCVISD::VECREDUCE_AND_VL;
5065   case ISD::VECREDUCE_OR:
5066     return RISCVISD::VECREDUCE_OR_VL;
5067   case ISD::VECREDUCE_XOR:
5068     return RISCVISD::VECREDUCE_XOR_VL;
5069   }
5070 }
5071 
5072 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
5073                                                          SelectionDAG &DAG,
5074                                                          bool IsVP) const {
5075   SDLoc DL(Op);
5076   SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
5077   MVT VecVT = Vec.getSimpleValueType();
5078   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
5079           Op.getOpcode() == ISD::VECREDUCE_OR ||
5080           Op.getOpcode() == ISD::VECREDUCE_XOR ||
5081           Op.getOpcode() == ISD::VP_REDUCE_AND ||
5082           Op.getOpcode() == ISD::VP_REDUCE_OR ||
5083           Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
5084          "Unexpected reduction lowering");
5085 
5086   MVT XLenVT = Subtarget.getXLenVT();
5087   assert(Op.getValueType() == XLenVT &&
5088          "Expected reduction output to be legalized to XLenVT");
5089 
5090   MVT ContainerVT = VecVT;
5091   if (VecVT.isFixedLengthVector()) {
5092     ContainerVT = getContainerForFixedLengthVector(VecVT);
5093     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5094   }
5095 
5096   SDValue Mask, VL;
5097   if (IsVP) {
5098     Mask = Op.getOperand(2);
5099     VL = Op.getOperand(3);
5100   } else {
5101     std::tie(Mask, VL) =
5102         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5103   }
5104 
5105   unsigned BaseOpc;
5106   ISD::CondCode CC;
5107   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
5108 
5109   switch (Op.getOpcode()) {
5110   default:
5111     llvm_unreachable("Unhandled reduction");
5112   case ISD::VECREDUCE_AND:
5113   case ISD::VP_REDUCE_AND: {
5114     // vcpop ~x == 0
5115     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
5116     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
5117     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5118     CC = ISD::SETEQ;
5119     BaseOpc = ISD::AND;
5120     break;
5121   }
5122   case ISD::VECREDUCE_OR:
5123   case ISD::VP_REDUCE_OR:
5124     // vcpop x != 0
5125     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5126     CC = ISD::SETNE;
5127     BaseOpc = ISD::OR;
5128     break;
5129   case ISD::VECREDUCE_XOR:
5130   case ISD::VP_REDUCE_XOR: {
5131     // ((vcpop x) & 1) != 0
5132     SDValue One = DAG.getConstant(1, DL, XLenVT);
5133     Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
5134     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
5135     CC = ISD::SETNE;
5136     BaseOpc = ISD::XOR;
5137     break;
5138   }
5139   }
5140 
5141   SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
5142 
5143   if (!IsVP)
5144     return SetCC;
5145 
5146   // Now include the start value in the operation.
5147   // Note that we must return the start value when no elements are operated
5148   // upon. The vcpop instructions we've emitted in each case above will return
5149   // 0 for an inactive vector, and so we've already received the neutral value:
5150   // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
5151   // can simply include the start value.
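  // For example, a vp.reduce.or with an EVL of 0 gives vcpop == 0, so the
  // setcc produces 0 (the OR identity) and OR'ing in the start value returns
  // the start value unchanged, as required.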
5152   return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
5153 }
5154 
5155 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
5156                                             SelectionDAG &DAG) const {
5157   SDLoc DL(Op);
5158   SDValue Vec = Op.getOperand(0);
5159   EVT VecEVT = Vec.getValueType();
5160 
5161   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
5162 
  // Due to the ordering of type legalization, we may have a vector type that
  // needs to be split. Do that manually so we can get down to a legal type.
5165   while (getTypeAction(*DAG.getContext(), VecEVT) ==
5166          TargetLowering::TypeSplitVector) {
5167     SDValue Lo, Hi;
5168     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
5169     VecEVT = Lo.getValueType();
5170     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
5171   }
5172 
5173   // TODO: The type may need to be widened rather than split. Or widened before
5174   // it can be split.
5175   if (!isTypeLegal(VecEVT))
5176     return SDValue();
5177 
5178   MVT VecVT = VecEVT.getSimpleVT();
5179   MVT VecEltVT = VecVT.getVectorElementType();
5180   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
5181 
5182   MVT ContainerVT = VecVT;
5183   if (VecVT.isFixedLengthVector()) {
5184     ContainerVT = getContainerForFixedLengthVector(VecVT);
5185     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5186   }
5187 
5188   MVT M1VT = getLMUL1VT(ContainerVT);
5189   MVT XLenVT = Subtarget.getXLenVT();
5190 
5191   SDValue Mask, VL;
5192   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5193 
5194   SDValue NeutralElem =
5195       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
5196   SDValue IdentitySplat =
5197       lowerScalarSplat(SDValue(), NeutralElem, DAG.getConstant(1, DL, XLenVT),
5198                        M1VT, DL, DAG, Subtarget);
5199   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
5200                                   IdentitySplat, Mask, VL);
5201   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5202                              DAG.getConstant(0, DL, XLenVT));
5203   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5204 }
5205 
5206 // Given a reduction op, this function returns the matching reduction opcode,
5207 // the vector SDValue and the scalar SDValue required to lower this to a
5208 // RISCVISD node.
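// For example, a VECREDUCE_FADD without the no-signed-zeros flag maps to
// (RISCVISD::VECREDUCE_FADD_VL, operand 0, -0.0), with -0.0 acting as the
// FADD identity element.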
5209 static std::tuple<unsigned, SDValue, SDValue>
5210 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
5211   SDLoc DL(Op);
5212   auto Flags = Op->getFlags();
5213   unsigned Opcode = Op.getOpcode();
5214   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
5215   switch (Opcode) {
5216   default:
5217     llvm_unreachable("Unhandled reduction");
5218   case ISD::VECREDUCE_FADD: {
5219     // Use positive zero if we can. It is cheaper to materialize.
5220     SDValue Zero =
5221         DAG.getConstantFP(Flags.hasNoSignedZeros() ? 0.0 : -0.0, DL, EltVT);
5222     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero);
5223   }
5224   case ISD::VECREDUCE_SEQ_FADD:
5225     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
5226                            Op.getOperand(0));
5227   case ISD::VECREDUCE_FMIN:
5228     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
5229                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5230   case ISD::VECREDUCE_FMAX:
5231     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
5232                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
5233   }
5234 }
5235 
5236 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
5237                                               SelectionDAG &DAG) const {
5238   SDLoc DL(Op);
5239   MVT VecEltVT = Op.getSimpleValueType();
5240 
5241   unsigned RVVOpcode;
5242   SDValue VectorVal, ScalarVal;
5243   std::tie(RVVOpcode, VectorVal, ScalarVal) =
5244       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
5245   MVT VecVT = VectorVal.getSimpleValueType();
5246 
5247   MVT ContainerVT = VecVT;
5248   if (VecVT.isFixedLengthVector()) {
5249     ContainerVT = getContainerForFixedLengthVector(VecVT);
5250     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
5251   }
5252 
5253   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
5254   MVT XLenVT = Subtarget.getXLenVT();
5255 
5256   SDValue Mask, VL;
5257   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
5258 
5259   SDValue ScalarSplat =
5260       lowerScalarSplat(SDValue(), ScalarVal, DAG.getConstant(1, DL, XLenVT),
5261                        M1VT, DL, DAG, Subtarget);
5262   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
5263                                   VectorVal, ScalarSplat, Mask, VL);
5264   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
5265                      DAG.getConstant(0, DL, XLenVT));
5266 }
5267 
5268 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
5269   switch (ISDOpcode) {
5270   default:
5271     llvm_unreachable("Unhandled reduction");
5272   case ISD::VP_REDUCE_ADD:
5273     return RISCVISD::VECREDUCE_ADD_VL;
5274   case ISD::VP_REDUCE_UMAX:
5275     return RISCVISD::VECREDUCE_UMAX_VL;
5276   case ISD::VP_REDUCE_SMAX:
5277     return RISCVISD::VECREDUCE_SMAX_VL;
5278   case ISD::VP_REDUCE_UMIN:
5279     return RISCVISD::VECREDUCE_UMIN_VL;
5280   case ISD::VP_REDUCE_SMIN:
5281     return RISCVISD::VECREDUCE_SMIN_VL;
5282   case ISD::VP_REDUCE_AND:
5283     return RISCVISD::VECREDUCE_AND_VL;
5284   case ISD::VP_REDUCE_OR:
5285     return RISCVISD::VECREDUCE_OR_VL;
5286   case ISD::VP_REDUCE_XOR:
5287     return RISCVISD::VECREDUCE_XOR_VL;
5288   case ISD::VP_REDUCE_FADD:
5289     return RISCVISD::VECREDUCE_FADD_VL;
5290   case ISD::VP_REDUCE_SEQ_FADD:
5291     return RISCVISD::VECREDUCE_SEQ_FADD_VL;
5292   case ISD::VP_REDUCE_FMAX:
5293     return RISCVISD::VECREDUCE_FMAX_VL;
5294   case ISD::VP_REDUCE_FMIN:
5295     return RISCVISD::VECREDUCE_FMIN_VL;
5296   }
5297 }
5298 
5299 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
5300                                            SelectionDAG &DAG) const {
5301   SDLoc DL(Op);
5302   SDValue Vec = Op.getOperand(1);
5303   EVT VecEVT = Vec.getValueType();
5304 
5305   // TODO: The type may need to be widened rather than split. Or widened before
5306   // it can be split.
5307   if (!isTypeLegal(VecEVT))
5308     return SDValue();
5309 
5310   MVT VecVT = VecEVT.getSimpleVT();
5311   MVT VecEltVT = VecVT.getVectorElementType();
5312   unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
5313 
5314   MVT ContainerVT = VecVT;
5315   if (VecVT.isFixedLengthVector()) {
5316     ContainerVT = getContainerForFixedLengthVector(VecVT);
5317     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5318   }
5319 
5320   SDValue VL = Op.getOperand(3);
5321   SDValue Mask = Op.getOperand(2);
5322 
5323   MVT M1VT = getLMUL1VT(ContainerVT);
5324   MVT XLenVT = Subtarget.getXLenVT();
5325   MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
5326 
5327   SDValue StartSplat = lowerScalarSplat(SDValue(), Op.getOperand(0),
5328                                         DAG.getConstant(1, DL, XLenVT), M1VT,
5329                                         DL, DAG, Subtarget);
5330   SDValue Reduction =
5331       DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
5332   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
5333                              DAG.getConstant(0, DL, XLenVT));
5334   if (!VecVT.isInteger())
5335     return Elt0;
5336   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
5337 }
5338 
5339 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
5340                                                    SelectionDAG &DAG) const {
5341   SDValue Vec = Op.getOperand(0);
5342   SDValue SubVec = Op.getOperand(1);
5343   MVT VecVT = Vec.getSimpleValueType();
5344   MVT SubVecVT = SubVec.getSimpleValueType();
5345 
5346   SDLoc DL(Op);
5347   MVT XLenVT = Subtarget.getXLenVT();
5348   unsigned OrigIdx = Op.getConstantOperandVal(2);
5349   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5350 
5351   // We don't have the ability to slide mask vectors up indexed by their i1
5352   // elements; the smallest we can do is i8. Often we are able to bitcast to
5353   // equivalent i8 vectors. Note that when inserting a fixed-length vector
5354   // into a scalable one, we might not necessarily have enough scalable
5355   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
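  // For example, inserting a v8i1 subvector at index 8 into an nxv64i1 vector
  // can instead be performed as inserting a v1i8 subvector at index 1 into an
  // nxv8i8 vector.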
5356   if (SubVecVT.getVectorElementType() == MVT::i1 &&
5357       (OrigIdx != 0 || !Vec.isUndef())) {
5358     if (VecVT.getVectorMinNumElements() >= 8 &&
5359         SubVecVT.getVectorMinNumElements() >= 8) {
5360       assert(OrigIdx % 8 == 0 && "Invalid index");
5361       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5362              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5363              "Unexpected mask vector lowering");
5364       OrigIdx /= 8;
5365       SubVecVT =
5366           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5367                            SubVecVT.isScalableVector());
5368       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5369                                VecVT.isScalableVector());
5370       Vec = DAG.getBitcast(VecVT, Vec);
5371       SubVec = DAG.getBitcast(SubVecVT, SubVec);
5372     } else {
5373       // We can't slide this mask vector up indexed by its i1 elements.
5374       // This poses a problem when we wish to insert a scalable vector which
5375       // can't be re-expressed as a larger type. Just choose the slow path and
5376       // extend to a larger type, then truncate back down.
5377       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5378       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5379       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5380       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5381       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5382                         Op.getOperand(2));
5383       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5384       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5385     }
5386   }
5387 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector as we only know the minimum
  // register size. Therefore we must slide the vector group up the full
  // amount.
5393   if (SubVecVT.isFixedLengthVector()) {
5394     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5395       return Op;
5396     MVT ContainerVT = VecVT;
5397     if (VecVT.isFixedLengthVector()) {
5398       ContainerVT = getContainerForFixedLengthVector(VecVT);
5399       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5400     }
5401     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5402                          DAG.getUNDEF(ContainerVT), SubVec,
5403                          DAG.getConstant(0, DL, XLenVT));
5404     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5405       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5406       return DAG.getBitcast(Op.getValueType(), SubVec);
5407     }
5408     SDValue Mask =
5409         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5410     // Set the vector length to only the number of elements we care about. Note
5411     // that for slideup this includes the offset.
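    // For example, inserting a v4i32 subvector at index 4 uses a slideup
    // amount of 4 and a VL of 8, leaving any elements past index 7
    // undisturbed.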
5412     SDValue VL =
5413         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5414     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5415     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5416                                   SubVec, SlideupAmt, Mask, VL);
5417     if (VecVT.isFixedLengthVector())
5418       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5419     return DAG.getBitcast(Op.getValueType(), Slideup);
5420   }
5421 
5422   unsigned SubRegIdx, RemIdx;
5423   std::tie(SubRegIdx, RemIdx) =
5424       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5425           VecVT, SubVecVT, OrigIdx, TRI);
5426 
5427   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5428   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5429                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5430                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5431 
  // 1. If the Idx has been completely eliminated and this subvector's size is
  // that of a whole vector register or a multiple thereof, or the surrounding
  // elements are undef, then this is a subvector insert which naturally aligns
  // to a vector register. These can easily be handled using subregister
  // manipulation.
5436   // 2. If the subvector is smaller than a vector register, then the insertion
5437   // must preserve the undisturbed elements of the register. We do this by
5438   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5439   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5440   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5441   // LMUL=1 type back into the larger vector (resolving to another subregister
  // operation). See below for how our VSLIDEUP works. We go via an LMUL=1 type
5443   // to avoid allocating a large register group to hold our subvector.
5444   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5445     return Op;
5446 
  // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
  // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy
5449   // (in our case undisturbed). This means we can set up a subvector insertion
5450   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5451   // size of the subvector.
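  // For example, to insert a two-element subvector at offset 3 we slide it up
  // with OFFSET=3 and VL=5: elements 0..2 keep the original vector's values
  // and elements 3..4 receive the subvector.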
5452   MVT InterSubVT = VecVT;
5453   SDValue AlignedExtract = Vec;
5454   unsigned AlignedIdx = OrigIdx - RemIdx;
5455   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5456     InterSubVT = getLMUL1VT(VecVT);
5457     // Extract a subvector equal to the nearest full vector register type. This
    // should resolve to an EXTRACT_SUBREG instruction.
5459     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5460                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5461   }
5462 
5463   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5464   // For scalable vectors this must be further multiplied by vscale.
5465   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5466 
5467   SDValue Mask, VL;
5468   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5469 
5470   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5471   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5472   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5473   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5474 
5475   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5476                        DAG.getUNDEF(InterSubVT), SubVec,
5477                        DAG.getConstant(0, DL, XLenVT));
5478 
5479   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5480                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5481 
5482   // If required, insert this subvector back into the correct vector register.
5483   // This should resolve to an INSERT_SUBREG instruction.
5484   if (VecVT.bitsGT(InterSubVT))
5485     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5486                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5487 
5488   // We might have bitcast from a mask type: cast back to the original type if
5489   // required.
5490   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5491 }
5492 
5493 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5494                                                     SelectionDAG &DAG) const {
5495   SDValue Vec = Op.getOperand(0);
5496   MVT SubVecVT = Op.getSimpleValueType();
5497   MVT VecVT = Vec.getSimpleValueType();
5498 
5499   SDLoc DL(Op);
5500   MVT XLenVT = Subtarget.getXLenVT();
5501   unsigned OrigIdx = Op.getConstantOperandVal(1);
5502   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5503 
5504   // We don't have the ability to slide mask vectors down indexed by their i1
5505   // elements; the smallest we can do is i8. Often we are able to bitcast to
5506   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5507   // from a scalable one, we might not necessarily have enough scalable
5508   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
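  // For example, extracting a v8i1 subvector at index 8 from an nxv64i1
  // vector can instead be performed as extracting a v1i8 subvector at index 1
  // from an nxv8i8 vector.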
5509   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5510     if (VecVT.getVectorMinNumElements() >= 8 &&
5511         SubVecVT.getVectorMinNumElements() >= 8) {
5512       assert(OrigIdx % 8 == 0 && "Invalid index");
5513       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5514              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5515              "Unexpected mask vector lowering");
5516       OrigIdx /= 8;
5517       SubVecVT =
5518           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5519                            SubVecVT.isScalableVector());
5520       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5521                                VecVT.isScalableVector());
5522       Vec = DAG.getBitcast(VecVT, Vec);
5523     } else {
      // We can't slide this mask vector down indexed by its i1 elements.
5525       // This poses a problem when we wish to extract a scalable vector which
5526       // can't be re-expressed as a larger type. Just choose the slow path and
5527       // extend to a larger type, then truncate back down.
      // TODO: We could probably improve this when extracting certain
      // fixed-length vectors from fixed-length vectors, where we can extract
      // as i8 and shift the correct element right to reach the desired
      // subvector.
5531       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5532       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5533       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5534       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5535                         Op.getOperand(1));
5536       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5537       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5538     }
5539   }
5540 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector as we only know the minimum
  // register size. Therefore we must slide the vector group down the full
  // amount.
5546   if (SubVecVT.isFixedLengthVector()) {
    // With an index of 0 this is a cast-like subvector extract, which can be
    // performed with subregister operations.
5549     if (OrigIdx == 0)
5550       return Op;
5551     MVT ContainerVT = VecVT;
5552     if (VecVT.isFixedLengthVector()) {
5553       ContainerVT = getContainerForFixedLengthVector(VecVT);
5554       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5555     }
5556     SDValue Mask =
5557         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5558     // Set the vector length to only the number of elements we care about. This
5559     // avoids sliding down elements we're going to discard straight away.
5560     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5561     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5562     SDValue Slidedown =
5563         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5564                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5565     // Now we can use a cast-like subvector extract to get the result.
5566     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5567                             DAG.getConstant(0, DL, XLenVT));
5568     return DAG.getBitcast(Op.getValueType(), Slidedown);
5569   }
5570 
5571   unsigned SubRegIdx, RemIdx;
5572   std::tie(SubRegIdx, RemIdx) =
5573       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5574           VecVT, SubVecVT, OrigIdx, TRI);
5575 
5576   // If the Idx has been completely eliminated then this is a subvector extract
5577   // which naturally aligns to a vector register. These can easily be handled
5578   // using subregister manipulation.
5579   if (RemIdx == 0)
5580     return Op;
5581 
5582   // Else we must shift our vector register directly to extract the subvector.
5583   // Do this using VSLIDEDOWN.
5584 
5585   // If the vector type is an LMUL-group type, extract a subvector equal to the
  // nearest full vector register type. This should resolve to an
  // EXTRACT_SUBREG instruction.
5588   MVT InterSubVT = VecVT;
5589   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5590     InterSubVT = getLMUL1VT(VecVT);
5591     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5592                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5593   }
5594 
5595   // Slide this vector register down by the desired number of elements in order
5596   // to place the desired subvector starting at element 0.
5597   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5598   // For scalable vectors this must be further multiplied by vscale.
5599   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5600 
5601   SDValue Mask, VL;
5602   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5603   SDValue Slidedown =
5604       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5605                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5606 
5607   // Now the vector is in the right position, extract our final subvector. This
5608   // should resolve to a COPY.
5609   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5610                           DAG.getConstant(0, DL, XLenVT));
5611 
5612   // We might have bitcast from a mask type: cast back to the original type if
5613   // required.
5614   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5615 }
5616 
// Lower step_vector to the vid instruction. Any non-identity step value must
// be accounted for by manual expansion.
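// For example, a step of 8 becomes vid.v followed by a shift left by 3, and a
// step of 6 becomes vid.v followed by a multiply by a splat of 6.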
5619 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5620                                               SelectionDAG &DAG) const {
5621   SDLoc DL(Op);
5622   MVT VT = Op.getSimpleValueType();
5623   MVT XLenVT = Subtarget.getXLenVT();
5624   SDValue Mask, VL;
5625   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5626   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5627   uint64_t StepValImm = Op.getConstantOperandVal(0);
5628   if (StepValImm != 1) {
5629     if (isPowerOf2_64(StepValImm)) {
5630       SDValue StepVal =
5631           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
5632                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5633       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5634     } else {
5635       SDValue StepVal = lowerScalarSplat(
5636           SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
5637           VL, VT, DL, DAG, Subtarget);
5638       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5639     }
5640   }
5641   return StepVec;
5642 }
5643 
5644 // Implement vector_reverse using vrgather.vv with indices determined by
5645 // subtracting the id of each element from (VLMAX-1). This will convert
5646 // the indices like so:
5647 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5648 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
5649 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5650                                                  SelectionDAG &DAG) const {
5651   SDLoc DL(Op);
5652   MVT VecVT = Op.getSimpleValueType();
5653   unsigned EltSize = VecVT.getScalarSizeInBits();
5654   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5655 
5656   unsigned MaxVLMAX = 0;
5657   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5658   if (VectorBitsMax != 0)
5659     MaxVLMAX =
5660         RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
5661 
5662   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5663   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5664 
5665   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5666   // to use vrgatherei16.vv.
5667   // TODO: It's also possible to use vrgatherei16.vv for other types to
5668   // decrease register width for the index calculation.
5669   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
    // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
5671     // Reverse each half, then reassemble them in reverse order.
    // NOTE: It's also possible that, after splitting, VLMAX no longer
    // requires vrgatherei16.vv.
5674     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5675       SDValue Lo, Hi;
5676       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5677       EVT LoVT, HiVT;
5678       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5679       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5680       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5681       // Reassemble the low and high pieces reversed.
5682       // FIXME: This is a CONCAT_VECTORS.
5683       SDValue Res =
5684           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5685                       DAG.getIntPtrConstant(0, DL));
5686       return DAG.getNode(
5687           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5688           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5689     }
5690 
    // Just promote the int type to i16, which will double the LMUL.
5692     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5693     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5694   }
5695 
5696   MVT XLenVT = Subtarget.getXLenVT();
5697   SDValue Mask, VL;
5698   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5699 
5700   // Calculate VLMAX-1 for the desired SEW.
5701   unsigned MinElts = VecVT.getVectorMinNumElements();
5702   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5703                               DAG.getConstant(MinElts, DL, XLenVT));
5704   SDValue VLMinus1 =
5705       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5706 
5707   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
5708   bool IsRV32E64 =
5709       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5710   SDValue SplatVL;
5711   if (!IsRV32E64)
5712     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5713   else
5714     SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
5715                           VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
5716 
5717   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5718   SDValue Indices =
5719       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5720 
5721   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5722 }
5723 
5724 SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
5725                                                 SelectionDAG &DAG) const {
5726   SDLoc DL(Op);
5727   SDValue V1 = Op.getOperand(0);
5728   SDValue V2 = Op.getOperand(1);
5729   MVT XLenVT = Subtarget.getXLenVT();
5730   MVT VecVT = Op.getSimpleValueType();
5731 
5732   unsigned MinElts = VecVT.getVectorMinNumElements();
5733   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5734                               DAG.getConstant(MinElts, DL, XLenVT));
5735 
5736   int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
5737   SDValue DownOffset, UpOffset;
5738   if (ImmValue >= 0) {
    // The operand is a TargetConstant; we need to rebuild it as a regular
    // constant.
5741     DownOffset = DAG.getConstant(ImmValue, DL, XLenVT);
5742     UpOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DownOffset);
5743   } else {
    // The operand is a TargetConstant; we need to rebuild it as a regular
    // constant rather than negating the original operand.
5746     UpOffset = DAG.getConstant(-ImmValue, DL, XLenVT);
5747     DownOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, UpOffset);
5748   }
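  // For example, a splice with an immediate of 2 slides V1 down by 2 and
  // slides V2 up at offset VLMAX-2, so the result is the last VLMAX-2
  // elements of V1 followed by the first 2 elements of V2.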
5749 
5750   MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5751   SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VLMax);
5752 
5753   SDValue SlideDown =
5754       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT, DAG.getUNDEF(VecVT), V1,
5755                   DownOffset, TrueMask, UpOffset);
5756   return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VecVT, SlideDown, V2, UpOffset,
5757                      TrueMask,
5758                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
5759 }
5760 
5761 SDValue
5762 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5763                                                      SelectionDAG &DAG) const {
5764   SDLoc DL(Op);
5765   auto *Load = cast<LoadSDNode>(Op);
5766 
5767   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5768                                         Load->getMemoryVT(),
5769                                         *Load->getMemOperand()) &&
5770          "Expecting a correctly-aligned load");
5771 
5772   MVT VT = Op.getSimpleValueType();
5773   MVT XLenVT = Subtarget.getXLenVT();
5774   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5775 
5776   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5777 
5778   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5779   SDValue IntID = DAG.getTargetConstant(
5780       IsMaskOp ? Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
5781   SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
5782   if (!IsMaskOp)
5783     Ops.push_back(DAG.getUNDEF(ContainerVT));
5784   Ops.push_back(Load->getBasePtr());
5785   Ops.push_back(VL);
5786   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5787   SDValue NewLoad =
5788       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
5789                               Load->getMemoryVT(), Load->getMemOperand());
5790 
5791   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5792   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5793 }
5794 
5795 SDValue
5796 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5797                                                       SelectionDAG &DAG) const {
5798   SDLoc DL(Op);
5799   auto *Store = cast<StoreSDNode>(Op);
5800 
5801   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5802                                         Store->getMemoryVT(),
5803                                         *Store->getMemOperand()) &&
5804          "Expecting a correctly-aligned store");
5805 
5806   SDValue StoreVal = Store->getValue();
5807   MVT VT = StoreVal.getSimpleValueType();
5808   MVT XLenVT = Subtarget.getXLenVT();
5809 
  // If the size is less than a byte, we need to pad with zeros to make a full
  // byte.
5811   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5812     VT = MVT::v8i1;
5813     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5814                            DAG.getConstant(0, DL, VT), StoreVal,
5815                            DAG.getIntPtrConstant(0, DL));
5816   }
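  // For example, a v4i1 store is widened to v8i1 with zeros in the upper four
  // lanes so that it can be stored as a whole byte via the vsm intrinsic
  // below.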
5817 
5818   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5819 
5820   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5821 
5822   SDValue NewValue =
5823       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5824 
5825   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5826   SDValue IntID = DAG.getTargetConstant(
5827       IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
5828   return DAG.getMemIntrinsicNode(
5829       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
5830       {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
5831       Store->getMemoryVT(), Store->getMemOperand());
5832 }
5833 
5834 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5835                                              SelectionDAG &DAG) const {
5836   SDLoc DL(Op);
5837   MVT VT = Op.getSimpleValueType();
5838 
5839   const auto *MemSD = cast<MemSDNode>(Op);
5840   EVT MemVT = MemSD->getMemoryVT();
5841   MachineMemOperand *MMO = MemSD->getMemOperand();
5842   SDValue Chain = MemSD->getChain();
5843   SDValue BasePtr = MemSD->getBasePtr();
5844 
5845   SDValue Mask, PassThru, VL;
5846   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5847     Mask = VPLoad->getMask();
5848     PassThru = DAG.getUNDEF(VT);
5849     VL = VPLoad->getVectorLength();
5850   } else {
5851     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5852     Mask = MLoad->getMask();
5853     PassThru = MLoad->getPassThru();
5854   }
5855 
5856   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5857 
5858   MVT XLenVT = Subtarget.getXLenVT();
5859 
5860   MVT ContainerVT = VT;
5861   if (VT.isFixedLengthVector()) {
5862     ContainerVT = getContainerForFixedLengthVector(VT);
5863     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5864     if (!IsUnmasked) {
5865       MVT MaskVT =
5866           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5867       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5868     }
5869   }
5870 
5871   if (!VL)
5872     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5873 
5874   unsigned IntID =
5875       IsUnmasked ? Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask;
5876   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5877   if (IsUnmasked)
5878     Ops.push_back(DAG.getUNDEF(ContainerVT));
5879   else
5880     Ops.push_back(PassThru);
5881   Ops.push_back(BasePtr);
5882   if (!IsUnmasked)
5883     Ops.push_back(Mask);
5884   Ops.push_back(VL);
5885   if (!IsUnmasked)
5886     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5887 
5888   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5889 
5890   SDValue Result =
5891       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5892   Chain = Result.getValue(1);
5893 
5894   if (VT.isFixedLengthVector())
5895     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5896 
5897   return DAG.getMergeValues({Result, Chain}, DL);
5898 }
5899 
5900 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
5901                                               SelectionDAG &DAG) const {
5902   SDLoc DL(Op);
5903 
5904   const auto *MemSD = cast<MemSDNode>(Op);
5905   EVT MemVT = MemSD->getMemoryVT();
5906   MachineMemOperand *MMO = MemSD->getMemOperand();
5907   SDValue Chain = MemSD->getChain();
5908   SDValue BasePtr = MemSD->getBasePtr();
5909   SDValue Val, Mask, VL;
5910 
5911   if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
5912     Val = VPStore->getValue();
5913     Mask = VPStore->getMask();
5914     VL = VPStore->getVectorLength();
5915   } else {
5916     const auto *MStore = cast<MaskedStoreSDNode>(Op);
5917     Val = MStore->getValue();
5918     Mask = MStore->getMask();
5919   }
5920 
5921   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5922 
5923   MVT VT = Val.getSimpleValueType();
5924   MVT XLenVT = Subtarget.getXLenVT();
5925 
5926   MVT ContainerVT = VT;
5927   if (VT.isFixedLengthVector()) {
5928     ContainerVT = getContainerForFixedLengthVector(VT);
5929 
5930     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5931     if (!IsUnmasked) {
5932       MVT MaskVT =
5933           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5934       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5935     }
5936   }
5937 
5938   if (!VL)
5939     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5940 
5941   unsigned IntID =
5942       IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
5943   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5944   Ops.push_back(Val);
5945   Ops.push_back(BasePtr);
5946   if (!IsUnmasked)
5947     Ops.push_back(Mask);
5948   Ops.push_back(VL);
5949 
5950   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5951                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5952 }
5953 
5954 SDValue
5955 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
5956                                                       SelectionDAG &DAG) const {
5957   MVT InVT = Op.getOperand(0).getSimpleValueType();
5958   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
5959 
5960   MVT VT = Op.getSimpleValueType();
5961 
5962   SDValue Op1 =
5963       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
5964   SDValue Op2 =
5965       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5966 
5967   SDLoc DL(Op);
5968   SDValue VL =
5969       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
5970 
5971   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5972   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5973 
5974   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
5975                             Op.getOperand(2), Mask, VL);
5976 
5977   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
5978 }
5979 
5980 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
5981     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
5982   MVT VT = Op.getSimpleValueType();
5983 
5984   if (VT.getVectorElementType() == MVT::i1)
5985     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
5986 
5987   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
5988 }
5989 
5990 SDValue
5991 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
5992                                                       SelectionDAG &DAG) const {
5993   unsigned Opc;
5994   switch (Op.getOpcode()) {
5995   default: llvm_unreachable("Unexpected opcode!");
5996   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
5997   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
5998   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
5999   }
6000 
6001   return lowerToScalableOp(Op, DAG, Opc);
6002 }
6003 
6004 // Lower vector ABS to smax(X, sub(0, X)).
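// For example, abs(-3) = smax(-3, 0 - (-3)) = smax(-3, 3) = 3.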
6005 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
6006   SDLoc DL(Op);
6007   MVT VT = Op.getSimpleValueType();
6008   SDValue X = Op.getOperand(0);
6009 
6010   assert(VT.isFixedLengthVector() && "Unexpected type");
6011 
6012   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6013   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
6014 
6015   SDValue Mask, VL;
6016   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6017 
6018   SDValue SplatZero = DAG.getNode(
6019       RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
6020       DAG.getConstant(0, DL, Subtarget.getXLenVT()));
6021   SDValue NegX =
6022       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
6023   SDValue Max =
6024       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
6025 
6026   return convertFromScalableVector(VT, Max, DAG, Subtarget);
6027 }
6028 
6029 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
6030     SDValue Op, SelectionDAG &DAG) const {
6031   SDLoc DL(Op);
6032   MVT VT = Op.getSimpleValueType();
6033   SDValue Mag = Op.getOperand(0);
6034   SDValue Sign = Op.getOperand(1);
6035   assert(Mag.getValueType() == Sign.getValueType() &&
6036          "Can only handle COPYSIGN with matching types.");
6037 
6038   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6039   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
6040   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
6041 
6042   SDValue Mask, VL;
6043   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6044 
6045   SDValue CopySign =
6046       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
6047 
6048   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
6049 }
6050 
6051 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
6052     SDValue Op, SelectionDAG &DAG) const {
6053   MVT VT = Op.getSimpleValueType();
6054   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6055 
6056   MVT I1ContainerVT =
6057       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6058 
6059   SDValue CC =
6060       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
6061   SDValue Op1 =
6062       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
6063   SDValue Op2 =
6064       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
6065 
6066   SDLoc DL(Op);
6067   SDValue Mask, VL;
6068   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6069 
6070   SDValue Select =
6071       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
6072 
6073   return convertFromScalableVector(VT, Select, DAG, Subtarget);
6074 }
6075 
6076 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
6077                                                unsigned NewOpc,
6078                                                bool HasMask) const {
6079   MVT VT = Op.getSimpleValueType();
6080   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6081 
  // Create a list of operands by converting the existing ones to scalable
  // types.
6083   SmallVector<SDValue, 6> Ops;
6084   for (const SDValue &V : Op->op_values()) {
6085     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6086 
6087     // Pass through non-vector operands.
6088     if (!V.getValueType().isVector()) {
6089       Ops.push_back(V);
6090       continue;
6091     }
6092 
6093     // "cast" fixed length vector to a scalable vector.
6094     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
6095            "Only fixed length vectors are supported!");
6096     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6097   }
6098 
6099   SDLoc DL(Op);
6100   SDValue Mask, VL;
6101   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
6102   if (HasMask)
6103     Ops.push_back(Mask);
6104   Ops.push_back(VL);
6105 
6106   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
6107   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
6108 }
6109 
6110 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
6111 // * Operands of each node are assumed to be in the same order.
6112 // * The EVL operand is promoted from i32 to i64 on RV64.
6113 // * Fixed-length vectors are converted to their scalable-vector container
6114 //   types.
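// For example, a VP_ADD on fixed-length vector operands becomes
// RISCVISD::ADD_VL on the corresponding scalable container type, with the
// result converted back to the fixed-length type afterwards.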
6115 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
6116                                        unsigned RISCVISDOpc) const {
6117   SDLoc DL(Op);
6118   MVT VT = Op.getSimpleValueType();
6119   SmallVector<SDValue, 4> Ops;
6120 
6121   for (const auto &OpIdx : enumerate(Op->ops())) {
6122     SDValue V = OpIdx.value();
6123     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
6124     // Pass through operands which aren't fixed-length vectors.
6125     if (!V.getValueType().isFixedLengthVector()) {
6126       Ops.push_back(V);
6127       continue;
6128     }
6129     // "cast" fixed length vector to a scalable vector.
6130     MVT OpVT = V.getSimpleValueType();
6131     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
6132     assert(useRVVForFixedLengthVectorVT(OpVT) &&
6133            "Only fixed length vectors are supported!");
6134     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
6135   }
6136 
6137   if (!VT.isFixedLengthVector())
6138     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
6139 
6140   MVT ContainerVT = getContainerForFixedLengthVector(VT);
6141 
6142   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
6143 
6144   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
6145 }
6146 
6147 SDValue RISCVTargetLowering::lowerVPExtMaskOp(SDValue Op,
6148                                               SelectionDAG &DAG) const {
6149   SDLoc DL(Op);
6150   MVT VT = Op.getSimpleValueType();
6151 
6152   SDValue Src = Op.getOperand(0);
6153   // NOTE: Mask is dropped.
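  // (Masked-off lanes of a VP operation produce undefined results, so
  // computing the extension with full splats remains sound.)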
6154   SDValue VL = Op.getOperand(2);
6155 
6156   MVT ContainerVT = VT;
6157   if (VT.isFixedLengthVector()) {
6158     ContainerVT = getContainerForFixedLengthVector(VT);
6159     MVT SrcVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6160     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6161   }
6162 
6163   MVT XLenVT = Subtarget.getXLenVT();
6164   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6165   SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6166                                   DAG.getUNDEF(ContainerVT), Zero, VL);
6167 
6168   SDValue SplatValue =
6169       DAG.getConstant(Op.getOpcode() == ISD::VP_ZEXT ? 1 : -1, DL, XLenVT);
6170   SDValue Splat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
6171                               DAG.getUNDEF(ContainerVT), SplatValue, VL);
6172 
6173   SDValue Result = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Src,
6174                                Splat, ZeroSplat, VL);
6175   if (!VT.isFixedLengthVector())
6176     return Result;
6177   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6178 }
6179 
6180 // Lower Floating-Point/Integer Type-Convert VP SDNodes
6181 SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
6182                                                 unsigned RISCVISDOpc) const {
6183   SDLoc DL(Op);
6184 
6185   SDValue Src = Op.getOperand(0);
6186   SDValue Mask = Op.getOperand(1);
6187   SDValue VL = Op.getOperand(2);
6188 
6189   MVT DstVT = Op.getSimpleValueType();
6190   MVT SrcVT = Src.getSimpleValueType();
6191   if (DstVT.isFixedLengthVector()) {
6192     DstVT = getContainerForFixedLengthVector(DstVT);
6193     SrcVT = getContainerForFixedLengthVector(SrcVT);
6194     Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget);
6195     MVT MaskVT = MVT::getVectorVT(MVT::i1, DstVT.getVectorElementCount());
6196     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6197   }
6198 
6199   unsigned RISCVISDExtOpc = (RISCVISDOpc == RISCVISD::SINT_TO_FP_VL ||
6200                              RISCVISDOpc == RISCVISD::FP_TO_SINT_VL)
6201                                 ? RISCVISD::VSEXT_VL
6202                                 : RISCVISD::VZEXT_VL;
6203 
6204   unsigned DstEltSize = DstVT.getScalarSizeInBits();
6205   unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
6206 
6207   SDValue Result;
6208   if (DstEltSize >= SrcEltSize) { // Single-width and widening conversion.
6209     if (SrcVT.isInteger()) {
6210       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
6211 
6212       // Do we need to do any pre-widening before converting?
6213       if (SrcEltSize == 1) {
6214         MVT IntVT = DstVT.changeVectorElementTypeToInteger();
6215         MVT XLenVT = Subtarget.getXLenVT();
6216         SDValue Zero = DAG.getConstant(0, DL, XLenVT);
6217         SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6218                                         DAG.getUNDEF(IntVT), Zero, VL);
6219         SDValue One = DAG.getConstant(
6220             RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, XLenVT);
6221         SDValue OneSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT,
6222                                        DAG.getUNDEF(IntVT), One, VL);
6223         Src = DAG.getNode(RISCVISD::VSELECT_VL, DL, IntVT, Src, OneSplat,
6224                           ZeroSplat, VL);
6225       } else if (DstEltSize > (2 * SrcEltSize)) {
6226         // Widen before converting.
6227         MVT IntVT = MVT::getVectorVT(MVT::getIntegerVT(DstEltSize / 2),
6228                                      DstVT.getVectorElementCount());
6229         Src = DAG.getNode(RISCVISDExtOpc, DL, IntVT, Src, Mask, VL);
6230       }
6231 
6232       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6233     } else {
6234       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
6235              "Wrong input/output vector types");
6236 
6237       // Convert f16 to f32 then convert f32 to i64.
6238       if (DstEltSize > (2 * SrcEltSize)) {
6239         assert(SrcVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
6240         MVT InterimFVT =
6241             MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
6242         Src =
6243             DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterimFVT, Src, Mask, VL);
6244       }
6245 
6246       Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL);
6247     }
6248   } else { // Narrowing + Conversion
6249     if (SrcVT.isInteger()) {
6250       assert(DstVT.isFloatingPoint() && "Wrong input/output vector types");
      // First do a narrowing convert to an FP type half the size, then round
      // the result to the smaller FP type if needed.
6253 
6254       MVT InterimFVT = DstVT;
6255       if (SrcEltSize > (2 * DstEltSize)) {
6256         assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!");
6257         assert(DstVT.getVectorElementType() == MVT::f16 && "Unexpected type!");
6258         InterimFVT = MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount());
6259       }
6260 
6261       Result = DAG.getNode(RISCVISDOpc, DL, InterimFVT, Src, Mask, VL);
6262 
6263       if (InterimFVT != DstVT) {
6264         Src = Result;
6265         Result = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, DstVT, Src, Mask, VL);
6266       }
6267     } else {
6268       assert(SrcVT.isFloatingPoint() && DstVT.isInteger() &&
6269              "Wrong input/output vector types");
6270       // First do a narrowing conversion to an integer half the size, then
6271       // truncate if needed.
6272 
6273       if (DstEltSize == 1) {
6274         // First convert to the same size integer, then convert to mask using
6275         // setcc.
6276         assert(SrcEltSize >= 16 && "Unexpected FP type!");
6277         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize),
6278                                           DstVT.getVectorElementCount());
6279         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
6280 
6281         // Compare the integer result to 0. The integer should be 0 or 1/-1,
6282         // otherwise the conversion was undefined.
6283         MVT XLenVT = Subtarget.getXLenVT();
6284         SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
        SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterimIVT,
                                DAG.getUNDEF(InterimIVT), SplatZero, VL);
6287         Result = DAG.getNode(RISCVISD::SETCC_VL, DL, DstVT, Result, SplatZero,
6288                              DAG.getCondCode(ISD::SETNE), Mask, VL);
6289       } else {
6290         MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
6291                                           DstVT.getVectorElementCount());
6292 
6293         Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);
6294 
6295         while (InterimIVT != DstVT) {
6296           SrcEltSize /= 2;
6297           Src = Result;
6298           InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
6299                                         DstVT.getVectorElementCount());
6300           Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, InterimIVT,
6301                                Src, Mask, VL);
6302         }
6303       }
6304     }
6305   }
6306 
6307   MVT VT = Op.getSimpleValueType();
6308   if (!VT.isFixedLengthVector())
6309     return Result;
6310   return convertFromScalableVector(VT, Result, DAG, Subtarget);
6311 }
6312 
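// Lower a VP logical operation (e.g. vp.and). For mask (i1) vectors, the
// caller's MaskOpc (e.g. a VMAND_VL-style mask opcode) is used directly and
// the VP mask operand is dropped; all other element types are lowered via
// lowerVPOp with VecOpc.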
6313 SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
6314                                             unsigned MaskOpc,
6315                                             unsigned VecOpc) const {
6316   MVT VT = Op.getSimpleValueType();
6317   if (VT.getVectorElementType() != MVT::i1)
6318     return lowerVPOp(Op, DAG, VecOpc);
6319 
6320   // It is safe to drop mask parameter as masked-off elements are undef.
6321   SDValue Op1 = Op->getOperand(0);
6322   SDValue Op2 = Op->getOperand(1);
6323   SDValue VL = Op->getOperand(3);
6324 
6325   MVT ContainerVT = VT;
6326   const bool IsFixed = VT.isFixedLengthVector();
6327   if (IsFixed) {
6328     ContainerVT = getContainerForFixedLengthVector(VT);
6329     Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
6330     Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
6331   }
6332 
6333   SDLoc DL(Op);
6334   SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
6335   if (!IsFixed)
6336     return Val;
6337   return convertFromScalableVector(VT, Val, DAG, Subtarget);
6338 }
6339 
6340 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
// matched to an RVV indexed load. The RVV indexed load instructions only
6342 // support the "unsigned unscaled" addressing mode; indices are implicitly
6343 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6344 // signed or scaled indexing is extended to the XLEN value type and scaled
6345 // accordingly.
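// For example (an illustrative sketch): a masked v4i32 gather becomes a
// riscv_vluxei_mask intrinsic taking the pass-through value, base pointer,
// index vector, mask, VL and a tail-agnostic policy (plus the chain and
// intrinsic ID), all using the scalable container types.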
6346 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
6347                                                SelectionDAG &DAG) const {
6348   SDLoc DL(Op);
6349   MVT VT = Op.getSimpleValueType();
6350 
6351   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6352   EVT MemVT = MemSD->getMemoryVT();
6353   MachineMemOperand *MMO = MemSD->getMemOperand();
6354   SDValue Chain = MemSD->getChain();
6355   SDValue BasePtr = MemSD->getBasePtr();
6356 
6357   ISD::LoadExtType LoadExtType;
6358   SDValue Index, Mask, PassThru, VL;
6359 
6360   if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
6361     Index = VPGN->getIndex();
6362     Mask = VPGN->getMask();
6363     PassThru = DAG.getUNDEF(VT);
6364     VL = VPGN->getVectorLength();
6365     // VP doesn't support extending loads.
6366     LoadExtType = ISD::NON_EXTLOAD;
6367   } else {
    // Else it must be an MGATHER.
6369     auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
6370     Index = MGN->getIndex();
6371     Mask = MGN->getMask();
6372     PassThru = MGN->getPassThru();
6373     LoadExtType = MGN->getExtensionType();
6374   }
6375 
6376   MVT IndexVT = Index.getSimpleValueType();
6377   MVT XLenVT = Subtarget.getXLenVT();
6378 
6379   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6380          "Unexpected VTs!");
6381   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads.
6383   assert(LoadExtType == ISD::NON_EXTLOAD &&
6384          "Unexpected extending MGATHER/VP_GATHER");
6385   (void)LoadExtType;
6386 
6387   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6388   // the selection of the masked intrinsics doesn't do this for us.
6389   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6390 
6391   MVT ContainerVT = VT;
6392   if (VT.isFixedLengthVector()) {
6393     // We need to use the larger of the result and index type to determine the
6394     // scalable type to use so we don't increase LMUL for any operand/result.
6395     if (VT.bitsGE(IndexVT)) {
6396       ContainerVT = getContainerForFixedLengthVector(VT);
6397       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6398                                  ContainerVT.getVectorElementCount());
6399     } else {
6400       IndexVT = getContainerForFixedLengthVector(IndexVT);
6401       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
6402                                      IndexVT.getVectorElementCount());
6403     }
6404 
6405     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6406 
6407     if (!IsUnmasked) {
6408       MVT MaskVT =
6409           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6410       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6411       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
6412     }
6413   }
6414 
6415   if (!VL)
6416     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6417 
6418   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6419     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6420     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6421                                    VL);
6422     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6423                         TrueMask, VL);
6424   }
6425 
6426   unsigned IntID =
6427       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
6428   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6429   if (IsUnmasked)
6430     Ops.push_back(DAG.getUNDEF(ContainerVT));
6431   else
6432     Ops.push_back(PassThru);
6433   Ops.push_back(BasePtr);
6434   Ops.push_back(Index);
6435   if (!IsUnmasked)
6436     Ops.push_back(Mask);
6437   Ops.push_back(VL);
6438   if (!IsUnmasked)
6439     Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
6440 
6441   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
6442   SDValue Result =
6443       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
6444   Chain = Result.getValue(1);
6445 
6446   if (VT.isFixedLengthVector())
6447     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
6448 
6449   return DAG.getMergeValues({Result, Chain}, DL);
6450 }
6451 
6452 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
// matched to an RVV indexed store. The RVV indexed store instructions only
6454 // support the "unsigned unscaled" addressing mode; indices are implicitly
6455 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
6456 // signed or scaled indexing is extended to the XLEN value type and scaled
6457 // accordingly.
6458 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
6459                                                 SelectionDAG &DAG) const {
6460   SDLoc DL(Op);
6461   const auto *MemSD = cast<MemSDNode>(Op.getNode());
6462   EVT MemVT = MemSD->getMemoryVT();
6463   MachineMemOperand *MMO = MemSD->getMemOperand();
6464   SDValue Chain = MemSD->getChain();
6465   SDValue BasePtr = MemSD->getBasePtr();
6466 
6467   bool IsTruncatingStore = false;
6468   SDValue Index, Mask, Val, VL;
6469 
6470   if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
6471     Index = VPSN->getIndex();
6472     Mask = VPSN->getMask();
6473     Val = VPSN->getValue();
6474     VL = VPSN->getVectorLength();
6475     // VP doesn't support truncating stores.
6476     IsTruncatingStore = false;
6477   } else {
    // Else it must be an MSCATTER.
6479     auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
6480     Index = MSN->getIndex();
6481     Mask = MSN->getMask();
6482     Val = MSN->getValue();
6483     IsTruncatingStore = MSN->isTruncatingStore();
6484   }
6485 
6486   MVT VT = Val.getSimpleValueType();
6487   MVT IndexVT = Index.getSimpleValueType();
6488   MVT XLenVT = Subtarget.getXLenVT();
6489 
6490   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
6491          "Unexpected VTs!");
6492   assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads and
  // truncating vector stores.
6495   assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
6496   (void)IsTruncatingStore;
6497 
6498   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
6499   // the selection of the masked intrinsics doesn't do this for us.
6500   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
6501 
6502   MVT ContainerVT = VT;
6503   if (VT.isFixedLengthVector()) {
6504     // We need to use the larger of the value and index type to determine the
6505     // scalable type to use so we don't increase LMUL for any operand/result.
6506     if (VT.bitsGE(IndexVT)) {
6507       ContainerVT = getContainerForFixedLengthVector(VT);
6508       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
6509                                  ContainerVT.getVectorElementCount());
6510     } else {
6511       IndexVT = getContainerForFixedLengthVector(IndexVT);
6512       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
6513                                      IndexVT.getVectorElementCount());
6514     }
6515 
6516     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
6517     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
6518 
6519     if (!IsUnmasked) {
6520       MVT MaskVT =
6521           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
6522       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
6523     }
6524   }
6525 
6526   if (!VL)
6527     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
6528 
6529   if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) {
6530     IndexVT = IndexVT.changeVectorElementType(XLenVT);
6531     SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(),
6532                                    VL);
6533     Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index,
6534                         TrueMask, VL);
6535   }
6536 
6537   unsigned IntID =
6538       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
6539   SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
6540   Ops.push_back(Val);
6541   Ops.push_back(BasePtr);
6542   Ops.push_back(Index);
6543   if (!IsUnmasked)
6544     Ops.push_back(Mask);
6545   Ops.push_back(VL);
6546 
6547   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
6548                                  DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
6549 }
6550 
6551 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
6552                                                SelectionDAG &DAG) const {
6553   const MVT XLenVT = Subtarget.getXLenVT();
6554   SDLoc DL(Op);
6555   SDValue Chain = Op->getOperand(0);
6556   SDValue SysRegNo = DAG.getTargetConstant(
6557       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6558   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
6559   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
6560 
  // The encoding used for the rounding mode in RISCV differs from that used
  // in FLT_ROUNDS. To convert it, the RISCV rounding mode is used as an index
  // into a table, which consists of a sequence of 4-bit fields, each
  // representing the corresponding FLT_ROUNDS mode.
6565   static const int Table =
6566       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
6567       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
6568       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
6569       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
6570       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
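  // An entry is read by shifting the table right by 4 * FRM and keeping the
  // low three bits (the SHL by 2 below computes 4 * FRM). For example, with
  // FRM = RTZ (encoded as 1) the nibble at bits [7:4] is selected, yielding
  // int(RoundingMode::TowardZero) == 0, the FLT_ROUNDS encoding of
  // round-toward-zero.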
6571 
6572   SDValue Shift =
6573       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
6574   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6575                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6576   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6577                                DAG.getConstant(7, DL, XLenVT));
6578 
6579   return DAG.getMergeValues({Masked, Chain}, DL);
6580 }
6581 
6582 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
6583                                                SelectionDAG &DAG) const {
6584   const MVT XLenVT = Subtarget.getXLenVT();
6585   SDLoc DL(Op);
6586   SDValue Chain = Op->getOperand(0);
6587   SDValue RMValue = Op->getOperand(1);
6588   SDValue SysRegNo = DAG.getTargetConstant(
6589       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
6590 
  // The encoding used for the rounding mode in RISCV differs from that used
  // in FLT_ROUNDS. To convert it, the C rounding mode is used as an index
  // into a table, which consists of a sequence of 4-bit fields, each
  // representing the corresponding RISCV mode.
6595   static const unsigned Table =
6596       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
6597       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
6598       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
6599       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
6600       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
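  // For example, setting round-toward-positive: RMValue is
  // int(RoundingMode::TowardPositive) == 2, so the nibble at bits [11:8] is
  // selected, yielding RISCVFPRndMode::RUP (3), which is then written to the
  // FRM register.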
6601 
6602   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
6603                               DAG.getConstant(2, DL, XLenVT));
6604   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
6605                                 DAG.getConstant(Table, DL, XLenVT), Shift);
6606   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
6607                         DAG.getConstant(0x7, DL, XLenVT));
6608   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
6609                      RMValue);
6610 }
6611 
6612 static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
6613   switch (IntNo) {
6614   default:
6615     llvm_unreachable("Unexpected Intrinsic");
6616   case Intrinsic::riscv_bcompress:
6617     return RISCVISD::BCOMPRESSW;
6618   case Intrinsic::riscv_bdecompress:
6619     return RISCVISD::BDECOMPRESSW;
6620   case Intrinsic::riscv_bfp:
6621     return RISCVISD::BFPW;
6622   case Intrinsic::riscv_fsl:
6623     return RISCVISD::FSLW;
6624   case Intrinsic::riscv_fsr:
6625     return RISCVISD::FSRW;
6626   }
6627 }
6628 
// Converts the given intrinsic to an i64 operation, any-extending its
// operands.
6630 static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
6631                                          unsigned IntNo) {
6632   SDLoc DL(N);
6633   RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
6634   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6635   SDValue NewOp2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
6636   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp1, NewOp2);
6637   // ReplaceNodeResults requires we maintain the same type for the return value.
6638   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6639 }
6640 
6641 // Returns the opcode of the target-specific SDNode that implements the 32-bit
6642 // form of the given Opcode.
6643 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
6644   switch (Opcode) {
6645   default:
6646     llvm_unreachable("Unexpected opcode");
6647   case ISD::SHL:
6648     return RISCVISD::SLLW;
6649   case ISD::SRA:
6650     return RISCVISD::SRAW;
6651   case ISD::SRL:
6652     return RISCVISD::SRLW;
6653   case ISD::SDIV:
6654     return RISCVISD::DIVW;
6655   case ISD::UDIV:
6656     return RISCVISD::DIVUW;
6657   case ISD::UREM:
6658     return RISCVISD::REMUW;
6659   case ISD::ROTL:
6660     return RISCVISD::ROLW;
6661   case ISD::ROTR:
6662     return RISCVISD::RORW;
6663   }
6664 }
6665 
// Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
// node. Because i8/i16/i32 aren't legal types for RV64, these operations would
// otherwise be promoted to i64, making it difficult to select the
// SLLW/DIVUW/.../*W later on because the fact that the operation was
// originally of type i8/i16/i32 is lost.
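// For example (an illustrative sketch), on RV64 an i32 SRL becomes
//   (trunc i32 (RISCVISD::SRLW (any_extend i64 x), (any_extend i64 y)))
// from which isel can later form srlw.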
6671 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
6672                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
6673   SDLoc DL(N);
6674   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
6675   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
6676   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
6677   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
6678   // ReplaceNodeResults requires we maintain the same type for the return value.
6679   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
6680 }
6681 
// Converts the given 32-bit operation to an i64 operation with sign extension
// semantics to reduce the number of sign extension instructions.
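// For example (an illustrative sketch), an i32 ADD becomes
//   (trunc i32 (sext_inreg (add (any_extend i64 x), (any_extend i64 y)), i32))
// so the result is known to be sign extended and isel can later form addw.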
6684 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
6685   SDLoc DL(N);
6686   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6687   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6688   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
6689   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6690                                DAG.getValueType(MVT::i32));
6691   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
6692 }
6693 
6694 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
6695                                              SmallVectorImpl<SDValue> &Results,
6696                                              SelectionDAG &DAG) const {
6697   SDLoc DL(N);
6698   switch (N->getOpcode()) {
6699   default:
6700     llvm_unreachable("Don't know how to custom type legalize this operation!");
6701   case ISD::STRICT_FP_TO_SINT:
6702   case ISD::STRICT_FP_TO_UINT:
6703   case ISD::FP_TO_SINT:
6704   case ISD::FP_TO_UINT: {
6705     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6706            "Unexpected custom legalisation");
6707     bool IsStrict = N->isStrictFPOpcode();
6708     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
6709                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
6710     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
6711     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
6712         TargetLowering::TypeSoftenFloat) {
6713       if (!isTypeLegal(Op0.getValueType()))
6714         return;
6715       if (IsStrict) {
6716         unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64
6717                                 : RISCVISD::STRICT_FCVT_WU_RV64;
6718         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
6719         SDValue Res = DAG.getNode(
6720             Opc, DL, VTs, N->getOperand(0), Op0,
6721             DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6722         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6723         Results.push_back(Res.getValue(1));
6724         return;
6725       }
6726       unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
6727       SDValue Res =
6728           DAG.getNode(Opc, DL, MVT::i64, Op0,
6729                       DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64));
6730       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6731       return;
6732     }
6733     // If the FP type needs to be softened, emit a library call using the 'si'
6734     // version. If we left it to default legalization we'd end up with 'di'. If
6735     // the FP type doesn't need to be softened just let generic type
6736     // legalization promote the result type.
6737     RTLIB::Libcall LC;
6738     if (IsSigned)
6739       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
6740     else
6741       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
6742     MakeLibCallOptions CallOptions;
6743     EVT OpVT = Op0.getValueType();
6744     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
6745     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
6746     SDValue Result;
6747     std::tie(Result, Chain) =
6748         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
6749     Results.push_back(Result);
6750     if (IsStrict)
6751       Results.push_back(Chain);
6752     break;
6753   }
6754   case ISD::READCYCLECOUNTER: {
6755     assert(!Subtarget.is64Bit() &&
6756            "READCYCLECOUNTER only has custom type legalization on riscv32");
6757 
6758     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
6759     SDValue RCW =
6760         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
6761 
6762     Results.push_back(
6763         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
6764     Results.push_back(RCW.getValue(2));
6765     break;
6766   }
6767   case ISD::MUL: {
6768     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
6769     unsigned XLen = Subtarget.getXLen();
6770     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
6771     if (Size > XLen) {
6772       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
6773       SDValue LHS = N->getOperand(0);
6774       SDValue RHS = N->getOperand(1);
6775       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
6776 
6777       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
6778       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
6779       // We need exactly one side to be unsigned.
6780       if (LHSIsU == RHSIsU)
6781         return;
6782 
6783       auto MakeMULPair = [&](SDValue S, SDValue U) {
6784         MVT XLenVT = Subtarget.getXLenVT();
6785         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
6786         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
6787         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
6788         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
6789         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
6790       };
6791 
6792       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
6793       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
6794 
6795       // The other operand should be signed, but still prefer MULH when
6796       // possible.
6797       if (RHSIsU && LHSIsS && !RHSIsS)
6798         Results.push_back(MakeMULPair(LHS, RHS));
6799       else if (LHSIsU && RHSIsS && !LHSIsS)
6800         Results.push_back(MakeMULPair(RHS, LHS));
6801 
6802       return;
6803     }
6804     LLVM_FALLTHROUGH;
6805   }
6806   case ISD::ADD:
6807   case ISD::SUB:
6808     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6809            "Unexpected custom legalisation");
6810     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
6811     break;
6812   case ISD::SHL:
6813   case ISD::SRA:
6814   case ISD::SRL:
6815     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6816            "Unexpected custom legalisation");
6817     if (N->getOperand(1).getOpcode() != ISD::Constant) {
6818       Results.push_back(customLegalizeToWOp(N, DAG));
6819       break;
6820     }
6821 
6822     // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
6823     // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
6824     // shift amount.
6825     if (N->getOpcode() == ISD::SHL) {
6826       SDLoc DL(N);
6827       SDValue NewOp0 =
6828           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6829       SDValue NewOp1 =
6830           DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
6831       SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
6832       SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
6833                                    DAG.getValueType(MVT::i32));
6834       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6835     }
6836 
6837     break;
6838   case ISD::ROTL:
6839   case ISD::ROTR:
6840     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6841            "Unexpected custom legalisation");
6842     Results.push_back(customLegalizeToWOp(N, DAG));
6843     break;
6844   case ISD::CTTZ:
6845   case ISD::CTTZ_ZERO_UNDEF:
6846   case ISD::CTLZ:
6847   case ISD::CTLZ_ZERO_UNDEF: {
6848     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6849            "Unexpected custom legalisation");
6850 
6851     SDValue NewOp0 =
6852         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6853     bool IsCTZ =
6854         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
6855     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
6856     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
6857     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6858     return;
6859   }
6860   case ISD::SDIV:
6861   case ISD::UDIV:
6862   case ISD::UREM: {
6863     MVT VT = N->getSimpleValueType(0);
6864     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
6865            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
6866            "Unexpected custom legalisation");
    // Don't promote division/remainder by constant since we should expand
    // those to multiply by a magic constant.
    // FIXME: What if the expansion is disabled for minsize?
6870     if (N->getOperand(1).getOpcode() == ISD::Constant)
6871       return;
6872 
6873     // If the input is i32, use ANY_EXTEND since the W instructions don't read
6874     // the upper 32 bits. For other types we need to sign or zero extend
6875     // based on the opcode.
6876     unsigned ExtOpc = ISD::ANY_EXTEND;
6877     if (VT != MVT::i32)
6878       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
6879                                            : ISD::ZERO_EXTEND;
6880 
6881     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
6882     break;
6883   }
6884   case ISD::UADDO:
6885   case ISD::USUBO: {
6886     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6887            "Unexpected custom legalisation");
6888     bool IsAdd = N->getOpcode() == ISD::UADDO;
6889     // Create an ADDW or SUBW.
6890     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6891     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
6892     SDValue Res =
6893         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
6894     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
6895                       DAG.getValueType(MVT::i32));
6896 
6897     SDValue Overflow;
6898     if (IsAdd && isOneConstant(RHS)) {
6899       // Special case uaddo X, 1 overflowed if the addition result is 0.
6900       // FIXME: We can do this for any constant RHS by using (X + C) < C.
6901       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
6902                               DAG.getConstant(0, DL, MVT::i64), ISD::SETEQ);
6903     } else {
6904       // Sign extend the LHS and perform an unsigned compare with the ADDW
6905       // result. Since the inputs are sign extended from i32, this is equivalent
6906       // to comparing the lower 32 bits.
6907       LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6908       Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
6909                               IsAdd ? ISD::SETULT : ISD::SETUGT);
6910     }
6911 
6912     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6913     Results.push_back(Overflow);
6914     return;
6915   }
6916   case ISD::UADDSAT:
6917   case ISD::USUBSAT: {
6918     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6919            "Unexpected custom legalisation");
6920     if (Subtarget.hasStdExtZbb()) {
6921       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
6922       // sign extend allows overflow of the lower 32 bits to be detected on
6923       // the promoted size.
6924       SDValue LHS =
6925           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
6926       SDValue RHS =
6927           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
6928       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
6929       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
6930       return;
6931     }
6932 
6933     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
6934     // promotion for UADDO/USUBO.
6935     Results.push_back(expandAddSubSat(N, DAG));
6936     return;
6937   }
6938   case ISD::ABS: {
6939     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
6940            "Unexpected custom legalisation");
6942 
6943     // Expand abs to Y = (sraiw X, 31); subw(xor(X, Y), Y)
6944 
6945     SDValue Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
6946 
    // Freeze the source so we can increase its use count.
6948     Src = DAG.getFreeze(Src);
6949 
6950     // Copy sign bit to all bits using the sraiw pattern.
6951     SDValue SignFill = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Src,
6952                                    DAG.getValueType(MVT::i32));
6953     SignFill = DAG.getNode(ISD::SRA, DL, MVT::i64, SignFill,
6954                            DAG.getConstant(31, DL, MVT::i64));
6955 
6956     SDValue NewRes = DAG.getNode(ISD::XOR, DL, MVT::i64, Src, SignFill);
6957     NewRes = DAG.getNode(ISD::SUB, DL, MVT::i64, NewRes, SignFill);
6958 
6959     // NOTE: The result is only required to be anyextended, but sext is
6960     // consistent with type legalization of sub.
6961     NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewRes,
6962                          DAG.getValueType(MVT::i32));
6963     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
6964     return;
6965   }
6966   case ISD::BITCAST: {
6967     EVT VT = N->getValueType(0);
6968     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
6969     SDValue Op0 = N->getOperand(0);
6970     EVT Op0VT = Op0.getValueType();
6971     MVT XLenVT = Subtarget.getXLenVT();
6972     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
6973       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
6974       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
6975     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
6976                Subtarget.hasStdExtF()) {
6977       SDValue FPConv =
6978           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
6979       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
6980     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
6981                isTypeLegal(Op0VT)) {
6982       // Custom-legalize bitcasts from fixed-length vector types to illegal
6983       // scalar types in order to improve codegen. Bitcast the vector to a
6984       // one-element vector type whose element type is the same as the result
6985       // type, and extract the first element.
6986       EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1);
6987       if (isTypeLegal(BVT)) {
6988         SDValue BVec = DAG.getBitcast(BVT, Op0);
6989         Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
6990                                       DAG.getConstant(0, DL, XLenVT)));
6991       }
6992     }
6993     break;
6994   }
6995   case RISCVISD::GREV:
6996   case RISCVISD::GORC:
6997   case RISCVISD::SHFL: {
6998     MVT VT = N->getSimpleValueType(0);
6999     MVT XLenVT = Subtarget.getXLenVT();
7000     assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
7001            "Unexpected custom legalisation");
7002     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
7003     assert((Subtarget.hasStdExtZbp() ||
7004             (Subtarget.hasStdExtZbkb() && N->getOpcode() == RISCVISD::GREV &&
7005              N->getConstantOperandVal(1) == 7)) &&
7006            "Unexpected extension");
7007     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
7008     SDValue NewOp1 =
7009         DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, N->getOperand(1));
7010     SDValue NewRes = DAG.getNode(N->getOpcode(), DL, XLenVT, NewOp0, NewOp1);
7011     // ReplaceNodeResults requires we maintain the same type for the return
7012     // value.
7013     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes));
7014     break;
7015   }
7016   case ISD::BSWAP:
7017   case ISD::BITREVERSE: {
7018     MVT VT = N->getSimpleValueType(0);
7019     MVT XLenVT = Subtarget.getXLenVT();
7020     assert((VT == MVT::i8 || VT == MVT::i16 ||
7021             (VT == MVT::i32 && Subtarget.is64Bit())) &&
7022            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
7023     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
7024     unsigned Imm = VT.getSizeInBits() - 1;
7025     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
7026     if (N->getOpcode() == ISD::BSWAP)
7027       Imm &= ~0x7U;
7028     SDValue GREVI = DAG.getNode(RISCVISD::GREV, DL, XLenVT, NewOp0,
7029                                 DAG.getConstant(Imm, DL, XLenVT));
7030     // ReplaceNodeResults requires we maintain the same type for the return
7031     // value.
7032     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
7033     break;
7034   }
7035   case ISD::FSHL:
7036   case ISD::FSHR: {
7037     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7038            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
7039     SDValue NewOp0 =
7040         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
7041     SDValue NewOp1 =
7042         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7043     SDValue NewShAmt =
7044         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7045     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
7046     // Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
7047     NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
7048                            DAG.getConstant(0x1f, DL, MVT::i64));
    // fshl and fshr concatenate their operands in the same order; the fslw
    // and fsrw instructions use different orders. fshl returns its first
    // operand for a shift of zero, while fshr returns its second. fsl and fsr
    // both return rs1, so the ISD nodes need different operand orders. The
    // shift amount is in rs2.
7054     unsigned Opc = RISCVISD::FSLW;
7055     if (N->getOpcode() == ISD::FSHR) {
7056       std::swap(NewOp0, NewOp1);
7057       Opc = RISCVISD::FSRW;
7058     }
7059     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
7060     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
7061     break;
7062   }
7063   case ISD::EXTRACT_VECTOR_ELT: {
7064     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
7065     // type is illegal (currently only vXi64 RV32).
7066     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
7067     // transferred to the destination register. We issue two of these from the
    // upper and lower halves of the SEW-bit vector element, slid down to the
7069     // first element.
7070     SDValue Vec = N->getOperand(0);
7071     SDValue Idx = N->getOperand(1);
7072 
7073     // The vector type hasn't been legalized yet so we can't issue target
7074     // specific nodes if it needs legalization.
    // FIXME: We could manually legalize this if it proves important.
7076     if (!isTypeLegal(Vec.getValueType()))
7077       return;
7078 
7079     MVT VecVT = Vec.getSimpleValueType();
7080 
7081     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
7082            VecVT.getVectorElementType() == MVT::i64 &&
7083            "Unexpected EXTRACT_VECTOR_ELT legalization");
7084 
7085     // If this is a fixed vector, we need to convert it to a scalable vector.
7086     MVT ContainerVT = VecVT;
7087     if (VecVT.isFixedLengthVector()) {
7088       ContainerVT = getContainerForFixedLengthVector(VecVT);
7089       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
7090     }
7091 
7092     MVT XLenVT = Subtarget.getXLenVT();
7093 
7094     // Use a VL of 1 to avoid processing more elements than we need.
7095     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
7096     SDValue VL = DAG.getConstant(1, DL, XLenVT);
7097     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
7098 
7099     // Unless the index is known to be 0, we must slide the vector down to get
7100     // the desired element into index 0.
7101     if (!isNullConstant(Idx)) {
7102       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
7103                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
7104     }
7105 
7106     // Extract the lower XLEN bits of the correct vector element.
7107     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7108 
7109     // To extract the upper XLEN bits of the vector element, shift the first
7110     // element right by 32 bits and re-extract the lower XLEN bits.
7111     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
7112                                      DAG.getUNDEF(ContainerVT),
7113                                      DAG.getConstant(32, DL, XLenVT), VL);
7114     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
7115                                  ThirtyTwoV, Mask, VL);
7116 
7117     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7118 
7119     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7120     break;
7121   }
7122   case ISD::INTRINSIC_WO_CHAIN: {
7123     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
7124     switch (IntNo) {
7125     default:
7126       llvm_unreachable(
7127           "Don't know how to custom type legalize this intrinsic!");
7128     case Intrinsic::riscv_grev:
7129     case Intrinsic::riscv_gorc: {
7130       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7131              "Unexpected custom legalisation");
7132       SDValue NewOp1 =
7133           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7134       SDValue NewOp2 =
7135           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7136       unsigned Opc =
7137           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
7138       // If the control is a constant, promote the node by clearing any extra
      // bits in the control. isel will form greviw/gorciw if the result is
7140       // sign extended.
7141       if (isa<ConstantSDNode>(NewOp2)) {
7142         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
7143                              DAG.getConstant(0x1f, DL, MVT::i64));
7144         Opc = IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
7145       }
7146       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
7147       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7148       break;
7149     }
7150     case Intrinsic::riscv_bcompress:
7151     case Intrinsic::riscv_bdecompress:
7152     case Intrinsic::riscv_bfp: {
7153       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7154              "Unexpected custom legalisation");
7155       Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
7156       break;
7157     }
7158     case Intrinsic::riscv_fsl:
7159     case Intrinsic::riscv_fsr: {
7160       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7161              "Unexpected custom legalisation");
7162       SDValue NewOp1 =
7163           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7164       SDValue NewOp2 =
7165           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7166       SDValue NewOp3 =
7167           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3));
7168       unsigned Opc = getRISCVWOpcodeByIntr(IntNo);
7169       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2, NewOp3);
7170       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7171       break;
7172     }
7173     case Intrinsic::riscv_orc_b: {
7174       // Lower to the GORCI encoding for orc.b with the operand extended.
7175       SDValue NewOp =
7176           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7177       SDValue Res = DAG.getNode(RISCVISD::GORC, DL, MVT::i64, NewOp,
7178                                 DAG.getConstant(7, DL, MVT::i64));
7179       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7180       return;
7181     }
7182     case Intrinsic::riscv_shfl:
7183     case Intrinsic::riscv_unshfl: {
7184       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
7185              "Unexpected custom legalisation");
7186       SDValue NewOp1 =
7187           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
7188       SDValue NewOp2 =
7189           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
7190       unsigned Opc =
7191           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
7192       // There is no (UN)SHFLIW. If the control word is a constant, we can use
7193       // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
7194       // will be shuffled the same way as the lower 32 bit half, but the two
7195       // halves won't cross.
7196       if (isa<ConstantSDNode>(NewOp2)) {
7197         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
7198                              DAG.getConstant(0xf, DL, MVT::i64));
7199         Opc =
7200             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
7201       }
7202       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
7203       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
7204       break;
7205     }
7206     case Intrinsic::riscv_vmv_x_s: {
7207       EVT VT = N->getValueType(0);
7208       MVT XLenVT = Subtarget.getXLenVT();
7209       if (VT.bitsLT(XLenVT)) {
        // Simple case: just extract using vmv.x.s and truncate.
7211         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
7212                                       Subtarget.getXLenVT(), N->getOperand(1));
7213         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
7214         return;
7215       }
7216 
7217       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
7218              "Unexpected custom legalization");
7219 
7220       // We need to do the move in two steps.
7221       SDValue Vec = N->getOperand(1);
7222       MVT VecVT = Vec.getSimpleValueType();
7223 
7224       // First extract the lower XLEN bits of the element.
7225       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
7226 
7227       // To extract the upper XLEN bits of the vector element, shift the first
7228       // element right by 32 bits and re-extract the lower XLEN bits.
7229       SDValue VL = DAG.getConstant(1, DL, XLenVT);
7230       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
7231       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
7232       SDValue ThirtyTwoV =
7233           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
7234                       DAG.getConstant(32, DL, XLenVT), VL);
7235       SDValue LShr32 =
7236           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
7237       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
7238 
7239       Results.push_back(
7240           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
7241       break;
7242     }
7243     }
7244     break;
7245   }
7246   case ISD::VECREDUCE_ADD:
7247   case ISD::VECREDUCE_AND:
7248   case ISD::VECREDUCE_OR:
7249   case ISD::VECREDUCE_XOR:
7250   case ISD::VECREDUCE_SMAX:
7251   case ISD::VECREDUCE_UMAX:
7252   case ISD::VECREDUCE_SMIN:
7253   case ISD::VECREDUCE_UMIN:
7254     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
7255       Results.push_back(V);
7256     break;
7257   case ISD::VP_REDUCE_ADD:
7258   case ISD::VP_REDUCE_AND:
7259   case ISD::VP_REDUCE_OR:
7260   case ISD::VP_REDUCE_XOR:
7261   case ISD::VP_REDUCE_SMAX:
7262   case ISD::VP_REDUCE_UMAX:
7263   case ISD::VP_REDUCE_SMIN:
7264   case ISD::VP_REDUCE_UMIN:
7265     if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
7266       Results.push_back(V);
7267     break;
7268   case ISD::FLT_ROUNDS_: {
7269     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
7270     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
7271     Results.push_back(Res.getValue(0));
7272     Results.push_back(Res.getValue(1));
7273     break;
7274   }
7275   }
7276 }
7277 
7278 // A structure to hold one of the bit-manipulation patterns below. Together, a
7279 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
7280 //   (or (and (shl x, 1), 0xAAAAAAAA),
7281 //       (and (srl x, 1), 0x55555555))
7282 struct RISCVBitmanipPat {
7283   SDValue Op;
7284   unsigned ShAmt;
7285   bool IsSHL;
7286 
7287   bool formsPairWith(const RISCVBitmanipPat &Other) const {
7288     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
7289   }
7290 };
7291 
7292 // Matches patterns of the form
7293 //   (and (shl x, C2), (C1 << C2))
7294 //   (and (srl x, C2), C1)
7295 //   (shl (and x, C1), C2)
7296 //   (srl (and x, (C1 << C2)), C2)
7297 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
7298 // The expected masks for each shift amount are specified in BitmanipMasks where
7299 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
// The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
// whether BitmanipMasks contains 6 or 5 entries respectively, assuming that
// the maximum possible XLen is 64.
7303 static Optional<RISCVBitmanipPat>
7304 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
7305   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
7306          "Unexpected number of masks");
7307   Optional<uint64_t> Mask;
7308   // Optionally consume a mask around the shift operation.
7309   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
7310     Mask = Op.getConstantOperandVal(1);
7311     Op = Op.getOperand(0);
7312   }
7313   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
7314     return None;
7315   bool IsSHL = Op.getOpcode() == ISD::SHL;
7316 
7317   if (!isa<ConstantSDNode>(Op.getOperand(1)))
7318     return None;
7319   uint64_t ShAmt = Op.getConstantOperandVal(1);
7320 
7321   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7322   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
7323     return None;
7324   // If we don't have enough masks for 64 bit, then we must be trying to
7325   // match SHFL so we're only allowed to shift 1/4 of the width.
7326   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
7327     return None;
7328 
7329   SDValue Src = Op.getOperand(0);
7330 
7331   // The expected mask is shifted left when the AND is found around SHL
7332   // patterns.
7333   //   ((x >> 1) & 0x55555555)
7334   //   ((x << 1) & 0xAAAAAAAA)
7335   bool SHLExpMask = IsSHL;
7336 
7337   if (!Mask) {
7338     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
7339     // the mask is all ones: consume that now.
7340     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
7341       Mask = Src.getConstantOperandVal(1);
7342       Src = Src.getOperand(0);
7343       // The expected mask is now in fact shifted left for SRL, so reverse the
7344       // decision.
7345       //   ((x & 0xAAAAAAAA) >> 1)
7346       //   ((x & 0x55555555) << 1)
7347       SHLExpMask = !SHLExpMask;
7348     } else {
7349       // Use a default shifted mask of all-ones if there's no AND, truncated
7350       // down to the expected width. This simplifies the logic later on.
7351       Mask = maskTrailingOnes<uint64_t>(Width);
7352       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
7353     }
7354   }
7355 
7356   unsigned MaskIdx = Log2_32(ShAmt);
7357   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7358 
7359   if (SHLExpMask)
7360     ExpMask <<= ShAmt;
7361 
7362   if (Mask != ExpMask)
7363     return None;
7364 
7365   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
7366 }
7367 
7368 // Matches any of the following bit-manipulation patterns:
7369 //   (and (shl x, 1), (0x55555555 << 1))
7370 //   (and (srl x, 1), 0x55555555)
7371 //   (shl (and x, 0x55555555), 1)
7372 //   (srl (and x, (0x55555555 << 1)), 1)
7373 // where the shift amount and mask may vary thus:
7374 //   [1]  = 0x55555555 / 0xAAAAAAAA
7375 //   [2]  = 0x33333333 / 0xCCCCCCCC
7376 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
7377 //   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
7379 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
7380 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
7381   // These are the unshifted masks which we use to match bit-manipulation
7382   // patterns. They may be shifted left in certain circumstances.
7383   static const uint64_t BitmanipMasks[] = {
7384       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
7385       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
7386 
7387   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7388 }
7389 
7390 // Match the following pattern as a GREVI(W) operation
7391 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
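// For example (an illustrative sketch), with shift amount 1:
//   (or (and (shl x, 1), 0xAAAAAAAA),
//       (and (srl x, 1), 0x55555555))
// swaps the even and odd bits of x and is combined to (GREV x, 1).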
7392 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
7393                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
7395   EVT VT = Op.getValueType();
7396 
7397   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7398     auto LHS = matchGREVIPat(Op.getOperand(0));
7399     auto RHS = matchGREVIPat(Op.getOperand(1));
7400     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
7401       SDLoc DL(Op);
7402       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
7403                          DAG.getConstant(LHS->ShAmt, DL, VT));
7404     }
7405   }
7406   return SDValue();
7407 }
7408 
7409 // Matches any of the following patterns as a GORCI(W) operation
7410 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
7411 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
7412 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
7413 // Note that with the variant of 3.,
7414 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
7415 // the inner pattern will first be matched as GREVI and then the outer
7416 // pattern will be matched to GORC via the first rule above.
7417 // 4.  (or (rotl/rotr x, bitwidth/2), x)
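// For example (illustrative): (or (GREV x, 4), x) -> (GORC x, 4) via rule 1,
// and on RV32 (or (rotl x, 16), x) -> (GORC x, 16) via rule 4.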
7418 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
7419                                const RISCVSubtarget &Subtarget) {
7420   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
7421   EVT VT = Op.getValueType();
7422 
7423   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
7424     SDLoc DL(Op);
7425     SDValue Op0 = Op.getOperand(0);
7426     SDValue Op1 = Op.getOperand(1);
7427 
7428     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
7429       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
7430           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
7431           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
7432         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
7433       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
7434       if ((Reverse.getOpcode() == ISD::ROTL ||
7435            Reverse.getOpcode() == ISD::ROTR) &&
7436           Reverse.getOperand(0) == X &&
7437           isa<ConstantSDNode>(Reverse.getOperand(1))) {
7438         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
7439         if (RotAmt == (VT.getSizeInBits() / 2))
7440           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
7441                              DAG.getConstant(RotAmt, DL, VT));
7442       }
7443       return SDValue();
7444     };
7445 
7446     // Check for either commutable permutation of (or (GREVI x, shamt), x)
7447     if (SDValue V = MatchOROfReverse(Op0, Op1))
7448       return V;
7449     if (SDValue V = MatchOROfReverse(Op1, Op0))
7450       return V;
7451 
7452     // OR is commutable so canonicalize its OR operand to the left
7453     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
7454       std::swap(Op0, Op1);
7455     if (Op0.getOpcode() != ISD::OR)
7456       return SDValue();
7457     SDValue OrOp0 = Op0.getOperand(0);
7458     SDValue OrOp1 = Op0.getOperand(1);
7459     auto LHS = matchGREVIPat(OrOp0);
7460     // OR is commutable so swap the operands and try again: x might have been
7461     // on the left
7462     if (!LHS) {
7463       std::swap(OrOp0, OrOp1);
7464       LHS = matchGREVIPat(OrOp0);
7465     }
7466     auto RHS = matchGREVIPat(Op1);
7467     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
7468       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
7469                          DAG.getConstant(LHS->ShAmt, DL, VT));
7470     }
7471   }
7472   return SDValue();
7473 }
7474 
7475 // Matches any of the following bit-manipulation patterns:
7476 //   (and (shl x, 1), (0x22222222 << 1))
7477 //   (and (srl x, 1), 0x22222222)
7478 //   (shl (and x, 0x22222222), 1)
7479 //   (srl (and x, (0x22222222 << 1)), 1)
7480 // where the shift amount and mask may vary thus:
7481 //   [1]  = 0x22222222 / 0x44444444
7482 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
7483 //   [4]  = 0x00F000F0 / 0x0F000F00
7484 //   [8]  = 0x0000FF00 / 0x00FF0000
7485 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
7486 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
7487   // These are the unshifted masks which we use to match bit-manipulation
7488   // patterns. They may be shifted left in certain circumstances.
7489   static const uint64_t BitmanipMasks[] = {
7490       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
7491       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
7492 
7493   return matchRISCVBitmanipPat(Op, BitmanipMasks);
7494 }
7495 
7496 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
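// For example (an illustrative RV32 instance, derived from the masks below):
//   (or (or (and (shl x, 8), 0x00FF0000), (and (srl x, 8), 0x0000FF00)),
//       (and x, 0xFF0000FF))
//     -> (SHFL x, 8), which swaps the two middle bytes.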
7497 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
7498                                const RISCVSubtarget &Subtarget) {
7499   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
7500   EVT VT = Op.getValueType();
7501 
7502   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
7503     return SDValue();
7504 
7505   SDValue Op0 = Op.getOperand(0);
7506   SDValue Op1 = Op.getOperand(1);
7507 
7508   // OR is commutable, so canonicalize the inner OR to the LHS (Op0).
7509   if (Op0.getOpcode() != ISD::OR)
7510     std::swap(Op0, Op1);
7511   if (Op0.getOpcode() != ISD::OR)
7512     return SDValue();
7513 
7514   // We found an inner OR, so our operands are the operands of the inner OR
7515   // and the other operand of the outer OR.
7516   SDValue A = Op0.getOperand(0);
7517   SDValue B = Op0.getOperand(1);
7518   SDValue C = Op1;
7519 
7520   auto Match1 = matchSHFLPat(A);
7521   auto Match2 = matchSHFLPat(B);
7522 
7523   // If neither matched, we failed.
7524   if (!Match1 && !Match2)
7525     return SDValue();
7526 
7527   // We had at least one match. If one failed, try the remaining C operand.
7528   if (!Match1) {
7529     std::swap(A, C);
7530     Match1 = matchSHFLPat(A);
7531     if (!Match1)
7532       return SDValue();
7533   } else if (!Match2) {
7534     std::swap(B, C);
7535     Match2 = matchSHFLPat(B);
7536     if (!Match2)
7537       return SDValue();
7538   }
7539   assert(Match1 && Match2);
7540 
7541   // Make sure our matches pair up.
7542   if (!Match1->formsPairWith(*Match2))
7543     return SDValue();
7544 
7545   // All that remains is to make sure C is an AND of the same input that masks
7546   // out the bits that are being shuffled.
7547   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
7548       C.getOperand(0) != Match1->Op)
7549     return SDValue();
7550 
7551   uint64_t Mask = C.getConstantOperandVal(1);
7552 
7553   static const uint64_t BitmanipMasks[] = {
7554       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
7555       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
7556   };
7557 
7558   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
7559   unsigned MaskIdx = Log2_32(Match1->ShAmt);
7560   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
7561 
7562   if (Mask != ExpMask)
7563     return SDValue();
7564 
7565   SDLoc DL(Op);
7566   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
7567                      DAG.getConstant(Match1->ShAmt, DL, VT));
7568 }
7569 
7570 // Optimize (add (shl x, c0), (shl y, c1)) ->
7571 //          (SLLI (SH*ADD y, x), c0), if c1-c0 equals 1, 2, or 3.
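// For example (illustrative): with c0 = 5 and c1 = 6,
//   (add (shl x, 5), (shl y, 6)) -> (shl (add (shl y, 1), x), 5),
// which can then be selected as (SLLI (SH1ADD y, x), 5).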
7572 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
7573                                   const RISCVSubtarget &Subtarget) {
7574   // Perform this optimization only when the Zba extension is enabled.
7575   if (!Subtarget.hasStdExtZba())
7576     return SDValue();
7577 
7578   // Skip for vector types and larger types.
7579   EVT VT = N->getValueType(0);
7580   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7581     return SDValue();
7582 
7583   // The two operand nodes must be SHL and have no other use.
7584   SDValue N0 = N->getOperand(0);
7585   SDValue N1 = N->getOperand(1);
7586   if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
7587       !N0->hasOneUse() || !N1->hasOneUse())
7588     return SDValue();
7589 
7590   // Check c0 and c1.
7591   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7592   auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
7593   if (!N0C || !N1C)
7594     return SDValue();
7595   int64_t C0 = N0C->getSExtValue();
7596   int64_t C1 = N1C->getSExtValue();
7597   if (C0 <= 0 || C1 <= 0)
7598     return SDValue();
7599 
7600   // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
7601   int64_t Bits = std::min(C0, C1);
7602   int64_t Diff = std::abs(C0 - C1);
7603   if (Diff != 1 && Diff != 2 && Diff != 3)
7604     return SDValue();
7605 
7606   // Build nodes.
7607   SDLoc DL(N);
7608   SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
7609   SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
7610   SDValue NA0 =
7611       DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
7612   SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
7613   return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
7614 }
7615 
7616 // Combine
7617 // ROTR ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7618 // ROTL ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32
7619 // ROTR ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7620 // ROTL ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64
7621 // RORW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7622 // ROLW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64
7623 // The GREV patterns above represent BSWAP.
7624 // FIXME: This can be generalized to any GREV. We just need to toggle the MSB
7625 // of the GREV shift amount.
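// For example, on RV32 a rotate by 16 is (GREVI x, 16), so
//   (ROTR (GREVI x, 24), 16) -> (GREVI x, 24 ^ 16) = (GREVI x, 8),
// which swaps the bytes within each halfword.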
7626 static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG,
7627                                           const RISCVSubtarget &Subtarget) {
7628   bool IsWInstruction =
7629       N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW;
7630   assert((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL ||
7631           IsWInstruction) &&
7632          "Unexpected opcode!");
7633   SDValue Src = N->getOperand(0);
7634   EVT VT = N->getValueType(0);
7635   SDLoc DL(N);
7636 
7637   if (!Subtarget.hasStdExtZbp() || Src.getOpcode() != RISCVISD::GREV)
7638     return SDValue();
7639 
7640   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7641       !isa<ConstantSDNode>(Src.getOperand(1)))
7642     return SDValue();
7643 
7644   unsigned BitWidth = IsWInstruction ? 32 : VT.getSizeInBits();
7645   assert(isPowerOf2_32(BitWidth) && "Expected a power of 2");
7646 
7647   // Needs to be a rotate by half the bitwidth for ROTR/ROTL or by 16 for
7648   // RORW/ROLW, and the GREV amount should be the bswap encoding for this width.
7649   unsigned ShAmt1 = N->getConstantOperandVal(1);
7650   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7651   if (BitWidth < 32 || ShAmt1 != (BitWidth / 2) || ShAmt2 != (BitWidth - 8))
7652     return SDValue();
7653 
7654   Src = Src.getOperand(0);
7655 
7656   // Rotating by half the bitwidth toggles the MSB of the GREV shift amount.
7657   unsigned CombinedShAmt = ShAmt1 ^ ShAmt2;
7658   if (CombinedShAmt == 0)
7659     return Src;
7660 
7661   SDValue Res = DAG.getNode(
7662       RISCVISD::GREV, DL, VT, Src,
7663       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7664   if (!IsWInstruction)
7665     return Res;
7666 
7667   // Sign extend the result to match the behavior of the rotate. This will be
7668   // selected to GREVIW in isel.
7669   return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Res,
7670                      DAG.getValueType(MVT::i32));
7671 }
7672 
7673 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
7674 // non-zero, and to x when it is zero. Any repeated GREVI stage undoes itself.
7675 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated GORCI
7676 // stage does not undo itself, but it is redundant.
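// For example: (GREVI (GREVI x, 2), 3) -> (GREVI x, 1),
// (GREVI (GREVI x, 3), 3) -> x, and (GORCI (GORCI x, 1), 2) -> (GORCI x, 3).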
7677 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
7678   bool IsGORC = N->getOpcode() == RISCVISD::GORC;
7679   assert((IsGORC || N->getOpcode() == RISCVISD::GREV) && "Unexpected opcode");
7680   SDValue Src = N->getOperand(0);
7681 
7682   if (Src.getOpcode() != N->getOpcode())
7683     return SDValue();
7684 
7685   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
7686       !isa<ConstantSDNode>(Src.getOperand(1)))
7687     return SDValue();
7688 
7689   unsigned ShAmt1 = N->getConstantOperandVal(1);
7690   unsigned ShAmt2 = Src.getConstantOperandVal(1);
7691   Src = Src.getOperand(0);
7692 
7693   unsigned CombinedShAmt;
7694   if (IsGORC)
7695     CombinedShAmt = ShAmt1 | ShAmt2;
7696   else
7697     CombinedShAmt = ShAmt1 ^ ShAmt2;
7698 
7699   if (CombinedShAmt == 0)
7700     return Src;
7701 
7702   SDLoc DL(N);
7703   return DAG.getNode(
7704       N->getOpcode(), DL, N->getValueType(0), Src,
7705       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
7706 }
7707 
7708 // Combine a constant select operand into its use:
7709 //
7710 // (and (select cond, -1, c), x)
7711 //   -> (select cond, x, (and x, c))  [AllOnes=1]
7712 // (or  (select cond, 0, c), x)
7713 //   -> (select cond, x, (or x, c))  [AllOnes=0]
7714 // (xor (select cond, 0, c), x)
7715 //   -> (select cond, x, (xor x, c))  [AllOnes=0]
7716 // (add (select cond, 0, c), x)
7717 //   -> (select cond, x, (add x, c))  [AllOnes=0]
7718 // (sub x, (select cond, 0, c))
7719 //   -> (select cond, x, (sub x, c))  [AllOnes=0]
7720 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
7721                                    SelectionDAG &DAG, bool AllOnes) {
7722   EVT VT = N->getValueType(0);
7723 
7724   // Skip vectors.
7725   if (VT.isVector())
7726     return SDValue();
7727 
7728   if ((Slct.getOpcode() != ISD::SELECT &&
7729        Slct.getOpcode() != RISCVISD::SELECT_CC) ||
7730       !Slct.hasOneUse())
7731     return SDValue();
7732 
7733   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
7734     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
7735   };
7736 
7737   bool SwapSelectOps;
7738   unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
7739   SDValue TrueVal = Slct.getOperand(1 + OpOffset);
7740   SDValue FalseVal = Slct.getOperand(2 + OpOffset);
7741   SDValue NonConstantVal;
7742   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
7743     SwapSelectOps = false;
7744     NonConstantVal = FalseVal;
7745   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
7746     SwapSelectOps = true;
7747     NonConstantVal = TrueVal;
7748   } else
7749     return SDValue();
7750 
7751   // Slct is now known to be the desired identity constant when CC is true.
7752   TrueVal = OtherOp;
7753   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
7754   // Unless SwapSelectOps says the condition should be false.
7755   if (SwapSelectOps)
7756     std::swap(TrueVal, FalseVal);
7757 
7758   if (Slct.getOpcode() == RISCVISD::SELECT_CC)
7759     return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
7760                        {Slct.getOperand(0), Slct.getOperand(1),
7761                         Slct.getOperand(2), TrueVal, FalseVal});
7762 
7763   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
7764                      {Slct.getOperand(0), TrueVal, FalseVal});
7765 }
7766 
7767 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7768 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
7769                                               bool AllOnes) {
7770   SDValue N0 = N->getOperand(0);
7771   SDValue N1 = N->getOperand(1);
7772   if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
7773     return Result;
7774   if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
7775     return Result;
7776   return SDValue();
7777 }
7778 
7779 // Transform (add (mul x, c0), c1) ->
7780 //           (add (mul (add x, c1/c0), c0), c1%c0).
7781 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
7782 // that should be excluded is when c0*(c1/c0) is simm12, which will lead
7783 // to an infinite loop in DAGCombine if transformed.
7784 // Or transform (add (mul x, c0), c1) ->
7785 //              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
7786 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
7787 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will
7788 // lead to an infinite loop in DAGCombine if transformed.
7789 // Or transform (add (mul x, c0), c1) ->
7790 //              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
7791 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
7792 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will
7793 // lead to an infinite loop in DAGCombine if transformed.
7794 // Or transform (add (mul x, c0), c1) ->
7795 //              (mul (add x, c1/c0), c0).
7796 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
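// For example (illustrative): with c0 = 100 and c1 = 4097 (not simm12),
// c1/c0 = 40 and c1%c0 = 97 are both simm12 while c0*(c1/c0) = 4000 is not,
// so (add (mul x, 100), 4097) -> (add (mul (add x, 40), 100), 97).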
7797 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
7798                                      const RISCVSubtarget &Subtarget) {
7799   // Skip for vector types and larger types.
7800   EVT VT = N->getValueType(0);
7801   if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
7802     return SDValue();
7803   // The first operand node must be a MUL and have no other use.
7804   SDValue N0 = N->getOperand(0);
7805   if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
7806     return SDValue();
7807   // Check if c0 and c1 match the conditions above.
7808   auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7809   auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
7810   if (!N0C || !N1C)
7811     return SDValue();
7812   // If N0C has multiple uses it's possible one of the cases in
7813   // DAGCombiner::isMulAddWithConstProfitable will be true, which would result
7814   // in an infinite loop.
7815   if (!N0C->hasOneUse())
7816     return SDValue();
7817   int64_t C0 = N0C->getSExtValue();
7818   int64_t C1 = N1C->getSExtValue();
7819   int64_t CA, CB;
7820   if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1))
7821     return SDValue();
7822   // Search for a proper CA (non-zero) and CB that are both simm12.
7823   if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) &&
7824       !isInt<12>(C0 * (C1 / C0))) {
7825     CA = C1 / C0;
7826     CB = C1 % C0;
7827   } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) &&
7828              isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) {
7829     CA = C1 / C0 + 1;
7830     CB = C1 % C0 - C0;
7831   } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) &&
7832              isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) {
7833     CA = C1 / C0 - 1;
7834     CB = C1 % C0 + C0;
7835   } else
7836     return SDValue();
7837   // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
7838   SDLoc DL(N);
7839   SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
7840                              DAG.getConstant(CA, DL, VT));
7841   SDValue New1 =
7842       DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
7843   return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT));
7844 }
7845 
7846 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
7847                                  const RISCVSubtarget &Subtarget) {
7848   if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
7849     return V;
7850   if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
7851     return V;
7852   // fold (add (select lhs, rhs, cc, 0, y), x) ->
7853   //      (select lhs, rhs, cc, x, (add x, y))
7854   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7855 }
7856 
7857 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
7858   // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
7859   //      (select lhs, rhs, cc, x, (sub x, y))
7860   SDValue N0 = N->getOperand(0);
7861   SDValue N1 = N->getOperand(1);
7862   return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
7863 }
7864 
7865 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
7866   // fold (and (select lhs, rhs, cc, -1, y), x) ->
7867   //      (select lhs, rhs, cc, x, (and x, y))
7868   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
7869 }
7870 
7871 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
7872                                 const RISCVSubtarget &Subtarget) {
7873   if (Subtarget.hasStdExtZbp()) {
7874     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
7875       return GREV;
7876     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
7877       return GORC;
7878     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
7879       return SHFL;
7880   }
7881 
7882   // fold (or (select cond, 0, y), x) ->
7883   //      (select cond, x, (or x, y))
7884   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7885 }
7886 
7887 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
7888   // fold (xor (select cond, 0, y), x) ->
7889   //      (select cond, x, (xor x, y))
7890   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
7891 }
7892 
7893 static SDValue
7894 performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
7895                                 const RISCVSubtarget &Subtarget) {
7896   SDValue Src = N->getOperand(0);
7897   EVT VT = N->getValueType(0);
7898 
7899   // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X)
7900   if (Src.getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
7901       cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16))
7902     return DAG.getNode(RISCVISD::FMV_X_SIGNEXTH, SDLoc(N), VT,
7903                        Src.getOperand(0));
7904 
7905   // Fold (i64 (sext_inreg (abs X), i32)) ->
7906   // (i64 (smax (sext_inreg (neg X), i32), X)) if X has more than 32 sign bits.
7907   // The (sext_inreg (neg X), i32) will be selected to negw by isel. This
7908   // pattern occurs after type legalization of (i32 (abs X)) on RV64 if the user
7909   // of the (i32 (abs X)) is a sext or setcc or something else that causes type
7910   // legalization to add a sext_inreg after the abs. The (i32 (abs X)) will have
7911   // been type legalized to (i64 (abs (sext_inreg X, i32))), but the sext_inreg
7912   // may get combined into an earlier operation so we need to use
7913   // ComputeNumSignBits.
7914   // NOTE: (i64 (sext_inreg (abs X), i32)) can also be created for
7915   // (i64 (ashr (shl (abs X), 32), 32)) without any type legalization so
7916   // we can't assume that X has 33 sign bits. We must check.
7917   if (Subtarget.hasStdExtZbb() && Subtarget.is64Bit() &&
7918       Src.getOpcode() == ISD::ABS && Src.hasOneUse() && VT == MVT::i64 &&
7919       cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32 &&
7920       DAG.ComputeNumSignBits(Src.getOperand(0)) > 32) {
7921     SDLoc DL(N);
7922     SDValue Freeze = DAG.getFreeze(Src.getOperand(0));
7923     SDValue Neg =
7924         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, MVT::i64), Freeze);
7925     Neg = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Neg,
7926                       DAG.getValueType(MVT::i32));
7927     return DAG.getNode(ISD::SMAX, DL, MVT::i64, Freeze, Neg);
7928   }
7929 
7930   return SDValue();
7931 }
7932 
7933 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to
7934 // vwadd(u).vv/vx or vwsub(u).vv/vx.
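// For example (illustrative; the matching mask and VL operands are elided):
//   (add_vl X:nxv2i64, (vsext_vl Y:nxv2i32)) -> (vwadd_w_vl X, Y)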
7935 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG,
7936                                              bool Commute = false) {
7937   assert((N->getOpcode() == RISCVISD::ADD_VL ||
7938           N->getOpcode() == RISCVISD::SUB_VL) &&
7939          "Unexpected opcode");
7940   bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL;
7941   SDValue Op0 = N->getOperand(0);
7942   SDValue Op1 = N->getOperand(1);
7943   if (Commute)
7944     std::swap(Op0, Op1);
7945 
7946   MVT VT = N->getSimpleValueType(0);
7947 
7948   // Determine the narrow size for a widening add/sub.
7949   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
7950   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
7951                                   VT.getVectorElementCount());
7952 
7953   SDValue Mask = N->getOperand(2);
7954   SDValue VL = N->getOperand(3);
7955 
7956   SDLoc DL(N);
7957 
7958   // If the RHS is a sext or zext, we can form a widening op.
7959   if ((Op1.getOpcode() == RISCVISD::VZEXT_VL ||
7960        Op1.getOpcode() == RISCVISD::VSEXT_VL) &&
7961       Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) {
7962     unsigned ExtOpc = Op1.getOpcode();
7963     Op1 = Op1.getOperand(0);
7964     // Re-introduce narrower extends if needed.
7965     if (Op1.getValueType() != NarrowVT)
7966       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
7967 
7968     unsigned WOpc;
7969     if (ExtOpc == RISCVISD::VSEXT_VL)
7970       WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL;
7971     else
7972       WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL;
7973 
7974     return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL);
7975   }
7976 
7977   // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar
7978   // sext/zext?
7979 
7980   return SDValue();
7981 }
7982 
7983 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or
7984 // vwsub(u).vv/vx.
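// For example (illustrative; the matching mask and VL operands are elided):
//   (vwadd_w_vl (vsext_vl X:nxv2i32), Y:nxv2i32) -> (vwadd_vl X, Y)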
7985 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) {
7986   SDValue Op0 = N->getOperand(0);
7987   SDValue Op1 = N->getOperand(1);
7988   SDValue Mask = N->getOperand(2);
7989   SDValue VL = N->getOperand(3);
7990 
7991   MVT VT = N->getSimpleValueType(0);
7992   MVT NarrowVT = Op1.getSimpleValueType();
7993   unsigned NarrowSize = NarrowVT.getScalarSizeInBits();
7994 
7995   unsigned VOpc;
7996   switch (N->getOpcode()) {
7997   default: llvm_unreachable("Unexpected opcode");
7998   case RISCVISD::VWADD_W_VL:  VOpc = RISCVISD::VWADD_VL;  break;
7999   case RISCVISD::VWSUB_W_VL:  VOpc = RISCVISD::VWSUB_VL;  break;
8000   case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break;
8001   case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break;
8002   }
8003 
8004   bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8005                   N->getOpcode() == RISCVISD::VWSUB_W_VL;
8006 
8007   SDLoc DL(N);
8008 
8009   // If the LHS is a sext or zext, we can narrow this op to the same size as
8010   // the RHS.
8011   if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) ||
8012        (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) &&
8013       Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) {
8014     unsigned ExtOpc = Op0.getOpcode();
8015     Op0 = Op0.getOperand(0);
8016     // Re-introduce narrower extends if needed.
8017     if (Op0.getValueType() != NarrowVT)
8018       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8019     return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL);
8020   }
8021 
8022   bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL ||
8023                N->getOpcode() == RISCVISD::VWADDU_W_VL;
8024 
8025   // Look for splats on the left hand side of a vwadd(u).wv. We might be able
8026   // to commute and use a vwadd(u).vx instead.
8027   if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL &&
8028       Op0.getOperand(0).isUndef() && Op0.getOperand(2) == VL) {
8029     Op0 = Op0.getOperand(1);
8030 
8031     // See if we have enough sign bits or zero bits in the scalar to use a
8032     // widening add/sub by splatting to a smaller element size.
8033     unsigned EltBits = VT.getScalarSizeInBits();
8034     unsigned ScalarBits = Op0.getValueSizeInBits();
8035     // Make sure we're getting all element bits from the scalar register.
8036     // FIXME: Support implicit sign extension of vmv.v.x?
8037     if (ScalarBits < EltBits)
8038       return SDValue();
8039 
8040     if (IsSigned) {
8041       if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize))
8042         return SDValue();
8043     } else {
8044       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8045       if (!DAG.MaskedValueIsZero(Op0, Mask))
8046         return SDValue();
8047     }
8048 
8049     Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8050                       DAG.getUNDEF(NarrowVT), Op0, VL);
8051     return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL);
8052   }
8053 
8054   return SDValue();
8055 }
8056 
8057 // Try to form VWMUL, VWMULU or VWMULSU.
8058 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op.
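// For example (illustrative; the matching mask and VL operands are elided):
//   (mul_vl (vsext_vl X:nxv2i32), (vsext_vl Y:nxv2i32)) -> (vwmul_vl X, Y)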
8059 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG,
8060                                        bool Commute) {
8061   assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
8062   SDValue Op0 = N->getOperand(0);
8063   SDValue Op1 = N->getOperand(1);
8064   if (Commute)
8065     std::swap(Op0, Op1);
8066 
8067   bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
8068   bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
8069   bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL;
8070   if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
8071     return SDValue();
8072 
8073   SDValue Mask = N->getOperand(2);
8074   SDValue VL = N->getOperand(3);
8075 
8076   // Make sure the mask and VL match.
8077   if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
8078     return SDValue();
8079 
8080   MVT VT = N->getSimpleValueType(0);
8081 
8082   // Determine the narrow size for a widening multiply.
8083   unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
8084   MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
8085                                   VT.getVectorElementCount());
8086 
8087   SDLoc DL(N);
8088 
8089   // See if the other operand has the same extend opcode.
8090   if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) {
8091     if (!Op1.hasOneUse())
8092       return SDValue();
8093 
8094     // Make sure the mask and VL match.
8095     if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
8096       return SDValue();
8097 
8098     Op1 = Op1.getOperand(0);
8099   } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
8100     // The operand is a splat of a scalar.
8101 
8102     // The passthru must be undef for the operation to be tail agnostic.
8103     if (!Op1.getOperand(0).isUndef())
8104       return SDValue();
8105     // The VL must be the same.
8106     if (Op1.getOperand(2) != VL)
8107       return SDValue();
8108 
8109     // Get the scalar value.
8110     Op1 = Op1.getOperand(1);
8111 
8112     // See if we have enough sign bits or zero bits in the scalar to use a
8113     // widening multiply by splatting to a smaller element size.
8114     unsigned EltBits = VT.getScalarSizeInBits();
8115     unsigned ScalarBits = Op1.getValueSizeInBits();
8116     // Make sure we're getting all element bits from the scalar register.
8117     // FIXME: Support implicit sign extension of vmv.v.x?
8118     if (ScalarBits < EltBits)
8119       return SDValue();
8120 
8121     // If the LHS is a sign extend, try to use vwmul.
8122     if (IsSignExt && DAG.ComputeNumSignBits(Op1) > (ScalarBits - NarrowSize)) {
8123       // Can use vwmul.
8124     } else {
8125       // Otherwise try to use vwmulu or vwmulsu.
8126       APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
8127       if (DAG.MaskedValueIsZero(Op1, Mask))
8128         IsVWMULSU = IsSignExt;
8129       else
8130         return SDValue();
8131     }
8132 
8133     Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
8134                       DAG.getUNDEF(NarrowVT), Op1, VL);
8135   } else
8136     return SDValue();
8137 
8138   Op0 = Op0.getOperand(0);
8139 
8140   // Re-introduce narrower extends if needed.
8141   unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
8142   if (Op0.getValueType() != NarrowVT)
8143     Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
8144   // vwmulsu requires the second operand to be zero extended.
8145   ExtOpc = IsVWMULSU ? RISCVISD::VZEXT_VL : ExtOpc;
8146   if (Op1.getValueType() != NarrowVT)
8147     Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
8148 
8149   unsigned WMulOpc = RISCVISD::VWMULSU_VL;
8150   if (!IsVWMULSU)
8151     WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
8152   return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
8153 }
8154 
8155 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) {
8156   switch (Op.getOpcode()) {
8157   case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE;
8158   case ISD::FTRUNC:     return RISCVFPRndMode::RTZ;
8159   case ISD::FFLOOR:     return RISCVFPRndMode::RDN;
8160   case ISD::FCEIL:      return RISCVFPRndMode::RUP;
8161   case ISD::FROUND:     return RISCVFPRndMode::RMM;
8162   }
8163 
8164   return RISCVFPRndMode::Invalid;
8165 }
8166 
8167 // Fold
8168 //   (fp_to_int (froundeven X)) -> fcvt X, rne
8169 //   (fp_to_int (ftrunc X))     -> fcvt X, rtz
8170 //   (fp_to_int (ffloor X))     -> fcvt X, rdn
8171 //   (fp_to_int (fceil X))      -> fcvt X, rup
8172 //   (fp_to_int (fround X))     -> fcvt X, rmm
8173 static SDValue performFP_TO_INTCombine(SDNode *N,
8174                                        TargetLowering::DAGCombinerInfo &DCI,
8175                                        const RISCVSubtarget &Subtarget) {
8176   SelectionDAG &DAG = DCI.DAG;
8177   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8178   MVT XLenVT = Subtarget.getXLenVT();
8179 
8180   // Only handle XLen or i32 types. Other types narrower than XLen will
8181   // eventually be legalized to XLenVT.
8182   EVT VT = N->getValueType(0);
8183   if (VT != MVT::i32 && VT != XLenVT)
8184     return SDValue();
8185 
8186   SDValue Src = N->getOperand(0);
8187 
8188   // Ensure the FP type is also legal.
8189   if (!TLI.isTypeLegal(Src.getValueType()))
8190     return SDValue();
8191 
8192   // Don't do this for f16 with Zfhmin and not Zfh.
8193   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8194     return SDValue();
8195 
8196   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8197   if (FRM == RISCVFPRndMode::Invalid)
8198     return SDValue();
8199 
8200   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
8201 
8202   unsigned Opc;
8203   if (VT == XLenVT)
8204     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8205   else
8206     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8207 
8208   SDLoc DL(N);
8209   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0),
8210                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8211   return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt);
8212 }
8213 
8214 // Fold
8215 //   (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne))
8216 //   (fp_to_int_sat (ftrunc X))     -> (select X == nan, 0, (fcvt X, rtz))
8217 //   (fp_to_int_sat (ffloor X))     -> (select X == nan, 0, (fcvt X, rdn))
8218 //   (fp_to_int_sat (fceil X))      -> (select X == nan, 0, (fcvt X, rup))
8219 //   (fp_to_int_sat (fround X))     -> (select X == nan, 0, (fcvt X, rmm))
8220 static SDValue performFP_TO_INT_SATCombine(SDNode *N,
8221                                        TargetLowering::DAGCombinerInfo &DCI,
8222                                        const RISCVSubtarget &Subtarget) {
8223   SelectionDAG &DAG = DCI.DAG;
8224   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8225   MVT XLenVT = Subtarget.getXLenVT();
8226 
8227   // Only handle XLen types. Other types narrower than XLen will eventually be
8228   // legalized to XLenVT.
8229   EVT DstVT = N->getValueType(0);
8230   if (DstVT != XLenVT)
8231     return SDValue();
8232 
8233   SDValue Src = N->getOperand(0);
8234 
8235   // Ensure the FP type is also legal.
8236   if (!TLI.isTypeLegal(Src.getValueType()))
8237     return SDValue();
8238 
8239   // Don't do this for f16 with Zfhmin and not Zfh.
8240   if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh())
8241     return SDValue();
8242 
8243   EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT();
8244 
8245   RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src);
8246   if (FRM == RISCVFPRndMode::Invalid)
8247     return SDValue();
8248 
8249   bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT;
8250 
8251   unsigned Opc;
8252   if (SatVT == DstVT)
8253     Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
8254   else if (DstVT == MVT::i64 && SatVT == MVT::i32)
8255     Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
8256   else
8257     return SDValue();
8258   // FIXME: Support other SatVTs by clamping before or after the conversion.
8259 
8260   Src = Src.getOperand(0);
8261 
8262   SDLoc DL(N);
8263   SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src,
8264                                 DAG.getTargetConstant(FRM, DL, XLenVT));
8265 
8266   // RISCV FP-to-int conversions saturate to the destination register size, but
8267   // don't produce 0 for NaN.
8268   SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
8269   return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
8270 }
8271 
8272 // Combine (bitreverse (bswap X)) to the BREV8 GREVI encoding if the type is
8273 // smaller than XLenVT.
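// For example, for i16 bitreverse is (GREV X, 15) and bswap is (GREV X, 8),
// so (bitreverse (bswap X)) composes to (GREV X, 15 ^ 8) = (GREV X, 7).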
8274 static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
8275                                         const RISCVSubtarget &Subtarget) {
8276   assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
8277 
8278   SDValue Src = N->getOperand(0);
8279   if (Src.getOpcode() != ISD::BSWAP)
8280     return SDValue();
8281 
8282   EVT VT = N->getValueType(0);
8283   if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() ||
8284       !isPowerOf2_32(VT.getSizeInBits()))
8285     return SDValue();
8286 
8287   SDLoc DL(N);
8288   return DAG.getNode(RISCVISD::GREV, DL, VT, Src.getOperand(0),
8289                      DAG.getConstant(7, DL, VT));
8290 }
8291 
8292 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
8293                                                DAGCombinerInfo &DCI) const {
8294   SelectionDAG &DAG = DCI.DAG;
8295 
8296   // Helper to call SimplifyDemandedBits on an operand of N where only some low
8297   // bits are demanded. N will be added to the Worklist if it was not deleted.
8298   // Caller should return SDValue(N, 0) if this returns true.
8299   auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
8300     SDValue Op = N->getOperand(OpNo);
8301     APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
8302     if (!SimplifyDemandedBits(Op, Mask, DCI))
8303       return false;
8304 
8305     if (N->getOpcode() != ISD::DELETED_NODE)
8306       DCI.AddToWorklist(N);
8307     return true;
8308   };
8309 
8310   switch (N->getOpcode()) {
8311   default:
8312     break;
8313   case RISCVISD::SplitF64: {
8314     SDValue Op0 = N->getOperand(0);
8315     // If the input to SplitF64 is just BuildPairF64 then the operation is
8316     // redundant. Instead, use BuildPairF64's operands directly.
8317     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
8318       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
8319 
8320     if (Op0->isUndef()) {
8321       SDValue Lo = DAG.getUNDEF(MVT::i32);
8322       SDValue Hi = DAG.getUNDEF(MVT::i32);
8323       return DCI.CombineTo(N, Lo, Hi);
8324     }
8325 
8326     SDLoc DL(N);
8327 
8328     // It's cheaper to materialise two 32-bit integers than to load a double
8329     // from the constant pool and transfer it to integer registers through the
8330     // stack.
8331     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
8332       APInt V = C->getValueAPF().bitcastToAPInt();
8333       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
8334       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
8335       return DCI.CombineTo(N, Lo, Hi);
8336     }
8337 
8338     // This is a target-specific version of a DAGCombine performed in
8339     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8340     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8341     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8342     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8343         !Op0.getNode()->hasOneUse())
8344       break;
8345     SDValue NewSplitF64 =
8346         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
8347                     Op0.getOperand(0));
8348     SDValue Lo = NewSplitF64.getValue(0);
8349     SDValue Hi = NewSplitF64.getValue(1);
8350     APInt SignBit = APInt::getSignMask(32);
8351     if (Op0.getOpcode() == ISD::FNEG) {
8352       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
8353                                   DAG.getConstant(SignBit, DL, MVT::i32));
8354       return DCI.CombineTo(N, Lo, NewHi);
8355     }
8356     assert(Op0.getOpcode() == ISD::FABS);
8357     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
8358                                 DAG.getConstant(~SignBit, DL, MVT::i32));
8359     return DCI.CombineTo(N, Lo, NewHi);
8360   }
8361   case RISCVISD::SLLW:
8362   case RISCVISD::SRAW:
8363   case RISCVISD::SRLW: {
8364     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8365     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8366         SimplifyDemandedLowBitsHelper(1, 5))
8367       return SDValue(N, 0);
8368 
8369     break;
8370   }
8371   case ISD::ROTR:
8372   case ISD::ROTL:
8373   case RISCVISD::RORW:
8374   case RISCVISD::ROLW: {
8375     if (N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW) {
8376       // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8377       if (SimplifyDemandedLowBitsHelper(0, 32) ||
8378           SimplifyDemandedLowBitsHelper(1, 5))
8379         return SDValue(N, 0);
8380     }
8381 
8382     return combineROTR_ROTL_RORW_ROLW(N, DAG, Subtarget);
8383   }
8384   case RISCVISD::CLZW:
8385   case RISCVISD::CTZW: {
8386     // Only the lower 32 bits of the first operand are read.
8387     if (SimplifyDemandedLowBitsHelper(0, 32))
8388       return SDValue(N, 0);
8389     break;
8390   }
8391   case RISCVISD::GREV:
8392   case RISCVISD::GORC: {
8393     // Only the lower log2(Bitwidth) bits of the shift amount are read.
8394     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8395     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8396     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
8397       return SDValue(N, 0);
8398 
8399     return combineGREVI_GORCI(N, DAG);
8400   }
8401   case RISCVISD::GREVW:
8402   case RISCVISD::GORCW: {
8403     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8404     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8405         SimplifyDemandedLowBitsHelper(1, 5))
8406       return SDValue(N, 0);
8407 
8408     break;
8409   }
8410   case RISCVISD::SHFL:
8411   case RISCVISD::UNSHFL: {
8412     // Only the lower log2(Bitwidth)-1 bits of the shift amount are read.
8413     unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
8414     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8415     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
8416       return SDValue(N, 0);
8417 
8418     break;
8419   }
8420   case RISCVISD::SHFLW:
8421   case RISCVISD::UNSHFLW: {
8422     // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
8423     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8424         SimplifyDemandedLowBitsHelper(1, 4))
8425       return SDValue(N, 0);
8426 
8427     break;
8428   }
8429   case RISCVISD::BCOMPRESSW:
8430   case RISCVISD::BDECOMPRESSW: {
8431     // Only the lower 32 bits of LHS and RHS are read.
8432     if (SimplifyDemandedLowBitsHelper(0, 32) ||
8433         SimplifyDemandedLowBitsHelper(1, 32))
8434       return SDValue(N, 0);
8435 
8436     break;
8437   }
8438   case RISCVISD::FSR:
8439   case RISCVISD::FSL:
8440   case RISCVISD::FSRW:
8441   case RISCVISD::FSLW: {
8442     bool IsWInstruction =
8443         N->getOpcode() == RISCVISD::FSRW || N->getOpcode() == RISCVISD::FSLW;
8444     unsigned BitWidth =
8445         IsWInstruction ? 32 : N->getSimpleValueType(0).getSizeInBits();
8446     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
8447     // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
8448     if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) + 1))
8449       return SDValue(N, 0);
8450 
8451     break;
8452   }
8453   case RISCVISD::FMV_X_ANYEXTH:
8454   case RISCVISD::FMV_X_ANYEXTW_RV64: {
8455     SDLoc DL(N);
8456     SDValue Op0 = N->getOperand(0);
8457     MVT VT = N->getSimpleValueType(0);
8458     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
8459     // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
8460     // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
8461     if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
8462          Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
8463         (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
8464          Op0->getOpcode() == RISCVISD::FMV_H_X)) {
8465       assert(Op0.getOperand(0).getValueType() == VT &&
8466              "Unexpected value type!");
8467       return Op0.getOperand(0);
8468     }
8469 
8470     // This is a target-specific version of a DAGCombine performed in
8471     // DAGCombiner::visitBITCAST. It performs the equivalent of:
8472     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
8473     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
8474     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
8475         !Op0.getNode()->hasOneUse())
8476       break;
8477     SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
8478     unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
8479     APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
8480     if (Op0.getOpcode() == ISD::FNEG)
8481       return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
8482                          DAG.getConstant(SignBit, DL, VT));
8483 
8484     assert(Op0.getOpcode() == ISD::FABS);
8485     return DAG.getNode(ISD::AND, DL, VT, NewFMV,
8486                        DAG.getConstant(~SignBit, DL, VT));
8487   }
8488   case ISD::ADD:
8489     return performADDCombine(N, DAG, Subtarget);
8490   case ISD::SUB:
8491     return performSUBCombine(N, DAG);
8492   case ISD::AND:
8493     return performANDCombine(N, DAG);
8494   case ISD::OR:
8495     return performORCombine(N, DAG, Subtarget);
8496   case ISD::XOR:
8497     return performXORCombine(N, DAG);
8498   case ISD::SIGN_EXTEND_INREG:
8499     return performSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
8500   case ISD::ZERO_EXTEND:
8501     // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
8502     // type legalization. This is safe because fp_to_uint produces poison if
8503     // it overflows.
8504     if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) {
8505       SDValue Src = N->getOperand(0);
8506       if (Src.getOpcode() == ISD::FP_TO_UINT &&
8507           isTypeLegal(Src.getOperand(0).getValueType()))
8508         return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
8509                            Src.getOperand(0));
8510       if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() &&
8511           isTypeLegal(Src.getOperand(1).getValueType())) {
8512         SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other);
8513         SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs,
8514                                   Src.getOperand(0), Src.getOperand(1));
8515         DCI.CombineTo(N, Res);
8516         DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1));
8517         DCI.recursivelyDeleteUnusedNodes(Src.getNode());
8518         return SDValue(N, 0); // Return N so it doesn't get rechecked.
8519       }
8520     }
8521     return SDValue();
8522   case RISCVISD::SELECT_CC: {
8524     SDValue LHS = N->getOperand(0);
8525     SDValue RHS = N->getOperand(1);
8526     SDValue TrueV = N->getOperand(3);
8527     SDValue FalseV = N->getOperand(4);
8528 
8529     // If the True and False values are the same, we don't need a select_cc.
8530     if (TrueV == FalseV)
8531       return TrueV;
8532 
8533     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
8534     if (!ISD::isIntEqualitySetCC(CCVal))
8535       break;
8536 
8537     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
8538     //      (select_cc X, Y, lt, trueV, falseV)
8539     // Sometimes the setcc is introduced after select_cc has been formed.
8540     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8541         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8542       // If we're looking for eq 0 instead of ne 0, we need to invert the
8543       // condition.
8544       bool Invert = CCVal == ISD::SETEQ;
8545       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8546       if (Invert)
8547         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8548 
8549       SDLoc DL(N);
8550       RHS = LHS.getOperand(1);
8551       LHS = LHS.getOperand(0);
8552       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8553 
8554       SDValue TargetCC = DAG.getCondCode(CCVal);
8555       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8556                          {LHS, RHS, TargetCC, TrueV, FalseV});
8557     }
8558 
8559     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
8560     //      (select_cc X, Y, eq/ne, trueV, falseV)
8561     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8562       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
8563                          {LHS.getOperand(0), LHS.getOperand(1),
8564                           N->getOperand(2), TrueV, FalseV});
8565     // (select_cc X, 1, setne, trueV, falseV) ->
8566     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
8567     // This can occur when legalizing some floating point comparisons.
8568     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8569     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8570       SDLoc DL(N);
8571       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8572       SDValue TargetCC = DAG.getCondCode(CCVal);
8573       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8574       return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
8575                          {LHS, RHS, TargetCC, TrueV, FalseV});
8576     }
8577 
8578     break;
8579   }
8580   case RISCVISD::BR_CC: {
8581     SDValue LHS = N->getOperand(1);
8582     SDValue RHS = N->getOperand(2);
8583     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
8584     if (!ISD::isIntEqualitySetCC(CCVal))
8585       break;
8586 
8587     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
8588     //      (br_cc X, Y, lt, dest)
8589     // Sometimes the setcc is introduced after br_cc has been formed.
8590     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
8591         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
8592       // If we're looking for eq 0 instead of ne 0, we need to invert the
8593       // condition.
8594       bool Invert = CCVal == ISD::SETEQ;
8595       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
8596       if (Invert)
8597         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8598 
8599       SDLoc DL(N);
8600       RHS = LHS.getOperand(1);
8601       LHS = LHS.getOperand(0);
8602       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
8603 
8604       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8605                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
8606                          N->getOperand(4));
8607     }
8608 
8609     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
8610     //      (br_cc X, Y, eq/ne, dest)
8611     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
8612       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
8613                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
8614                          N->getOperand(3), N->getOperand(4));
8615 
8616     // (br_cc X, 1, setne, dest) ->
8617     // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
8618     // This can occur when legalizing some floating point comparisons.
8619     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
8620     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
8621       SDLoc DL(N);
8622       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
8623       SDValue TargetCC = DAG.getCondCode(CCVal);
8624       RHS = DAG.getConstant(0, DL, LHS.getValueType());
8625       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
8626                          N->getOperand(0), LHS, RHS, TargetCC,
8627                          N->getOperand(4));
8628     }
8629     break;
8630   }
8631   case ISD::BITREVERSE:
8632     return performBITREVERSECombine(N, DAG, Subtarget);
8633   case ISD::FP_TO_SINT:
8634   case ISD::FP_TO_UINT:
8635     return performFP_TO_INTCombine(N, DCI, Subtarget);
8636   case ISD::FP_TO_SINT_SAT:
8637   case ISD::FP_TO_UINT_SAT:
8638     return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
8639   case ISD::FCOPYSIGN: {
8640     EVT VT = N->getValueType(0);
8641     if (!VT.isVector())
8642       break;
8643     // There is a form of VFSGNJ which injects the negated sign of its second
8644     // operand. Try and bubble any FNEG up after the extend/round to produce
8645     // this optimized pattern. Avoid modifying cases where the FP_ROUND is
8646     // truncating, i.e. has TRUNC=1.
8647     SDValue In2 = N->getOperand(1);
8648     // Avoid cases where the extend/round has multiple uses, as duplicating
8649     // those is typically more expensive than removing a fneg.
8650     if (!In2.hasOneUse())
8651       break;
8652     if (In2.getOpcode() != ISD::FP_EXTEND &&
8653         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
8654       break;
8655     In2 = In2.getOperand(0);
8656     if (In2.getOpcode() != ISD::FNEG)
8657       break;
8658     SDLoc DL(N);
8659     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
8660     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
8661                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
8662   }
8663   case ISD::MGATHER:
8664   case ISD::MSCATTER:
8665   case ISD::VP_GATHER:
8666   case ISD::VP_SCATTER: {
8667     if (!DCI.isBeforeLegalize())
8668       break;
8669     SDValue Index, ScaleOp;
8670     bool IsIndexScaled = false;
8671     bool IsIndexSigned = false;
8672     if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
8673       Index = VPGSN->getIndex();
8674       ScaleOp = VPGSN->getScale();
8675       IsIndexScaled = VPGSN->isIndexScaled();
8676       IsIndexSigned = VPGSN->isIndexSigned();
8677     } else {
8678       const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
8679       Index = MGSN->getIndex();
8680       ScaleOp = MGSN->getScale();
8681       IsIndexScaled = MGSN->isIndexScaled();
8682       IsIndexSigned = MGSN->isIndexSigned();
8683     }
8684     EVT IndexVT = Index.getValueType();
8685     MVT XLenVT = Subtarget.getXLenVT();
8686     // RISCV indexed loads and stores only support the "unsigned unscaled"
8687     // addressing mode, so anything else must be manually legalized.
8688     bool NeedsIdxLegalization =
8689         IsIndexScaled ||
8690         (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
8691     if (!NeedsIdxLegalization)
8692       break;
8693 
8694     SDLoc DL(N);
8695 
8696     // Any index legalization should first promote to XLenVT, so we don't lose
8697     // bits when scaling. This may create an illegal index type so we let
8698     // LLVM's legalization take care of the splitting.
8699     // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
8700     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
8701       IndexVT = IndexVT.changeVectorElementType(XLenVT);
8702       Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
8703                           DL, IndexVT, Index);
8704     }
8705 
8706     unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
8707     if (IsIndexScaled && Scale != 1) {
8708       // Manually scale the indices by the element size.
8709       // TODO: Sanitize the scale operand here?
8710       // TODO: For VP nodes, should we use VP_SHL here?
8711       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
8712       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
8713       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
8714     }

    ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
    if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
      return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
                             {VPGN->getChain(), VPGN->getBasePtr(), Index,
                              VPGN->getScale(), VPGN->getMask(),
                              VPGN->getVectorLength()},
                             VPGN->getMemOperand(), NewIndexTy);
    if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
      return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
                              {VPSN->getChain(), VPSN->getValue(),
                               VPSN->getBasePtr(), Index, VPSN->getScale(),
                               VPSN->getMask(), VPSN->getVectorLength()},
                              VPSN->getMemOperand(), NewIndexTy);
    if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
      return DAG.getMaskedGather(
          N->getVTList(), MGN->getMemoryVT(), DL,
          {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
           MGN->getBasePtr(), Index, MGN->getScale()},
          MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
    const auto *MSN = cast<MaskedScatterSDNode>(N);
    return DAG.getMaskedScatter(
        N->getVTList(), MSN->getMemoryVT(), DL,
        {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
         Index, MSN->getScale()},
        MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
  }
  case RISCVISD::SRA_VL:
  case RISCVISD::SRL_VL:
  case RISCVISD::SHL_VL: {
    SDValue ShAmt = N->getOperand(1);
    if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
      // We don't need the upper 32 bits of a 64-bit element for a shift amount.
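      // A sketch of the rewrite: rather than materialising the full 64-bit
      // splat from two 32-bit halves, splat only the low half with vmv.v.x,
      // since vector shifts use at most the low log2(SEW) bits of each
      // element of the shift-amount operand.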
      SDLoc DL(N);
      SDValue VL = N->getOperand(3);
      EVT VT = N->getValueType(0);
      ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
                          ShAmt.getOperand(1), VL);
      return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
                         N->getOperand(2), N->getOperand(3));
    }
    break;
  }
  case ISD::SRA:
  case ISD::SRL:
  case ISD::SHL: {
    SDValue ShAmt = N->getOperand(1);
    if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
      // We don't need the upper 32 bits of a 64-bit element for a shift amount.
      SDLoc DL(N);
      EVT VT = N->getValueType(0);
      ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
                          ShAmt.getOperand(1),
                          DAG.getRegister(RISCV::X0, Subtarget.getXLenVT()));
      return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
    }
    break;
  }
  case RISCVISD::ADD_VL:
    if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false))
      return V;
    return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true);
  case RISCVISD::SUB_VL:
    return combineADDSUB_VLToVWADDSUB_VL(N, DAG);
  case RISCVISD::VWADD_W_VL:
  case RISCVISD::VWADDU_W_VL:
  case RISCVISD::VWSUB_W_VL:
  case RISCVISD::VWSUBU_W_VL:
    return combineVWADD_W_VL_VWSUB_W_VL(N, DAG);
  case RISCVISD::MUL_VL:
    if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false))
      return V;
    // Mul is commutative.
    return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true);
  case ISD::STORE: {
    auto *Store = cast<StoreSDNode>(N);
    SDValue Val = Store->getValue();
    // Combine store of vmv.x.s to vse with VL of 1.
    // FIXME: Support FP.
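    // A sketch of the combine: a scalar store of the extracted first element,
    //   (store (vmv_x_s Vec), Addr)
    // becomes a vector store of Vec itself with VL = 1,
    //   (vp_store Vec, Addr, AllOnesMask, VL=1)
    // avoiding the round trip through a scalar register.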
    if (Val.getOpcode() == RISCVISD::VMV_X_S) {
      SDValue Src = Val.getOperand(0);
      EVT VecVT = Src.getValueType();
      EVT MemVT = Store->getMemoryVT();
      // The memory VT and the element type must match.
      if (VecVT.getVectorElementType() == MemVT) {
        SDLoc DL(N);
        MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
        return DAG.getStoreVP(
            Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(),
            DAG.getConstant(1, DL, MaskVT),
            DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT,
            Store->getMemOperand(), Store->getAddressingMode(),
            Store->isTruncatingStore(), /*IsCompress*/ false);
      }
    }

    break;
  }
  case ISD::SPLAT_VECTOR: {
    EVT VT = N->getValueType(0);
    // Only perform this combine on legal MVT types.
    if (!isTypeLegal(VT))
      break;
    if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N,
                                         DAG, Subtarget))
      return Gather;
    break;
  }
  case RISCVISD::VMV_V_X_VL: {
    // Tail agnostic VMV.V.X only demands the vector element bitwidth from the
    // scalar input.
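    // For example, a vmv.v.x splat of an i64 scalar into a vector of i8
    // elements only demands the low 8 bits of the scalar, so wider
    // computations feeding it may be simplified away.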
    unsigned ScalarSize = N->getOperand(1).getValueSizeInBits();
    unsigned EltWidth = N->getValueType(0).getScalarSizeInBits();
    if (ScalarSize > EltWidth && N->getOperand(0).isUndef())
      if (SimplifyDemandedLowBitsHelper(1, EltWidth))
        return SDValue(N, 0);

    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = N->getConstantOperandVal(0);
    switch (IntNo) {
      // By default we do not combine any intrinsic.
    default:
      return SDValue();
    case Intrinsic::riscv_vcpop:
    case Intrinsic::riscv_vcpop_mask:
    case Intrinsic::riscv_vfirst:
    case Intrinsic::riscv_vfirst_mask: {
      SDValue VL = N->getOperand(2);
      if (IntNo == Intrinsic::riscv_vcpop_mask ||
          IntNo == Intrinsic::riscv_vfirst_mask)
        VL = N->getOperand(3);
      if (!isNullConstant(VL))
        return SDValue();
      // If VL is 0, vcpop -> li 0, vfirst -> li -1.
      SDLoc DL(N);
      EVT VT = N->getValueType(0);
      if (IntNo == Intrinsic::riscv_vfirst ||
          IntNo == Intrinsic::riscv_vfirst_mask)
        return DAG.getConstant(-1, DL, VT);
      return DAG.getConstant(0, DL, VT);
    }
    }
  }
  }

  return SDValue();
}

bool RISCVTargetLowering::isDesirableToCommuteWithShift(
    const SDNode *N, CombineLevel Level) const {
  // The following folds are only desirable if `(OP _, c1 << c2)` can be
  // materialised in fewer instructions than `(OP _, c1)`:
  //
  //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
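  // For example, for (shl (add x, 100), 3) the rewritten constant
  // 100 << 3 = 800 still fits in a 12-bit signed immediate, so the fold is
  // allowed; with c1 = 2047 and c2 = 4, c1 itself fits but 2047 << 4 = 32752
  // does not, so the fold is prevented.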
  SDValue N0 = N->getOperand(0);
  EVT Ty = N0.getValueType();
  if (Ty.isScalarInteger() &&
      (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
    auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (C1 && C2) {
      const APInt &C1Int = C1->getAPIntValue();
      APInt ShiftedC1Int = C1Int << C2->getAPIntValue();

      // We can materialise `c1 << c2` into an add immediate, so it's "free",
      // and the combine should happen, to potentially allow further combines
      // later.
      if (ShiftedC1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
        return true;

      // We can materialise `c1` in an add immediate, so it's "free", and the
      // combine should be prevented.
      if (C1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(C1Int.getSExtValue()))
        return false;

      // Neither constant will fit into an immediate, so find materialisation
      // costs.
      int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
                                              Subtarget.getFeatureBits(),
                                              /*CompressionCost*/true);
      int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
          ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
          /*CompressionCost*/true);

      // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
      // combine should be prevented.
      if (C1Cost < ShiftedC1Cost)
        return false;
    }
  }
  return true;
}

bool RISCVTargetLowering::targetShrinkDemandedConstant(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    TargetLoweringOpt &TLO) const {
  // Delay this optimization as late as possible.
  if (!TLO.LegalOps)
    return false;

  EVT VT = Op.getValueType();
  if (VT.isVector())
    return false;

  // Only handle AND for now.
  if (Op.getOpcode() != ISD::AND)
    return false;

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;

  const APInt &Mask = C->getAPIntValue();

  // Clear all non-demanded bits initially.
  APInt ShrunkMask = Mask & DemandedBits;

  // Try to make a smaller immediate by setting undemanded bits.

  APInt ExpandedMask = Mask | ~DemandedBits;

  auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
    return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
  };
  auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
    if (NewMask == Mask)
      return true;
    SDLoc DL(Op);
    SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
    SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
    return TLO.CombineTo(Op, NewOp);
  };

  // If the shrunk mask fits in sign extended 12 bits, let the target
  // independent code apply it.
  if (ShrunkMask.isSignedIntN(12))
    return false;

  // Preserve (and X, 0xffff) when zext.h is supported.
  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
    APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
    if (IsLegalMask(NewMask))
      return UseMask(NewMask);
  }

  // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
  if (VT == MVT::i64) {
    APInt NewMask = APInt(64, 0xffffffff);
    if (IsLegalMask(NewMask))
      return UseMask(NewMask);
  }

  // For the remaining optimizations, we need to be able to make a negative
  // number through a combination of mask and undemanded bits.
  if (!ExpandedMask.isNegative())
    return false;

  // The minimum number of bits needed to represent the negative number.
  unsigned MinSignedBits = ExpandedMask.getMinSignedBits();

  // Try to make a 12 bit negative immediate. If that fails try to make a 32
  // bit negative immediate unless the shrunk immediate already fits in 32 bits.
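  // A worked example on RV64: with Mask = 0xff00 and DemandedBits = 0xffff,
  // ShrunkMask = 0xff00 and ExpandedMask = 0xffffffffffffff00, which is
  // negative with 9 significant bits. Setting bits 11 and up in ShrunkMask
  // yields 0xffffffffffffff00, i.e. -256, which fits ANDI's 12-bit immediate.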
  APInt NewMask = ShrunkMask;
  if (MinSignedBits <= 12)
    NewMask.setBitsFrom(11);
  else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
    NewMask.setBitsFrom(31);
  else
    return false;

  // Check that our new mask is a subset of the demanded mask.
  assert(IsLegalMask(NewMask));
  return UseMask(NewMask);
}

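// Compute the result of applying a GREV (generalized bit-reverse) or GORC
// (generalized OR-combine) permutation to a constant. Each set bit in ShAmt
// swaps (for GORC, additionally ORs in) adjacent blocks of the matching size.
// For example, computeGREVOrGORC(0x01, 7, false) == 0x80: a bit reversal
// within each byte.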
static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC) {
  static const uint64_t GREVMasks[] = {
      0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
      0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};

  for (unsigned Stage = 0; Stage != 6; ++Stage) {
    unsigned Shift = 1 << Stage;
    if (ShAmt & Shift) {
      uint64_t Mask = GREVMasks[Stage];
      uint64_t Res = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
      if (IsGORC)
        Res |= x;
      x = Res;
    }
  }

  return x;
}

void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                        KnownBits &Known,
                                                        const APInt &DemandedElts,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
  unsigned BitWidth = Known.getBitWidth();
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  Known.resetAll();
  switch (Opc) {
  default: break;
  case RISCVISD::SELECT_CC: {
    Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
    // If we don't know any bits, early out.
    if (Known.isUnknown())
      break;
    KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);
    break;
  }
  case RISCVISD::REMUW: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    // We only care about the lower 32 bits.
    Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
    // Restore the original width by sign extending.
    Known = Known.sext(BitWidth);
    break;
  }
  case RISCVISD::DIVUW: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    // We only care about the lower 32 bits.
    Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
    // Restore the original width by sign extending.
    Known = Known.sext(BitWidth);
    break;
  }
  case RISCVISD::CTZW: {
    KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
    unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
    unsigned LowBits = Log2_32(PossibleTZ) + 1;
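    // The count of trailing zeros is at most PossibleTZ, so it fits in
    // Log2_32(PossibleTZ) + 1 bits; e.g. when PossibleTZ is 32 the result
    // needs at most 6 bits.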
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case RISCVISD::CLZW: {
    KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
    unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
    unsigned LowBits = Log2_32(PossibleLZ) + 1;
    Known.Zero.setBitsFrom(LowBits);
    break;
  }
  case RISCVISD::GREV:
  case RISCVISD::GORC: {
    if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
      unsigned ShAmt = C->getZExtValue() & (Known.getBitWidth() - 1);
      bool IsGORC = Op.getOpcode() == RISCVISD::GORC;
      // To compute zeros, we need to invert the value and invert it back after.
      Known.Zero =
          ~computeGREVOrGORC(~Known.Zero.getZExtValue(), ShAmt, IsGORC);
      Known.One = computeGREVOrGORC(Known.One.getZExtValue(), ShAmt, IsGORC);
    }
    break;
  }
  case RISCVISD::READ_VLENB: {
    // If we know the minimum VLen from Zvl extensions, we can use that to
    // determine the trailing zeros of VLENB.
    // FIXME: Limit to 128 bit vectors until we have more testing.
    unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8;
    if (MinVLenB > 0)
      Known.Zero.setLowBits(Log2_32(MinVLenB));
    // We assume VLENB is no more than 65536 / 8 bytes.
    Known.Zero.setBitsFrom(14);
    break;
  }
  case ISD::INTRINSIC_W_CHAIN:
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo =
        Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1);
    switch (IntNo) {
    default:
      // We can't do anything for most intrinsics.
      break;
    case Intrinsic::riscv_vsetvli:
    case Intrinsic::riscv_vsetvlimax:
    case Intrinsic::riscv_vsetvli_opt:
    case Intrinsic::riscv_vsetvlimax_opt:
      // Assume that VL output is positive and would fit in an int32_t.
      // TODO: VLEN might be capped at 16 bits in a future V spec update.
      if (BitWidth >= 32)
        Known.Zero.setBitsFrom(31);
      break;
    }
    break;
  }
  }
}

unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case RISCVISD::SELECT_CC: {
    unsigned Tmp =
        DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
    if (Tmp == 1) return 1;  // Early out.
    unsigned Tmp2 =
        DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
    return std::min(Tmp, Tmp2);
  }
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW:
  case RISCVISD::DIVW:
  case RISCVISD::DIVUW:
  case RISCVISD::REMUW:
  case RISCVISD::ROLW:
  case RISCVISD::RORW:
  case RISCVISD::GREVW:
  case RISCVISD::GORCW:
  case RISCVISD::FSLW:
  case RISCVISD::FSRW:
  case RISCVISD::SHFLW:
  case RISCVISD::UNSHFLW:
  case RISCVISD::BCOMPRESSW:
  case RISCVISD::BDECOMPRESSW:
  case RISCVISD::BFPW:
  case RISCVISD::FCVT_W_RV64:
  case RISCVISD::FCVT_WU_RV64:
  case RISCVISD::STRICT_FCVT_W_RV64:
  case RISCVISD::STRICT_FCVT_WU_RV64:
    // TODO: As the result is sign-extended, this is conservatively correct. A
    // more precise answer could be calculated for SRAW depending on known
    // bits in the shift amount.
    return 33;
  case RISCVISD::SHFL:
  case RISCVISD::UNSHFL: {
    // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word
    // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
    // will stay within the upper 32 bits. If there were more than 32 sign bits
    // before, there will be at least 33 sign bits after.
    if (Op.getValueType() == MVT::i64 &&
        isa<ConstantSDNode>(Op.getOperand(1)) &&
        (Op.getConstantOperandVal(1) & 0x10) == 0) {
      unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
      if (Tmp > 32)
        return 33;
    }
    break;
  }
  case RISCVISD::VMV_X_S: {
    // The number of sign bits of the scalar result is computed by obtaining the
    // element type of the input vector operand, subtracting its width from the
    // XLEN, and then adding one (sign bit within the element type). If the
    // element type is wider than XLen, the least-significant XLEN bits are
    // taken.
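    // For example, extracting an i8 element on RV64 gives 64 - 8 + 1 = 57
    // known sign bits, as the element is sign-extended to XLEN.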
    unsigned XLen = Subtarget.getXLen();
    unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits();
    if (EltBits <= XLen)
      return XLen - EltBits + 1;
    break;
  }
  }

  return 1;
}

static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
                                                  MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");

  // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
  // Should the count have wrapped while it was being read, we need to try
  // again.
  // ...
  // read:
  // rdcycleh x3 # load high word of cycle
  // rdcycle  x2 # load low word of cycle
  // rdcycleh x4 # load high word of cycle
  // bne x3, x4, read # check if high word reads match, otherwise try again
  // ...

  MachineFunction &MF = *BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MF.insert(It, LoopMBB);

  MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MF.insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(LoopMBB);

  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  Register LoReg = MI.getOperand(0).getReg();
  Register HiReg = MI.getOperand(1).getReg();
  DebugLoc DL = MI.getDebugLoc();

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
      .addReg(RISCV::X0);
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
      .addReg(RISCV::X0);
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
      .addReg(RISCV::X0);

  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(HiReg)
      .addReg(ReadAgainReg)
      .addMBB(LoopMBB);

  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();

  return DoneMBB;
}

static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
                                             MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");

  MachineFunction &MF = *BB->getParent();
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  Register LoReg = MI.getOperand(0).getReg();
  Register HiReg = MI.getOperand(1).getReg();
  Register SrcReg = MI.getOperand(2).getReg();
  const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);

  TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
                          RI);
  MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMOLo =
      MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
  MachineMemOperand *MMOHi = MF.getMachineMemOperand(
      MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMOLo);
  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
      .addFrameIndex(FI)
      .addImm(4)
      .addMemOperand(MMOHi);
  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
                                                 MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
         "Unexpected instruction");

  MachineFunction &MF = *BB->getParent();
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  Register DstReg = MI.getOperand(0).getReg();
  Register LoReg = MI.getOperand(1).getReg();
  Register HiReg = MI.getOperand(2).getReg();
  const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);

  MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMOLo =
      MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
  MachineMemOperand *MMOHi = MF.getMachineMemOperand(
      MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
      .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMOLo);
  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
      .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
      .addFrameIndex(FI)
      .addImm(4)
      .addMemOperand(MMOHi);
  TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

static bool isSelectPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case RISCV::Select_GPR_Using_CC_GPR:
  case RISCV::Select_FPR16_Using_CC_GPR:
  case RISCV::Select_FPR32_Using_CC_GPR:
  case RISCV::Select_FPR64_Using_CC_GPR:
    return true;
  }
}

static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
                                        unsigned RelOpcode, unsigned EqOpcode,
                                        const RISCVSubtarget &Subtarget) {
  DebugLoc DL = MI.getDebugLoc();
  Register DstReg = MI.getOperand(0).getReg();
  Register Src1Reg = MI.getOperand(1).getReg();
  Register Src2Reg = MI.getOperand(2).getReg();
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();

  // Save the current FFLAGS.
  BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags);

  auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg)
                 .addReg(Src1Reg)
                 .addReg(Src2Reg);
  if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
    MIB->setFlag(MachineInstr::MIFlag::NoFPExcept);

  // Restore the FFLAGS.
  BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS))
      .addReg(SavedFFlags, RegState::Kill);

  // Issue a dummy FEQ opcode to raise exceptions for signaling NaNs.
  auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0)
                  .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill()))
                  .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill()));
  if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept))
    MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept);

  // Erase the pseudoinstruction.
  MI.eraseFromParent();
  return BB;
}

static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
                                           MachineBasicBlock *BB,
                                           const RISCVSubtarget &Subtarget) {
  // To "insert" Select_* instructions, we actually have to insert the triangle
  // control-flow pattern.  The incoming instructions know the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and the condcode to use to select the appropriate branch.
  //
  // We produce the following control flow:
  //     HeadMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    TailMBB
  //
  // When we find a sequence of selects we attempt to optimize their emission
  // by sharing the control flow. Currently we only handle cases where we have
  // multiple selects with the exact same condition (same LHS, RHS and CC).
  // The selects may be interleaved with other instructions if the other
  // instructions meet some requirements we deem safe:
  // - They are debug instructions. Otherwise,
  // - They do not have side-effects, do not access memory and their inputs do
  //   not depend on the results of the select pseudo-instructions.
  // The TrueV/FalseV operands of the selects cannot depend on the result of
  // previous selects in the sequence.
  // These conditions could be further relaxed. See the X86 target for a
  // related approach and more information.
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());

  SmallVector<MachineInstr *, 4> SelectDebugValues;
  SmallSet<Register, 4> SelectDests;
  SelectDests.insert(MI.getOperand(0).getReg());

  MachineInstr *LastSelectPseudo = &MI;

  for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
       SequenceMBBI != E; ++SequenceMBBI) {
    if (SequenceMBBI->isDebugInstr())
      continue;
    else if (isSelectPseudo(*SequenceMBBI)) {
      if (SequenceMBBI->getOperand(1).getReg() != LHS ||
          SequenceMBBI->getOperand(2).getReg() != RHS ||
          SequenceMBBI->getOperand(3).getImm() != CC ||
          SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
          SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
        break;
      LastSelectPseudo = &*SequenceMBBI;
      SequenceMBBI->collectDebugValues(SelectDebugValues);
      SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
    } else {
      if (SequenceMBBI->hasUnmodeledSideEffects() ||
          SequenceMBBI->mayLoadOrStore())
        break;
      if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
            return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
          }))
        break;
    }
  }

  const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction::iterator I = ++BB->getIterator();

  MachineBasicBlock *HeadMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, IfFalseMBB);
  F->insert(I, TailMBB);

  // Transfer debug instructions associated with the selects to TailMBB.
  for (MachineInstr *DebugInstr : SelectDebugValues) {
    TailMBB->push_back(DebugInstr->removeFromParent());
  }

  // Move all instructions after the sequence to TailMBB.
  TailMBB->splice(TailMBB->end(), HeadMBB,
                  std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi nodes for the selects.
  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
  // Set the successors for HeadMBB.
  HeadMBB->addSuccessor(IfFalseMBB);
  HeadMBB->addSuccessor(TailMBB);

  // Insert appropriate branch.
  BuildMI(HeadMBB, DL, TII.getBrCond(CC))
    .addReg(LHS)
    .addReg(RHS)
    .addMBB(TailMBB);

  // IfFalseMBB just falls through to TailMBB.
  IfFalseMBB->addSuccessor(TailMBB);

  // Create PHIs for all of the select pseudo-instructions.
  auto SelectMBBI = MI.getIterator();
  auto SelectEnd = std::next(LastSelectPseudo->getIterator());
  auto InsertionPoint = TailMBB->begin();
  while (SelectMBBI != SelectEnd) {
    auto Next = std::next(SelectMBBI);
    if (isSelectPseudo(*SelectMBBI)) {
      // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
      BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
              TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
          .addReg(SelectMBBI->getOperand(4).getReg())
          .addMBB(HeadMBB)
          .addReg(SelectMBBI->getOperand(5).getReg())
          .addMBB(IfFalseMBB);
      SelectMBBI->eraseFromParent();
    }
    SelectMBBI = Next;
  }

  F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
  return TailMBB;
}

MachineBasicBlock *
RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case RISCV::ReadCycleWide:
    assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
    return emitReadCycleWidePseudo(MI, BB);
  case RISCV::Select_GPR_Using_CC_GPR:
  case RISCV::Select_FPR16_Using_CC_GPR:
  case RISCV::Select_FPR32_Using_CC_GPR:
  case RISCV::Select_FPR64_Using_CC_GPR:
    return emitSelectPseudo(MI, BB, Subtarget);
  case RISCV::BuildPairF64Pseudo:
    return emitBuildPairF64Pseudo(MI, BB);
  case RISCV::SplitF64Pseudo:
    return emitSplitF64Pseudo(MI, BB);
  case RISCV::PseudoQuietFLE_H:
    return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
  case RISCV::PseudoQuietFLT_H:
    return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
  case RISCV::PseudoQuietFLE_S:
    return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
  case RISCV::PseudoQuietFLT_S:
    return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
  case RISCV::PseudoQuietFLE_D:
    return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
  case RISCV::PseudoQuietFLT_D:
    return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
  }
}

void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                        SDNode *Node) const {
  // Add FRM dependency to any instructions with dynamic rounding mode.
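  // For example, an FADD whose rounding-mode operand is DYN depends on the
  // current contents of the FRM CSR, so it is given an implicit use of FRM
  // below.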
  unsigned Opc = MI.getOpcode();
  auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm);
  if (Idx < 0)
    return;
  if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN)
    return;
  // If the instruction already reads FRM, don't add another read.
  if (MI.readsRegister(RISCV::FRM))
    return;
  MI.addOperand(
      MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true));
}

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
// passed in a pair of registers (fp+fp, int+fp), and both registers are
// available, then pass as two separate arguments. If either the GPRs or FPRs
// are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
// slot (as it is larger than 2*XLEN and the floating point rules don't
// apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
// word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
// not based on its size and fields. If it will be returned by reference, the
// frontend must modify the prototype so a pointer with the sret annotation is
// passed as the first argument. This is not necessary for large scalar
// returns.
// * Struct return values and varargs should be coerced to structs containing
// register-size fields in the same situations they would be for fixed
// arguments.

static const MCPhysReg ArgGPRs[] = {
  RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
  RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
};
static const MCPhysReg ArgFPR16s[] = {
  RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
  RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
};
static const MCPhysReg ArgFPR32s[] = {
  RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
  RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
};
static const MCPhysReg ArgFPR64s[] = {
  RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
  RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
};
// This is an interim calling convention and it may be changed in the future.
static const MCPhysReg ArgVRs[] = {
    RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
    RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
    RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
                                     RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
                                     RISCV::V20M2, RISCV::V22M2};
static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
                                     RISCV::V20M4};
static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
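// For example, on RV32 an i64 split into two i32 halves when only a7 remains
// free is passed with its low half in a7 and its high half on the stack (a
// sketch of one case handled below).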
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2) {
  unsigned XLenInBytes = XLen / 8;
  if (Register Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    Align StackAlign =
        std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
    return false;
  }

  if (Register Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
  }

  return false;
}

static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
                               Optional<unsigned> FirstMaskArgument,
                               CCState &State, const RISCVTargetLowering &TLI) {
  const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
  if (RC == &RISCV::VRRegClass) {
    // Assign the first mask argument to V0.
    // This is an interim calling convention and it may be changed in the
    // future.
    if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
      return State.AllocateReg(RISCV::V0);
    return State.AllocateReg(ArgVRs);
  }
  if (RC == &RISCV::VRM2RegClass)
    return State.AllocateReg(ArgVRM2s);
  if (RC == &RISCV::VRM4RegClass)
    return State.AllocateReg(ArgVRM4s);
  if (RC == &RISCV::VRM8RegClass)
    return State.AllocateReg(ArgVRM8s);
  llvm_unreachable("Unhandled register class for ValueType");
}

// Implements the RISC-V calling convention. Returns true upon failure.
static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                     MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                     ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                     bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
                     Optional<unsigned> FirstMaskArgument) {
  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
  assert(XLen == 32 || XLen == 64);
  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
  if (!LocVT.isVector() && IsRet && ValNo > 1)
    return true;

  // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
  // variadic argument, or if no F16/F32 argument registers are available.
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
  // variadic argument, or if no F64 argument registers are available.
  bool UseGPRForF64 = true;

  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    break;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    UseGPRForF16_F32 = !IsFixed;
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    UseGPRForF16_F32 = !IsFixed;
    UseGPRForF64 = !IsFixed;
    break;
  }

  // FPR16, FPR32, and FPR64 alias each other.
  if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
    UseGPRForF16_F32 = true;
    UseGPRForF64 = true;
  }

  // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
  // similar local variables rather than directly checking against the target
  // ABI.

  if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::BCvt;
  } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
    LocVT = MVT::i64;
    LocInfo = CCValAssign::BCvt;
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
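  // For example, a variadic double on RV32 for which a3 would be the next
  // free register skips a3 and is passed in the aligned pair a4/a5.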
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft float ABI or when floating point
  // registers are exhausted.
  if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
    assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
           "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
    // GPRs, split between a GPR and the stack, or passed completely on the
    // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
    // cases.
    Register Reg = State.AllocateReg(ArgGPRs);
    LocVT = MVT::i32;
    if (!Reg) {
      unsigned StackOffset = State.AllocateStack(8, Align(8));
      State.addLoc(
          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      return false;
    }
    if (!State.AllocateReg(ArgGPRs))
      State.AllocateStack(4, Align(4));
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  // Fixed-length vectors are located in the corresponding scalable-vector
  // container types.
  if (ValVT.isFixedLengthVector())
    LocVT = TLI.getContainerForFixedLengthVector(LocVT);

  // Split arguments might be passed indirectly, so keep track of the pending
  // values. Split vectors are passed via a mix of registers and indirectly, so
  // treat them as we would any other argument.
  if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::Indirect;
    PendingLocs.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    PendingArgFlags.push_back(ArgFlags);
    if (!ArgFlags.isSplitEnd()) {
      return false;
    }
  }

  // If the split argument only had two elements, it should be passed directly
  // in registers or on the stack.
  if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
      PendingLocs.size() <= 2) {
    assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
    // Apply the normal calling convention rules to the first half of the
    // split argument.
    CCValAssign VA = PendingLocs[0];
    ISD::ArgFlagsTy AF = PendingArgFlags[0];
    PendingLocs.clear();
    PendingArgFlags.clear();
    return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
                               ArgFlags);
  }

  // Allocate to a register if possible, or else a stack slot.
  Register Reg;
  unsigned StoreSizeBytes = XLen / 8;
  Align StackAlign = Align(XLen / 8);

  if (ValVT == MVT::f16 && !UseGPRForF16_F32)
    Reg = State.AllocateReg(ArgFPR16s);
  else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
    Reg = State.AllocateReg(ArgFPR32s);
  else if (ValVT == MVT::f64 && !UseGPRForF64)
    Reg = State.AllocateReg(ArgFPR64s);
  else if (ValVT.isVector()) {
    Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
    if (!Reg) {
      // For return values, the vector must be passed fully via registers or
      // via the stack.
      // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
      // but we're using all of them.
      if (IsRet)
        return true;
      // Try using a GPR to pass the address.
      if ((Reg = State.AllocateReg(ArgGPRs))) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else if (ValVT.isScalableVector()) {
        LocVT = XLenVT;
        LocInfo = CCValAssign::Indirect;
      } else {
        // Pass fixed-length vectors on the stack.
        LocVT = ValVT;
        StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, being careful for vXi1
        // vectors.
        StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
      }
    }
  } else {
    Reg = State.AllocateReg(ArgGPRs);
  }

  unsigned StackOffset =
      Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);

  // If we reach this point and PendingLocs is non-empty, we must be at the
  // end of a split argument that must be passed indirectly.
  if (!PendingLocs.empty()) {
    assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
    assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");

    for (auto &It : PendingLocs) {
      if (Reg)
        It.convertToReg(Reg);
      else
        It.convertToMem(StackOffset);
      State.addLoc(It);
    }
    PendingLocs.clear();
    PendingArgFlags.clear();
    return false;
  }

  assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
          (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) &&
         "Expected an XLenVT or vector types at this stage");

  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  // When a floating-point value is passed on the stack, no bit-conversion is
  // needed.
  if (ValVT.isFloatingPoint()) {
    LocVT = ValVT;
    LocInfo = CCValAssign::Full;
  }
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
  return false;
}

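// Return the index of the first i1-element vector argument, if any; this is
// the mask argument that allocateRVVReg above pre-assigns to V0. For example,
// with arguments (vint32m1_t, vbool32_t) the index 1 is returned.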
template <typename ArgTy>
static Optional<unsigned> preAssignMask(const ArgTy &Args) {
  for (const auto &ArgIdx : enumerate(Args)) {
    MVT ArgVT = ArgIdx.value().VT;
    if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
      return ArgIdx.index();
  }
  return None;
}

void RISCVTargetLowering::analyzeInputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
    RISCVCCAssignFn Fn) const {
  unsigned NumArgs = Ins.size();
  FunctionType *FType = MF.getFunction().getFunctionType();

  Optional<unsigned> FirstMaskArgument;
  if (Subtarget.hasVInstructions())
    FirstMaskArgument = preAssignMask(Ins);

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;

    Type *ArgTy = nullptr;
    if (IsRet)
      ArgTy = FType->getReturnType();
    else if (Ins[i].isOrigArg())
      ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());

    RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
    if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
           ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
           FirstMaskArgument)) {
      LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
                        << EVT(ArgVT).getEVTString() << '\n');
      llvm_unreachable(nullptr);
    }
  }
}

void RISCVTargetLowering::analyzeOutputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
    CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
  unsigned NumArgs = Outs.size();

  Optional<unsigned> FirstMaskArgument;
  if (Subtarget.hasVInstructions())
    FirstMaskArgument = preAssignMask(Outs);

  for (unsigned i = 0; i != NumArgs; i++) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;

    RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
    if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
           ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
           FirstMaskArgument)) {
      LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
                        << EVT(ArgVT).getEVTString() << "\n");
      llvm_unreachable(nullptr);
    }
  }
}

// Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
// values.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
                                   const CCValAssign &VA, const SDLoc &DL,
                                   const RISCVSubtarget &Subtarget) {
  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
    if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
      Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
    break;
  case CCValAssign::BCvt:
    if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
      Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
    else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
      Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
    else
      Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
    break;
  }
  return Val;
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL,
                                const RISCVTargetLowering &TLI) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  EVT LocVT = VA.getLocVT();
  SDValue Val;
  const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
  Register VReg = RegInfo.createVirtualRegister(RC);
  RegInfo.addLiveIn(VA.getLocReg(), VReg);
  Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);

  if (VA.getLocInfo() == CCValAssign::Indirect)
    return Val;

  return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
}

static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
                                   const CCValAssign &VA, const SDLoc &DL,
                                   const RISCVSubtarget &Subtarget) {
  EVT LocVT = VA.getLocVT();

  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
    if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
      Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
    break;
  case CCValAssign::BCvt:
    if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
    else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
      Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
    else
      Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
    break;
  }
  return Val;
}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT LocVT = VA.getLocVT();
  EVT ValVT = VA.getValVT();
  EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
10019   if (ValVT.isScalableVector()) {
    // When the value is a scalable vector, what is saved on the stack is a
    // pointer to the scalable vector value, so ValVT is the pointer type
    // rather than the scalable vector type.
10023     ValVT = LocVT;
10024   }
10025   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
10026                                  /*IsImmutable=*/true);
10027   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
10028   SDValue Val;
10029 
10030   ISD::LoadExtType ExtType;
10031   switch (VA.getLocInfo()) {
10032   default:
10033     llvm_unreachable("Unexpected CCValAssign::LocInfo");
10034   case CCValAssign::Full:
10035   case CCValAssign::Indirect:
10036   case CCValAssign::BCvt:
10037     ExtType = ISD::NON_EXTLOAD;
10038     break;
10039   }
10040   Val = DAG.getExtLoad(
10041       ExtType, DL, LocVT, Chain, FIN,
10042       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
10043   return Val;
10044 }
10045 
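// Unpack an f64 argument passed on RV32D with a soft-float ABI: the value
// arrives either entirely on the stack, split across a pair of GPRs, or
// (when the low half lands in a7/X17) split across a GPR and a stack slot.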
10046 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
10047                                        const CCValAssign &VA, const SDLoc &DL) {
10048   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
10049          "Unexpected VA");
10050   MachineFunction &MF = DAG.getMachineFunction();
10051   MachineFrameInfo &MFI = MF.getFrameInfo();
10052   MachineRegisterInfo &RegInfo = MF.getRegInfo();
10053 
10054   if (VA.isMemLoc()) {
10055     // f64 is passed on the stack.
10056     int FI =
10057         MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
10058     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10059     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
10060                        MachinePointerInfo::getFixedStack(MF, FI));
10061   }
10062 
10063   assert(VA.isRegLoc() && "Expected register VA assignment");
10064 
10065   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10066   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
10067   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
10068   SDValue Hi;
10069   if (VA.getLocReg() == RISCV::X17) {
10070     // Second half of f64 is passed on the stack.
10071     int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
10072     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
10073     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
10074                      MachinePointerInfo::getFixedStack(MF, FI));
10075   } else {
10076     // Second half of f64 is passed in another GPR.
10077     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
10078     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
10079     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
10080   }
10081   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
10082 }
10083 
// FastCC yields less than a 1% improvement on some particular benchmarks,
// but it may theoretically benefit other cases. It differs from the standard
// calling convention mainly by also allocating arguments to temporary
// registers.
10086 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
10087                             unsigned ValNo, MVT ValVT, MVT LocVT,
10088                             CCValAssign::LocInfo LocInfo,
10089                             ISD::ArgFlagsTy ArgFlags, CCState &State,
10090                             bool IsFixed, bool IsRet, Type *OrigTy,
10091                             const RISCVTargetLowering &TLI,
10092                             Optional<unsigned> FirstMaskArgument) {
10093 
  // X5 and X6 might be used by the save-restore libcalls, so they are
  // omitted from this list.
10095   static const MCPhysReg GPRList[] = {
10096       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
10097       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
10098       RISCV::X29, RISCV::X30, RISCV::X31};
10099 
10100   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10101     if (unsigned Reg = State.AllocateReg(GPRList)) {
10102       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10103       return false;
10104     }
10105   }
10106 
10107   if (LocVT == MVT::f16) {
10108     static const MCPhysReg FPR16List[] = {
10109         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
10110         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
10111         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
10112         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
10113     if (unsigned Reg = State.AllocateReg(FPR16List)) {
10114       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10115       return false;
10116     }
10117   }
10118 
10119   if (LocVT == MVT::f32) {
10120     static const MCPhysReg FPR32List[] = {
10121         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
10122         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
10123         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
10124         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
10125     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10126       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10127       return false;
10128     }
10129   }
10130 
10131   if (LocVT == MVT::f64) {
10132     static const MCPhysReg FPR64List[] = {
10133         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
10134         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
10135         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
10136         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
10137     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10138       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10139       return false;
10140     }
10141   }
10142 
10143   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
10144     unsigned Offset4 = State.AllocateStack(4, Align(4));
10145     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
10146     return false;
10147   }
10148 
10149   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
10150     unsigned Offset5 = State.AllocateStack(8, Align(8));
10151     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
10152     return false;
10153   }
10154 
10155   if (LocVT.isVector()) {
10156     if (unsigned Reg =
10157             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
10158       // Fixed-length vectors are located in the corresponding scalable-vector
10159       // container types.
10160       if (ValVT.isFixedLengthVector())
10161         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
10162       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10163     } else {
      // Try to pass the address via a "fast" GPR.
10165       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
10166         LocInfo = CCValAssign::Indirect;
10167         LocVT = TLI.getSubtarget().getXLenVT();
10168         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
10169       } else if (ValVT.isFixedLengthVector()) {
10170         auto StackAlign =
10171             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
10172         unsigned StackOffset =
10173             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
10174         State.addLoc(
10175             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
10176       } else {
10177         // Can't pass scalable vectors on the stack.
10178         return true;
10179       }
10180     }
10181 
10182     return false;
10183   }
10184 
10185   return true; // CC didn't match.
10186 }
10187 
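// Calling convention for calls using the GHC (Glasgow Haskell Compiler)
// runtime convention: each argument is pinned to a fixed set of callee-saved
// STG registers, and there is no stack fallback.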
10188 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
10189                          CCValAssign::LocInfo LocInfo,
10190                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
10191 
10192   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
10193     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
10194     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
10195     static const MCPhysReg GPRList[] = {
10196         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
10197         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
10198     if (unsigned Reg = State.AllocateReg(GPRList)) {
10199       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10200       return false;
10201     }
10202   }
10203 
10204   if (LocVT == MVT::f32) {
10205     // Pass in STG registers: F1, ..., F6
10206     //                        fs0 ... fs5
10207     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
10208                                           RISCV::F18_F, RISCV::F19_F,
10209                                           RISCV::F20_F, RISCV::F21_F};
10210     if (unsigned Reg = State.AllocateReg(FPR32List)) {
10211       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10212       return false;
10213     }
10214   }
10215 
10216   if (LocVT == MVT::f64) {
10217     // Pass in STG registers: D1, ..., D6
10218     //                        fs6 ... fs11
10219     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
10220                                           RISCV::F24_D, RISCV::F25_D,
10221                                           RISCV::F26_D, RISCV::F27_D};
10222     if (unsigned Reg = State.AllocateReg(FPR64List)) {
10223       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
10224       return false;
10225     }
10226   }
10227 
10228   report_fatal_error("No registers left in GHC calling convention");
10229   return true;
10230 }
10231 
10232 // Transform physical registers into virtual registers.
10233 SDValue RISCVTargetLowering::LowerFormalArguments(
10234     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
10235     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
10236     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
10237 
10238   MachineFunction &MF = DAG.getMachineFunction();
10239 
10240   switch (CallConv) {
10241   default:
10242     report_fatal_error("Unsupported calling convention");
10243   case CallingConv::C:
10244   case CallingConv::Fast:
10245     break;
10246   case CallingConv::GHC:
10247     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
10248         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
10249       report_fatal_error(
10250         "GHC calling convention requires the F and D instruction set extensions");
10251   }
10252 
10253   const Function &Func = MF.getFunction();
10254   if (Func.hasFnAttribute("interrupt")) {
10255     if (!Func.arg_empty())
10256       report_fatal_error(
10257         "Functions with the interrupt attribute cannot have arguments!");
10258 
10259     StringRef Kind =
10260       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10261 
10262     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
10263       report_fatal_error(
10264         "Function interrupt attribute argument not supported!");
10265   }
10266 
10267   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10268   MVT XLenVT = Subtarget.getXLenVT();
10269   unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
10271   std::vector<SDValue> OutChains;
10272 
10273   // Assign locations to all of the incoming arguments.
10274   SmallVector<CCValAssign, 16> ArgLocs;
10275   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10276 
10277   if (CallConv == CallingConv::GHC)
10278     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
10279   else
10280     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
10281                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10282                                                    : CC_RISCV);
10283 
10284   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
10285     CCValAssign &VA = ArgLocs[i];
10286     SDValue ArgValue;
10287     // Passing f64 on RV32D with a soft float ABI must be handled as a special
10288     // case.
10289     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
10290       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
10291     else if (VA.isRegLoc())
10292       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
10293     else
10294       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
10295 
10296     if (VA.getLocInfo() == CCValAssign::Indirect) {
10297       // If the original argument was split and passed by reference (e.g. i128
10298       // on RV32), we need to load all parts of it here (using the same
10299       // address). Vectors may be partly split to registers and partly to the
10300       // stack, in which case the base address is partly offset and subsequent
10301       // stores are relative to that.
10302       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
10303                                    MachinePointerInfo()));
10304       unsigned ArgIndex = Ins[i].OrigArgIndex;
10305       unsigned ArgPartOffset = Ins[i].PartOffset;
10306       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
10307       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
10308         CCValAssign &PartVA = ArgLocs[i + 1];
10309         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
10310         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10311         if (PartVA.getValVT().isScalableVector())
10312           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10313         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
10314         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
10315                                      MachinePointerInfo()));
10316         ++i;
10317       }
10318       continue;
10319     }
10320     InVals.push_back(ArgValue);
10321   }
10322 
10323   if (IsVarArg) {
10324     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
10325     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
10326     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
10327     MachineFrameInfo &MFI = MF.getFrameInfo();
10328     MachineRegisterInfo &RegInfo = MF.getRegInfo();
10329     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
10330 
10331     // Offset of the first variable argument from stack pointer, and size of
10332     // the vararg save area. For now, the varargs save area is either zero or
10333     // large enough to hold a0-a7.
10334     int VaArgOffset, VarArgsSaveSize;
10335 
10336     // If all registers are allocated, then all varargs must be passed on the
10337     // stack and we don't need to save any argregs.
10338     if (ArgRegs.size() == Idx) {
10339       VaArgOffset = CCInfo.getNextStackOffset();
10340       VarArgsSaveSize = 0;
10341     } else {
10342       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
10343       VaArgOffset = -VarArgsSaveSize;
10344     }
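    // For example, if a0-a3 hold named arguments (Idx == 4), then a4-a7 are
    // saved into a 4 * XLenInBytes byte area at negative offsets, immediately
    // below any stack-passed varargs, so all varargs end up contiguous in
    // memory.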
10345 
    // Record the frame index of the first variable argument,
    // which is needed by VASTART.
10348     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10349     RVFI->setVarArgsFrameIndex(FI);
10350 
    // If saving an odd number of registers, create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
10354     if (Idx % 2) {
10355       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
10356       VarArgsSaveSize += XLenInBytes;
10357     }
10358 
10359     // Copy the integer registers that may have been used for passing varargs
10360     // to the vararg save area.
10361     for (unsigned I = Idx; I < ArgRegs.size();
10362          ++I, VaArgOffset += XLenInBytes) {
10363       const Register Reg = RegInfo.createVirtualRegister(RC);
10364       RegInfo.addLiveIn(ArgRegs[I], Reg);
10365       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
10366       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
10367       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10368       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
10369                                    MachinePointerInfo::getFixedStack(MF, FI));
10370       cast<StoreSDNode>(Store.getNode())
10371           ->getMemOperand()
10372           ->setValue((Value *)nullptr);
10373       OutChains.push_back(Store);
10374     }
10375     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
10376   }
10377 
  // All stores are grouped into one node so that the sizes of Ins and
  // InVals stay matched. This only happens for vararg functions.
10380   if (!OutChains.empty()) {
10381     OutChains.push_back(Chain);
10382     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
10383   }
10384 
10385   return Chain;
10386 }
10387 
10388 /// isEligibleForTailCallOptimization - Check whether the call is eligible
10389 /// for tail call optimization.
10390 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
10391 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
10392     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
10393     const SmallVector<CCValAssign, 16> &ArgLocs) const {
10394 
10395   auto &Callee = CLI.Callee;
10396   auto CalleeCC = CLI.CallConv;
10397   auto &Outs = CLI.Outs;
10398   auto &Caller = MF.getFunction();
10399   auto CallerCC = Caller.getCallingConv();
10400 
10401   // Exception-handling functions need a special set of instructions to
10402   // indicate a return to the hardware. Tail-calling another function would
10403   // probably break this.
10404   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
10405   // should be expanded as new function attributes are introduced.
10406   if (Caller.hasFnAttribute("interrupt"))
10407     return false;
10408 
10409   // Do not tail call opt if the stack is used to pass parameters.
10410   if (CCInfo.getNextStackOffset() != 0)
10411     return false;
10412 
  // Do not tail call opt if any parameters need to be passed indirectly.
  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
  // passed indirectly: the address of the value is passed in a register or,
  // if no register is available, on the stack. Passing indirectly usually
  // requires allocating stack space to hold the value, so the
  // CCInfo.getNextStackOffset() != 0 check is not enough; we must also check
  // whether any entry in ArgLocs is assigned CCValAssign::Indirect.
10421   for (auto &VA : ArgLocs)
10422     if (VA.getLocInfo() == CCValAssign::Indirect)
10423       return false;
10424 
10425   // Do not tail call opt if either caller or callee uses struct return
10426   // semantics.
10427   auto IsCallerStructRet = Caller.hasStructRetAttr();
10428   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
10429   if (IsCallerStructRet || IsCalleeStructRet)
10430     return false;
10431 
10432   // Externally-defined functions with weak linkage should not be
10433   // tail-called. The behaviour of branch instructions in this situation (as
10434   // used for tail calls) is implementation-defined, so we cannot rely on the
10435   // linker replacing the tail call with a return.
10436   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
10437     const GlobalValue *GV = G->getGlobal();
10438     if (GV->hasExternalWeakLinkage())
10439       return false;
10440   }
10441 
10442   // The callee has to preserve all registers the caller needs to preserve.
10443   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
10444   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
10445   if (CalleeCC != CallerCC) {
10446     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
10447     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
10448       return false;
10449   }
10450 
10451   // Byval parameters hand the function a pointer directly into the stack area
10452   // we want to reuse during a tail call. Working around this *is* possible
10453   // but less efficient and uglier in LowerCall.
10454   for (auto &Arg : Outs)
10455     if (Arg.Flags.isByVal())
10456       return false;
10457 
10458   return true;
10459 }
10460 
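// Return the DataLayout-preferred alignment of the IR type corresponding to
// the given VT.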
10461 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
10462   return DAG.getDataLayout().getPrefTypeAlign(
10463       VT.getTypeForEVT(*DAG.getContext()));
10464 }
10465 
10466 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
10467 // and output parameter nodes.
10468 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
10469                                        SmallVectorImpl<SDValue> &InVals) const {
10470   SelectionDAG &DAG = CLI.DAG;
10471   SDLoc &DL = CLI.DL;
10472   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
10473   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
10474   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
10475   SDValue Chain = CLI.Chain;
10476   SDValue Callee = CLI.Callee;
10477   bool &IsTailCall = CLI.IsTailCall;
10478   CallingConv::ID CallConv = CLI.CallConv;
10479   bool IsVarArg = CLI.IsVarArg;
10480   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10481   MVT XLenVT = Subtarget.getXLenVT();
10482 
10483   MachineFunction &MF = DAG.getMachineFunction();
10484 
10485   // Analyze the operands of the call, assigning locations to each operand.
10486   SmallVector<CCValAssign, 16> ArgLocs;
10487   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
10488 
10489   if (CallConv == CallingConv::GHC)
10490     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
10491   else
10492     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
10493                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
10494                                                     : CC_RISCV);
10495 
10496   // Check if it's really possible to do a tail call.
10497   if (IsTailCall)
10498     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
10499 
10500   if (IsTailCall)
10501     ++NumTailCalls;
10502   else if (CLI.CB && CLI.CB->isMustTailCall())
10503     report_fatal_error("failed to perform tail call elimination on a call "
10504                        "site marked musttail");
10505 
10506   // Get a count of how many bytes are to be pushed on the stack.
10507   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
10508 
10509   // Create local copies for byval args
10510   SmallVector<SDValue, 8> ByValArgs;
10511   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10512     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10513     if (!Flags.isByVal())
10514       continue;
10515 
10516     SDValue Arg = OutVals[i];
10517     unsigned Size = Flags.getByValSize();
10518     Align Alignment = Flags.getNonZeroByValAlign();
10519 
10520     int FI =
10521         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
10522     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
10523     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
10524 
10525     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
10526                           /*IsVolatile=*/false,
10527                           /*AlwaysInline=*/false, IsTailCall,
10528                           MachinePointerInfo(), MachinePointerInfo());
10529     ByValArgs.push_back(FIPtr);
10530   }
10531 
10532   if (!IsTailCall)
10533     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
10534 
10535   // Copy argument values to their designated locations.
10536   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
10537   SmallVector<SDValue, 8> MemOpChains;
10538   SDValue StackPtr;
10539   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
10540     CCValAssign &VA = ArgLocs[i];
10541     SDValue ArgValue = OutVals[i];
10542     ISD::ArgFlagsTy Flags = Outs[i].Flags;
10543 
10544     // Handle passing f64 on RV32D with a soft float ABI as a special case.
10545     bool IsF64OnRV32DSoftABI =
10546         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
10547     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
10548       SDValue SplitF64 = DAG.getNode(
10549           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
10550       SDValue Lo = SplitF64.getValue(0);
10551       SDValue Hi = SplitF64.getValue(1);
10552 
10553       Register RegLo = VA.getLocReg();
10554       RegsToPass.push_back(std::make_pair(RegLo, Lo));
10555 
10556       if (RegLo == RISCV::X17) {
10557         // Second half of f64 is passed on the stack.
10558         // Work out the address of the stack slot.
10559         if (!StackPtr.getNode())
10560           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10561         // Emit the store.
10562         MemOpChains.push_back(
10563             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
10564       } else {
10565         // Second half of f64 is passed in another GPR.
10566         assert(RegLo < RISCV::X31 && "Invalid register pair");
10567         Register RegHigh = RegLo + 1;
10568         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
10569       }
10570       continue;
10571     }
10572 
10573     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
10574     // as any other MemLoc.
10575 
10576     // Promote the value if needed.
10577     // For now, only handle fully promoted and indirect arguments.
10578     if (VA.getLocInfo() == CCValAssign::Indirect) {
10579       // Store the argument in a stack slot and pass its address.
10580       Align StackAlign =
10581           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
10582                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
10583       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
10584       // If the original argument was split (e.g. i128), we need
10585       // to store the required parts of it here (and pass just one address).
10586       // Vectors may be partly split to registers and partly to the stack, in
10587       // which case the base address is partly offset and subsequent stores are
10588       // relative to that.
10589       unsigned ArgIndex = Outs[i].OrigArgIndex;
10590       unsigned ArgPartOffset = Outs[i].PartOffset;
10591       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
      // Calculate the total size to store. The total is only known once the
      // loop below has walked all of the parts and accumulated their sizes
      // and alignments.
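      // For example, an i128 argument on RV32 arrives as four i32 parts with
      // part offsets 0, 4, 8 and 12; all four are stored to a single 16-byte
      // slot whose address is what actually gets passed.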
10595       SmallVector<std::pair<SDValue, SDValue>> Parts;
10596       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
10597         SDValue PartValue = OutVals[i + 1];
10598         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
10599         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
10600         EVT PartVT = PartValue.getValueType();
10601         if (PartVT.isScalableVector())
10602           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
10603         StoredSize += PartVT.getStoreSize();
10604         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
10605         Parts.push_back(std::make_pair(PartValue, Offset));
10606         ++i;
10607       }
10608       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
10609       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
10610       MemOpChains.push_back(
10611           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
10612                        MachinePointerInfo::getFixedStack(MF, FI)));
10613       for (const auto &Part : Parts) {
10614         SDValue PartValue = Part.first;
10615         SDValue PartOffset = Part.second;
10616         SDValue Address =
10617             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
10618         MemOpChains.push_back(
10619             DAG.getStore(Chain, DL, PartValue, Address,
10620                          MachinePointerInfo::getFixedStack(MF, FI)));
10621       }
10622       ArgValue = SpillSlot;
10623     } else {
10624       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
10625     }
10626 
10627     // Use local copy if it is a byval arg.
10628     if (Flags.isByVal())
10629       ArgValue = ByValArgs[j++];
10630 
10631     if (VA.isRegLoc()) {
10632       // Queue up the argument copies and emit them at the end.
10633       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
10634     } else {
10635       assert(VA.isMemLoc() && "Argument not register or memory");
10636       assert(!IsTailCall && "Tail call not allowed if stack is used "
10637                             "for passing parameters");
10638 
10639       // Work out the address of the stack slot.
10640       if (!StackPtr.getNode())
10641         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
10642       SDValue Address =
10643           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
10644                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
10645 
10646       // Emit the store.
10647       MemOpChains.push_back(
10648           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
10649     }
10650   }
10651 
10652   // Join the stores, which are independent of one another.
10653   if (!MemOpChains.empty())
10654     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
10655 
10656   SDValue Glue;
10657 
10658   // Build a sequence of copy-to-reg nodes, chained and glued together.
10659   for (auto &Reg : RegsToPass) {
10660     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
10661     Glue = Chain.getValue(1);
10662   }
10663 
  // Validate that none of the argument registers have been marked as
  // reserved; if any have, report an error. Do the same for the return
  // address if this is not a tail call.
10667   validateCCReservedRegs(RegsToPass, MF);
10668   if (!IsTailCall &&
10669       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
10670     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10671         MF.getFunction(),
10672         "Return address register required, but has been reserved."});
10673 
  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
  // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
  // split it, and so the direct call can be matched by PseudoCALL.
10677   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
10678     const GlobalValue *GV = S->getGlobal();
10679 
10680     unsigned OpFlags = RISCVII::MO_CALL;
10681     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
10682       OpFlags = RISCVII::MO_PLT;
10683 
10684     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
10685   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
10686     unsigned OpFlags = RISCVII::MO_CALL;
10687 
10688     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
10689                                                  nullptr))
10690       OpFlags = RISCVII::MO_PLT;
10691 
10692     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
10693   }
10694 
10695   // The first call operand is the chain and the second is the target address.
10696   SmallVector<SDValue, 8> Ops;
10697   Ops.push_back(Chain);
10698   Ops.push_back(Callee);
10699 
10700   // Add argument registers to the end of the list so that they are
10701   // known live into the call.
10702   for (auto &Reg : RegsToPass)
10703     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
10704 
10705   if (!IsTailCall) {
10706     // Add a register mask operand representing the call-preserved registers.
10707     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
10708     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
10709     assert(Mask && "Missing call preserved mask for calling convention");
10710     Ops.push_back(DAG.getRegisterMask(Mask));
10711   }
10712 
10713   // Glue the call to the argument copies, if any.
10714   if (Glue.getNode())
10715     Ops.push_back(Glue);
10716 
10717   // Emit the call.
10718   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10719 
10720   if (IsTailCall) {
10721     MF.getFrameInfo().setHasTailCall();
10722     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
10723   }
10724 
10725   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
10726   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
10727   Glue = Chain.getValue(1);
10728 
10729   // Mark the end of the call, which is glued to the call itself.
10730   Chain = DAG.getCALLSEQ_END(Chain,
10731                              DAG.getConstant(NumBytes, DL, PtrVT, true),
10732                              DAG.getConstant(0, DL, PtrVT, true),
10733                              Glue, DL);
10734   Glue = Chain.getValue(1);
10735 
10736   // Assign locations to each value returned by this call.
10737   SmallVector<CCValAssign, 16> RVLocs;
10738   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
10739   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
10740 
10741   // Copy all of the result registers out of their specified physreg.
10742   for (auto &VA : RVLocs) {
10743     // Copy the value out
10744     SDValue RetValue =
10745         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
10746     // Glue the RetValue to the end of the call sequence
10747     Chain = RetValue.getValue(1);
10748     Glue = RetValue.getValue(2);
10749 
10750     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10751       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
10752       SDValue RetValue2 =
10753           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
10754       Chain = RetValue2.getValue(1);
10755       Glue = RetValue2.getValue(2);
10756       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
10757                              RetValue2);
10758     }
10759 
10760     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
10761 
10762     InVals.push_back(RetValue);
10763   }
10764 
10765   return Chain;
10766 }
10767 
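// Return true if CC_RISCV can assign a location to every value in Outs;
// otherwise the common code demotes the return to an sret argument.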
10768 bool RISCVTargetLowering::CanLowerReturn(
10769     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
10770     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
10771   SmallVector<CCValAssign, 16> RVLocs;
10772   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
10773 
10774   Optional<unsigned> FirstMaskArgument;
10775   if (Subtarget.hasVInstructions())
10776     FirstMaskArgument = preAssignMask(Outs);
10777 
10778   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
10779     MVT VT = Outs[i].VT;
10780     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
10781     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
10782     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
10783                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
10784                  *this, FirstMaskArgument))
10785       return false;
10786   }
10787   return true;
10788 }
10789 
10790 SDValue
10791 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
10792                                  bool IsVarArg,
10793                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
10794                                  const SmallVectorImpl<SDValue> &OutVals,
10795                                  const SDLoc &DL, SelectionDAG &DAG) const {
10796   const MachineFunction &MF = DAG.getMachineFunction();
10797   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10798 
10799   // Stores the assignment of the return value to a location.
10800   SmallVector<CCValAssign, 16> RVLocs;
10801 
10802   // Info about the registers and stack slot.
10803   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
10804                  *DAG.getContext());
10805 
10806   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
10807                     nullptr, CC_RISCV);
10808 
10809   if (CallConv == CallingConv::GHC && !RVLocs.empty())
10810     report_fatal_error("GHC functions return void only");
10811 
10812   SDValue Glue;
10813   SmallVector<SDValue, 4> RetOps(1, Chain);
10814 
10815   // Copy the result values into the output registers.
10816   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
10817     SDValue Val = OutVals[i];
10818     CCValAssign &VA = RVLocs[i];
10819     assert(VA.isRegLoc() && "Can only return in registers!");
10820 
10821     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
10822       // Handle returning f64 on RV32D with a soft float ABI.
10823       assert(VA.isRegLoc() && "Expected return via registers");
10824       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
10825                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
10826       SDValue Lo = SplitF64.getValue(0);
10827       SDValue Hi = SplitF64.getValue(1);
10828       Register RegLo = VA.getLocReg();
10829       assert(RegLo < RISCV::X31 && "Invalid register pair");
10830       Register RegHi = RegLo + 1;
10831 
10832       if (STI.isRegisterReservedByUser(RegLo) ||
10833           STI.isRegisterReservedByUser(RegHi))
10834         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10835             MF.getFunction(),
10836             "Return value register required, but has been reserved."});
10837 
10838       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
10839       Glue = Chain.getValue(1);
10840       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
10841       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
10842       Glue = Chain.getValue(1);
10843       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
10844     } else {
10845       // Handle a 'normal' return.
10846       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
10847       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
10848 
10849       if (STI.isRegisterReservedByUser(VA.getLocReg()))
10850         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
10851             MF.getFunction(),
10852             "Return value register required, but has been reserved."});
10853 
10854       // Guarantee that all emitted copies are stuck together.
10855       Glue = Chain.getValue(1);
10856       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
10857     }
10858   }
10859 
10860   RetOps[0] = Chain; // Update chain.
10861 
10862   // Add the glue node if we have it.
10863   if (Glue.getNode()) {
10864     RetOps.push_back(Glue);
10865   }
10866 
10867   unsigned RetOpc = RISCVISD::RET_FLAG;
10868   // Interrupt service routines use different return instructions.
10869   const Function &Func = DAG.getMachineFunction().getFunction();
10870   if (Func.hasFnAttribute("interrupt")) {
10871     if (!Func.getReturnType()->isVoidTy())
10872       report_fatal_error(
10873           "Functions with the interrupt attribute must have void return type!");
10874 
10875     MachineFunction &MF = DAG.getMachineFunction();
10876     StringRef Kind =
10877       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
10878 
10879     if (Kind == "user")
10880       RetOpc = RISCVISD::URET_FLAG;
10881     else if (Kind == "supervisor")
10882       RetOpc = RISCVISD::SRET_FLAG;
10883     else
10884       RetOpc = RISCVISD::MRET_FLAG;
10885   }
10886 
10887   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
10888 }
10889 
10890 void RISCVTargetLowering::validateCCReservedRegs(
10891     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
10892     MachineFunction &MF) const {
10893   const Function &F = MF.getFunction();
10894   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
10895 
10896   if (llvm::any_of(Regs, [&STI](auto Reg) {
10897         return STI.isRegisterReservedByUser(Reg.first);
10898       }))
10899     F.getContext().diagnose(DiagnosticInfoUnsupported{
10900         F, "Argument register required, but has been reserved."});
10901 }
10902 
10903 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
10904   return CI->isTailCall();
10905 }
10906 
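// Map a RISCVISD opcode to its mnemonic for debug output.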
10907 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
10908 #define NODE_NAME_CASE(NODE)                                                   \
10909   case RISCVISD::NODE:                                                         \
10910     return "RISCVISD::" #NODE;
10911   // clang-format off
10912   switch ((RISCVISD::NodeType)Opcode) {
10913   case RISCVISD::FIRST_NUMBER:
10914     break;
10915   NODE_NAME_CASE(RET_FLAG)
10916   NODE_NAME_CASE(URET_FLAG)
10917   NODE_NAME_CASE(SRET_FLAG)
10918   NODE_NAME_CASE(MRET_FLAG)
10919   NODE_NAME_CASE(CALL)
10920   NODE_NAME_CASE(SELECT_CC)
10921   NODE_NAME_CASE(BR_CC)
10922   NODE_NAME_CASE(BuildPairF64)
10923   NODE_NAME_CASE(SplitF64)
10924   NODE_NAME_CASE(TAIL)
10925   NODE_NAME_CASE(MULHSU)
10926   NODE_NAME_CASE(SLLW)
10927   NODE_NAME_CASE(SRAW)
10928   NODE_NAME_CASE(SRLW)
10929   NODE_NAME_CASE(DIVW)
10930   NODE_NAME_CASE(DIVUW)
10931   NODE_NAME_CASE(REMUW)
10932   NODE_NAME_CASE(ROLW)
10933   NODE_NAME_CASE(RORW)
10934   NODE_NAME_CASE(CLZW)
10935   NODE_NAME_CASE(CTZW)
10936   NODE_NAME_CASE(FSLW)
10937   NODE_NAME_CASE(FSRW)
10938   NODE_NAME_CASE(FSL)
10939   NODE_NAME_CASE(FSR)
10940   NODE_NAME_CASE(FMV_H_X)
10941   NODE_NAME_CASE(FMV_X_ANYEXTH)
10942   NODE_NAME_CASE(FMV_X_SIGNEXTH)
10943   NODE_NAME_CASE(FMV_W_X_RV64)
10944   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
10945   NODE_NAME_CASE(FCVT_X)
10946   NODE_NAME_CASE(FCVT_XU)
10947   NODE_NAME_CASE(FCVT_W_RV64)
10948   NODE_NAME_CASE(FCVT_WU_RV64)
10949   NODE_NAME_CASE(STRICT_FCVT_W_RV64)
10950   NODE_NAME_CASE(STRICT_FCVT_WU_RV64)
10951   NODE_NAME_CASE(READ_CYCLE_WIDE)
10952   NODE_NAME_CASE(GREV)
10953   NODE_NAME_CASE(GREVW)
10954   NODE_NAME_CASE(GORC)
10955   NODE_NAME_CASE(GORCW)
10956   NODE_NAME_CASE(SHFL)
10957   NODE_NAME_CASE(SHFLW)
10958   NODE_NAME_CASE(UNSHFL)
10959   NODE_NAME_CASE(UNSHFLW)
10960   NODE_NAME_CASE(BFP)
10961   NODE_NAME_CASE(BFPW)
10962   NODE_NAME_CASE(BCOMPRESS)
10963   NODE_NAME_CASE(BCOMPRESSW)
10964   NODE_NAME_CASE(BDECOMPRESS)
10965   NODE_NAME_CASE(BDECOMPRESSW)
10966   NODE_NAME_CASE(VMV_V_X_VL)
10967   NODE_NAME_CASE(VFMV_V_F_VL)
10968   NODE_NAME_CASE(VMV_X_S)
10969   NODE_NAME_CASE(VMV_S_X_VL)
10970   NODE_NAME_CASE(VFMV_S_F_VL)
10971   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
10972   NODE_NAME_CASE(READ_VLENB)
10973   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
10974   NODE_NAME_CASE(VSLIDEUP_VL)
10975   NODE_NAME_CASE(VSLIDE1UP_VL)
10976   NODE_NAME_CASE(VSLIDEDOWN_VL)
10977   NODE_NAME_CASE(VSLIDE1DOWN_VL)
10978   NODE_NAME_CASE(VID_VL)
10979   NODE_NAME_CASE(VFNCVT_ROD_VL)
10980   NODE_NAME_CASE(VECREDUCE_ADD_VL)
10981   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
10982   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
10983   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
10984   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
10985   NODE_NAME_CASE(VECREDUCE_AND_VL)
10986   NODE_NAME_CASE(VECREDUCE_OR_VL)
10987   NODE_NAME_CASE(VECREDUCE_XOR_VL)
10988   NODE_NAME_CASE(VECREDUCE_FADD_VL)
10989   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
10990   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
10991   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
10992   NODE_NAME_CASE(ADD_VL)
10993   NODE_NAME_CASE(AND_VL)
10994   NODE_NAME_CASE(MUL_VL)
10995   NODE_NAME_CASE(OR_VL)
10996   NODE_NAME_CASE(SDIV_VL)
10997   NODE_NAME_CASE(SHL_VL)
10998   NODE_NAME_CASE(SREM_VL)
10999   NODE_NAME_CASE(SRA_VL)
11000   NODE_NAME_CASE(SRL_VL)
11001   NODE_NAME_CASE(SUB_VL)
11002   NODE_NAME_CASE(UDIV_VL)
11003   NODE_NAME_CASE(UREM_VL)
11004   NODE_NAME_CASE(XOR_VL)
11005   NODE_NAME_CASE(SADDSAT_VL)
11006   NODE_NAME_CASE(UADDSAT_VL)
11007   NODE_NAME_CASE(SSUBSAT_VL)
11008   NODE_NAME_CASE(USUBSAT_VL)
11009   NODE_NAME_CASE(FADD_VL)
11010   NODE_NAME_CASE(FSUB_VL)
11011   NODE_NAME_CASE(FMUL_VL)
11012   NODE_NAME_CASE(FDIV_VL)
11013   NODE_NAME_CASE(FNEG_VL)
11014   NODE_NAME_CASE(FABS_VL)
11015   NODE_NAME_CASE(FSQRT_VL)
11016   NODE_NAME_CASE(FMA_VL)
11017   NODE_NAME_CASE(FCOPYSIGN_VL)
11018   NODE_NAME_CASE(SMIN_VL)
11019   NODE_NAME_CASE(SMAX_VL)
11020   NODE_NAME_CASE(UMIN_VL)
11021   NODE_NAME_CASE(UMAX_VL)
11022   NODE_NAME_CASE(FMINNUM_VL)
11023   NODE_NAME_CASE(FMAXNUM_VL)
11024   NODE_NAME_CASE(MULHS_VL)
11025   NODE_NAME_CASE(MULHU_VL)
11026   NODE_NAME_CASE(FP_TO_SINT_VL)
11027   NODE_NAME_CASE(FP_TO_UINT_VL)
11028   NODE_NAME_CASE(SINT_TO_FP_VL)
11029   NODE_NAME_CASE(UINT_TO_FP_VL)
11030   NODE_NAME_CASE(FP_EXTEND_VL)
11031   NODE_NAME_CASE(FP_ROUND_VL)
11032   NODE_NAME_CASE(VWMUL_VL)
11033   NODE_NAME_CASE(VWMULU_VL)
11034   NODE_NAME_CASE(VWMULSU_VL)
11035   NODE_NAME_CASE(VWADD_VL)
11036   NODE_NAME_CASE(VWADDU_VL)
11037   NODE_NAME_CASE(VWSUB_VL)
11038   NODE_NAME_CASE(VWSUBU_VL)
11039   NODE_NAME_CASE(VWADD_W_VL)
11040   NODE_NAME_CASE(VWADDU_W_VL)
11041   NODE_NAME_CASE(VWSUB_W_VL)
11042   NODE_NAME_CASE(VWSUBU_W_VL)
11043   NODE_NAME_CASE(SETCC_VL)
11044   NODE_NAME_CASE(VSELECT_VL)
11045   NODE_NAME_CASE(VP_MERGE_VL)
11046   NODE_NAME_CASE(VMAND_VL)
11047   NODE_NAME_CASE(VMOR_VL)
11048   NODE_NAME_CASE(VMXOR_VL)
11049   NODE_NAME_CASE(VMCLR_VL)
11050   NODE_NAME_CASE(VMSET_VL)
11051   NODE_NAME_CASE(VRGATHER_VX_VL)
11052   NODE_NAME_CASE(VRGATHER_VV_VL)
11053   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
11054   NODE_NAME_CASE(VSEXT_VL)
11055   NODE_NAME_CASE(VZEXT_VL)
11056   NODE_NAME_CASE(VCPOP_VL)
11057   NODE_NAME_CASE(READ_CSR)
11058   NODE_NAME_CASE(WRITE_CSR)
11059   NODE_NAME_CASE(SWAP_CSR)
11060   }
11061   // clang-format on
11062   return nullptr;
11063 #undef NODE_NAME_CASE
11064 }
11065 
11066 /// getConstraintType - Given a constraint letter, return the type of
11067 /// constraint it is for this target.
11068 RISCVTargetLowering::ConstraintType
11069 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
11070   if (Constraint.size() == 1) {
11071     switch (Constraint[0]) {
11072     default:
11073       break;
11074     case 'f':
11075       return C_RegisterClass;
11076     case 'I':
11077     case 'J':
11078     case 'K':
11079       return C_Immediate;
11080     case 'A':
11081       return C_Memory;
11082     case 'S': // A symbolic address
11083       return C_Other;
11084     }
11085   } else {
11086     if (Constraint == "vr" || Constraint == "vm")
11087       return C_RegisterClass;
11088   }
11089   return TargetLowering::getConstraintType(Constraint);
11090 }
11091 
11092 std::pair<unsigned, const TargetRegisterClass *>
11093 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
11094                                                   StringRef Constraint,
11095                                                   MVT VT) const {
11096   // First, see if this is a constraint that directly corresponds to a
11097   // RISCV register class.
11098   if (Constraint.size() == 1) {
11099     switch (Constraint[0]) {
11100     case 'r':
11101       // TODO: Support fixed vectors up to XLen for P extension?
11102       if (VT.isVector())
11103         break;
11104       return std::make_pair(0U, &RISCV::GPRRegClass);
11105     case 'f':
11106       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
11107         return std::make_pair(0U, &RISCV::FPR16RegClass);
11108       if (Subtarget.hasStdExtF() && VT == MVT::f32)
11109         return std::make_pair(0U, &RISCV::FPR32RegClass);
11110       if (Subtarget.hasStdExtD() && VT == MVT::f64)
11111         return std::make_pair(0U, &RISCV::FPR64RegClass);
11112       break;
11113     default:
11114       break;
11115     }
11116   } else if (Constraint == "vr") {
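    // Pick the smallest vector register grouping (LMUL 1, 2, 4, then 8) for
    // which VT is legal.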
11117     for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
11118                            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
11119       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
11120         return std::make_pair(0U, RC);
11121     }
11122   } else if (Constraint == "vm") {
11123     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
11124       return std::make_pair(0U, &RISCV::VMV0RegClass);
11125   }
11126 
11127   // Clang will correctly decode the usage of register name aliases into their
11128   // official names. However, other frontends like `rustc` do not. This allows
11129   // users of these frontends to use the ABI names for registers in LLVM-style
11130   // register constraints.
11131   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
11132                                .Case("{zero}", RISCV::X0)
11133                                .Case("{ra}", RISCV::X1)
11134                                .Case("{sp}", RISCV::X2)
11135                                .Case("{gp}", RISCV::X3)
11136                                .Case("{tp}", RISCV::X4)
11137                                .Case("{t0}", RISCV::X5)
11138                                .Case("{t1}", RISCV::X6)
11139                                .Case("{t2}", RISCV::X7)
11140                                .Cases("{s0}", "{fp}", RISCV::X8)
11141                                .Case("{s1}", RISCV::X9)
11142                                .Case("{a0}", RISCV::X10)
11143                                .Case("{a1}", RISCV::X11)
11144                                .Case("{a2}", RISCV::X12)
11145                                .Case("{a3}", RISCV::X13)
11146                                .Case("{a4}", RISCV::X14)
11147                                .Case("{a5}", RISCV::X15)
11148                                .Case("{a6}", RISCV::X16)
11149                                .Case("{a7}", RISCV::X17)
11150                                .Case("{s2}", RISCV::X18)
11151                                .Case("{s3}", RISCV::X19)
11152                                .Case("{s4}", RISCV::X20)
11153                                .Case("{s5}", RISCV::X21)
11154                                .Case("{s6}", RISCV::X22)
11155                                .Case("{s7}", RISCV::X23)
11156                                .Case("{s8}", RISCV::X24)
11157                                .Case("{s9}", RISCV::X25)
11158                                .Case("{s10}", RISCV::X26)
11159                                .Case("{s11}", RISCV::X27)
11160                                .Case("{t3}", RISCV::X28)
11161                                .Case("{t4}", RISCV::X29)
11162                                .Case("{t5}", RISCV::X30)
11163                                .Case("{t6}", RISCV::X31)
11164                                .Default(RISCV::NoRegister);
11165   if (XRegFromAlias != RISCV::NoRegister)
11166     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
11167 
  // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
  // TableGen record rather than the AsmName to choose registers for InlineAsm
  // constraints, and since we want to match those names to the widest
  // floating point register type available, manually select floating point
  // registers here.
11172   //
11173   // The second case is the ABI name of the register, so that frontends can also
11174   // use the ABI names in register constraint lists.
11175   if (Subtarget.hasStdExtF()) {
11176     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
11177                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
11178                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
11179                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
11180                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
11181                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
11182                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
11183                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
                        .Cases("{f7}", "{ft7}", RISCV::F7_F)
                        .Cases("{f8}", "{fs0}", RISCV::F8_F)
                        .Cases("{f9}", "{fs1}", RISCV::F9_F)
                        .Cases("{f10}", "{fa0}", RISCV::F10_F)
                        .Cases("{f11}", "{fa1}", RISCV::F11_F)
                        .Cases("{f12}", "{fa2}", RISCV::F12_F)
                        .Cases("{f13}", "{fa3}", RISCV::F13_F)
                        .Cases("{f14}", "{fa4}", RISCV::F14_F)
                        .Cases("{f15}", "{fa5}", RISCV::F15_F)
                        .Cases("{f16}", "{fa6}", RISCV::F16_F)
                        .Cases("{f17}", "{fa7}", RISCV::F17_F)
                        .Cases("{f18}", "{fs2}", RISCV::F18_F)
                        .Cases("{f19}", "{fs3}", RISCV::F19_F)
                        .Cases("{f20}", "{fs4}", RISCV::F20_F)
                        .Cases("{f21}", "{fs5}", RISCV::F21_F)
                        .Cases("{f22}", "{fs6}", RISCV::F22_F)
                        .Cases("{f23}", "{fs7}", RISCV::F23_F)
                        .Cases("{f24}", "{fs8}", RISCV::F24_F)
                        .Cases("{f25}", "{fs9}", RISCV::F25_F)
                        .Cases("{f26}", "{fs10}", RISCV::F26_F)
                        .Cases("{f27}", "{fs11}", RISCV::F27_F)
                        .Cases("{f28}", "{ft8}", RISCV::F28_F)
                        .Cases("{f29}", "{ft9}", RISCV::F29_F)
                        .Cases("{f30}", "{ft10}", RISCV::F30_F)
                        .Cases("{f31}", "{ft11}", RISCV::F31_F)
                        .Default(RISCV::NoRegister);
    if (FReg != RISCV::NoRegister) {
      assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
      if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) {
        unsigned RegNo = FReg - RISCV::F0_F;
        unsigned DReg = RISCV::F0_D + RegNo;
        return std::make_pair(DReg, &RISCV::FPR64RegClass);
      }
      if (VT == MVT::f32 || VT == MVT::Other)
        return std::make_pair(FReg, &RISCV::FPR32RegClass);
      if (Subtarget.hasStdExtZfh() && VT == MVT::f16) {
        unsigned RegNo = FReg - RISCV::F0_F;
        unsigned HReg = RISCV::F0_H + RegNo;
        return std::make_pair(HReg, &RISCV::FPR16RegClass);
      }
    }
  }

  if (Subtarget.hasVInstructions()) {
    Register VReg = StringSwitch<Register>(Constraint.lower())
                        .Case("{v0}", RISCV::V0)
                        .Case("{v1}", RISCV::V1)
                        .Case("{v2}", RISCV::V2)
                        .Case("{v3}", RISCV::V3)
                        .Case("{v4}", RISCV::V4)
                        .Case("{v5}", RISCV::V5)
                        .Case("{v6}", RISCV::V6)
                        .Case("{v7}", RISCV::V7)
                        .Case("{v8}", RISCV::V8)
                        .Case("{v9}", RISCV::V9)
                        .Case("{v10}", RISCV::V10)
                        .Case("{v11}", RISCV::V11)
                        .Case("{v12}", RISCV::V12)
                        .Case("{v13}", RISCV::V13)
                        .Case("{v14}", RISCV::V14)
                        .Case("{v15}", RISCV::V15)
                        .Case("{v16}", RISCV::V16)
                        .Case("{v17}", RISCV::V17)
                        .Case("{v18}", RISCV::V18)
                        .Case("{v19}", RISCV::V19)
                        .Case("{v20}", RISCV::V20)
                        .Case("{v21}", RISCV::V21)
                        .Case("{v22}", RISCV::V22)
                        .Case("{v23}", RISCV::V23)
                        .Case("{v24}", RISCV::V24)
                        .Case("{v25}", RISCV::V25)
                        .Case("{v26}", RISCV::V26)
                        .Case("{v27}", RISCV::V27)
                        .Case("{v28}", RISCV::V28)
                        .Case("{v29}", RISCV::V29)
                        .Case("{v30}", RISCV::V30)
                        .Case("{v31}", RISCV::V31)
                        .Default(RISCV::NoRegister);
    if (VReg != RISCV::NoRegister) {
      if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
        return std::make_pair(VReg, &RISCV::VMRegClass);
      if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
        return std::make_pair(VReg, &RISCV::VRRegClass);
      for (const auto *RC :
           {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
        if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
          VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
          return std::make_pair(VReg, RC);
        }
      }
    }
  }

  std::pair<Register, const TargetRegisterClass *> Res =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // If we picked one of the Zfinx register classes, remap it to the GPR class.
  // FIXME: When Zfinx is supported in CodeGen this will need to take the
  // Subtarget into account.
  if (Res.second == &RISCV::GPRF16RegClass ||
      Res.second == &RISCV::GPRF32RegClass ||
      Res.second == &RISCV::GPRF64RegClass)
    return std::make_pair(Res.first, &RISCV::GPRRegClass);

  return Res;
}

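// The memory constraint 'A' denotes an address held in a general-purpose
// register, as used by the AMO and LR/SC instructions. Illustrative inline
// asm use (not from this file):
//   asm volatile("amoswap.w %0, %2, %1"
//                : "=r"(Old), "+A"(*Ptr) : "r"(New) : "memory");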
unsigned
RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Currently, only length-1 constraints are supported.
  if (ConstraintCode.size() == 1) {
    switch (ConstraintCode[0]) {
    case 'A':
      return InlineAsm::Constraint_A;
    default:
      break;
    }
  }

  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}

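// Illustrative uses of the immediate constraints validated below (not from
// this file):
//   asm("addi %0, %1, %2" : "=r"(Res) : "r"(A), "I"(-42)); // 'I': simm12
//   asm volatile("csrrwi %0, mscratch, %1" : "=r"(Old) : "K"(7)); // 'K': uimm5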
void RISCVTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  // Currently, only length-1 constraints are supported.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I':
      // Validate & create a 12-bit signed immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getSExtValue();
        if (isInt<12>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'J':
      // Validate & create an integer zero operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0)
          Ops.push_back(
              DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
      return;
    case 'K':
      // Validate & create a 5-bit unsigned immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getZExtValue();
        if (isUInt<5>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'S':
      if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
                                                 GA->getValueType(0)));
      } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
        Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
                                                BA->getValueType(0)));
      }
      return;
    default:
      break;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

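// Per the standard RISC-V mapping of C/C++ atomics, a seq_cst load needs a
// leading full fence (fence rw,rw), and a release-or-stronger store needs a
// leading fence rw,w; acquire loads instead get a trailing fence r,rw (see
// emitTrailingFence below).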
Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
    return Builder.CreateFence(Ord);
  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Release);
  return nullptr;
}

Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                    Instruction *Inst,
                                                    AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Acquire);
  return nullptr;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as
  // floating-point operations cannot be used in an LR/SC sequence without
  // breaking the forward-progress guarantee.
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

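// Map an atomicrmw operation and XLen to the corresponding riscv_masked_*
// intrinsic. Illustrative IR for the expansion of an i8 atomicrmw add (the
// exact overload mangling depends on the pointer type):
//   %res = call i32 @llvm.riscv.masked.atomicrmw.add.i32(
//              ptr %alignedaddr, i32 %incr, i32 %mask, i32 %ordering)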
static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
  if (XLen == 32) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i32;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i32;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i32;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
    }
  }

  if (XLen == 64) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i64;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i64;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i64;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
    }
  }

  llvm_unreachable("Unexpected XLen");
}

Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering =
      Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
  Type *Tys[] = {AlignedAddr->getType()};
  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
      AI->getModule(),
      getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);

  if (XLen == 64) {
    Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
  }

  Value *Result;

  // We must pass the shift amount needed to sign-extend the loaded value
  // prior to performing a signed comparison for min/max. ShiftAmt is the
  // number of bits to shift the value into position. Pass
  // XLen-ShiftAmt-ValWidth, which is the number of bits to left-shift and
  // then right-shift the value in order to sign-extend it.
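  // For example (illustrative): for an i8 value held in bits [7:0] of an i32
  // word, ShiftAmt = 0 and ValWidth = 8, so SextShamt = 32 - 0 - 8 = 24;
  // shifting left and then arithmetic-right by 24 sign-extends the byte.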
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    unsigned ValWidth =
        DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LrwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }

  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
  if (XLen == 64) {
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
  }
  Type *Tys[] = {AlignedAddr->getType()};
  Function *MaskedCmpXchg =
      Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
  return false;
}

bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
                                               EVT VT) const {
  if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple())
    return false;

  switch (FPVT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasStdExtZfh();
  case MVT::f32:
    return Subtarget.hasStdExtF();
  case MVT::f64:
    return Subtarget.hasStdExtD();
  default:
    return false;
  }
}

unsigned RISCVTargetLowering::getJumpTableEncoding() const {
  // If we are using the small code model, we can reduce the size of each
  // jump table entry to 4 bytes.
  if (Subtarget.is64Bit() && !isPositionIndependent() &&
      getTargetMachine().getCodeModel() == CodeModel::Small) {
    return MachineJumpTableInfo::EK_Custom32;
  }
  return TargetLowering::getJumpTableEncoding();
}

const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
    const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
    unsigned uid, MCContext &Ctx) const {
  assert(Subtarget.is64Bit() && !isPositionIndependent() &&
         getTargetMachine().getCodeModel() == CodeModel::Small);
  return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
}

bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                     EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasStdExtZfh();
  case MVT::f32:
    return Subtarget.hasStdExtF();
  case MVT::f64:
    return Subtarget.hasStdExtD();
  default:
    break;
  }

  return false;
}

Register RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X10;
}

Register RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X11;
}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions when a libcall argument
  // or return value is of f32 type under the LP64 ABI.
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
    return false;

  return true;
}

bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
  if (Subtarget.is64Bit() && Type == MVT::i32)
    return true;

  return IsSigned;
}

bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                 SDValue C) const {
  // Check integral scalar types.
  if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
    if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
      return false;
    if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
      // Break the MUL into an SLLI and an ADD/SUB.
      const APInt &Imm = ConstNode->getAPIntValue();
      if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
          (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
        return true;
      // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
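      // For example (illustrative), with Zba:
      //   X * 4100 = (X << 2) + (X << 12) = sh2add X, (slli X, 12),
      // since 4100 - 4 = 4096 is a power of 2 and 4100 is not simm12.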
      if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
          ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
           (Imm - 8).isPowerOf2()))
        return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size is at least XLen.
      if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
        return false;
      // Break the MUL into two SLLI instructions and an ADD/SUB, if Imm needs
      // a pair of LUI/ADDI.
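      // For example (illustrative): X * 768 = ((X << 2) - X) << 8, since
      // 768 = 3 << 8 and 3 + 1 is a power of 2.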
      if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
        APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
        if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
            (1 - ImmS).isPowerOf2())
          return true;
      }
    }
  }

  return false;
}

bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
                                                      SDValue ConstNode) const {
  // Let the DAGCombiner decide for vectors.
  EVT VT = AddNode.getValueType();
  if (VT.isVector())
    return true;

  // Let the DAGCombiner decide for larger types.
  if (VT.getScalarSizeInBits() > Subtarget.getXLen())
    return true;

  // It is not profitable if c1 is simm12 while c1*c2 is not.
  ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
  ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
  const APInt &C1 = C1Node->getAPIntValue();
  const APInt &C2 = C2Node->getAPIntValue();
  if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
    return false;

  // Default to true and let the DAGCombiner decide.
  return true;
}

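// Misaligned vector accesses are allowed as long as each element is itself
// naturally aligned; misaligned scalar accesses are not allowed here.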
bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  if (!VT.isVector())
    return false;

  EVT ElemVT = VT.getVectorElementType();
  if (Alignment >= ElemVT.getStoreSize()) {
    if (Fast)
      *Fast = true;
    return true;
  }

  return false;
}

bool RISCVTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  EVT ValueVT = Val.getValueType();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Cast the f16 to i16, extend to i32, pad the upper 16 bits with ones to
    // form a NaN-boxed float, and cast to f32.
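    // For example (illustrative): f16 1.0 has bits 0x3C00, which becomes the
    // f32 bit pattern 0xFFFF3C00, a NaN-boxed value per the RISC-V FP
    // calling convention.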
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
                      DAG.getConstant(0xFFFF0000, DL, MVT::i32));
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
    Parts[0] = Val;
    return true;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      assert(PartVTBitSize >= ValueVTBitSize);
      // If the element types differ, bitcast to a vector with the same
      // element type as PartVT first.
      // For example, to copy a <vscale x 1 x i8> value into
      // <vscale x 4 x i16>, we first widen <vscale x 1 x i8> to
      // <vscale x 8 x i8> with an insert_subvector, then bitcast the result
      // to <vscale x 4 x i16>.
      if (ValueEltVT != PartEltVT) {
        if (PartVTBitSize > ValueVTBitSize) {
          unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
          assert(Count != 0 && "The number of elements should not be zero.");
          EVT SameEltTypeVT =
              EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
          Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
                            DAG.getUNDEF(SameEltTypeVT), Val,
                            DAG.getVectorIdxConstant(0, DL));
        }
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
      } else {
        Val =
            DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                        Val, DAG.getVectorIdxConstant(0, DL));
      }
      Parts[0] = Val;
      return true;
    }
  }
  return false;
}

SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    SDValue Val = Parts[0];

    // Cast the f32 to i32, truncate to i16, and cast back to f16.
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
    return Val;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    SDValue Val = Parts[0];
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      assert(PartVTBitSize >= ValueVTBitSize);
      EVT SameEltTypeVT = ValueVT;
      // If the element types differ, first convert to a vector with the same
      // element type as PartVT.
      // For example, to extract a <vscale x 1 x i8> value from
      // <vscale x 4 x i16>, we first bitcast <vscale x 4 x i16> to
      // <vscale x 8 x i8>, then extract the <vscale x 1 x i8> subvector.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
        SameEltTypeVT =
            EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
        Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
      }
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
                        DAG.getVectorIdxConstant(0, DL));
      return Val;
    }
  }
  return SDValue();
}

SDValue
RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                   SelectionDAG &DAG,
                                   SmallVectorImpl<SDNode *> &Created) const {
  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
  if (isIntDivCheap(N->getValueType(0), Attr))
    return SDValue(N, 0); // Lower SDIV as SDIV

  assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
         "Unexpected divisor!");

  // A conditional move is needed, so do the transformation only if the Zbt
  // extension is enabled.
  if (!Subtarget.hasStdExtZbt())
    return SDValue();

  // When |Divisor| >= 2^12, it isn't profitable to do the transformation.
  // Dividing by 2 would also lengthen the critical path, so we keep the
  // original DAGs for these cases.
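  // For example (illustrative), for X / 8 (Lg2 == 3) the nodes built below
  // compute:
  //   Add = X + 7; Sel = (X < 0) ? Add : X; Result = Sel >> 3 (arithmetic)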
  unsigned Lg2 = Divisor.countTrailingZeros();
  if (Lg2 == 1 || Lg2 >= 12)
    return SDValue();

  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);
  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);

  // Add (N0 < 0) ? Pow2 - 1 : 0;
  SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
  SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
  SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);

  Created.push_back(Cmp.getNode());
  Created.push_back(Add.getNode());
  Created.push_back(Sel.getNode());

  // Divide by pow2.
  SDValue SRA =
      DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));

  // If we're dividing by a positive value, we're done.  Otherwise, we must
  // negate the result.
  if (Divisor.isNonNegative())
    return SRA;

  Created.push_back(SRA.getNode());
  return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
}

#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

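// Resolve a register name used by @llvm.read_register / @llvm.write_register,
// e.g. (illustrative IR):
//   %sp = call i64 @llvm.read_register.i64(metadata !0) ; !0 = !{!"sp"}
// The named register must be reserved (always, or by the user, e.g. via
// -ffixed-<reg>); otherwise a fatal error is reported.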
Register
RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                       const MachineFunction &MF) const {
  Register Reg = MatchRegisterAltName(RegName);
  if (Reg == RISCV::NoRegister)
    Reg = MatchRegisterName(RegName);
  if (Reg == RISCV::NoRegister)
    report_fatal_error(
        Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
    report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
                             StringRef(RegName) + "\"."));
  return Reg;
}

namespace llvm {
namespace RISCVVIntrinsicsTable {

#define GET_RISCVVIntrinsicsTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVVIntrinsicsTable

} // namespace llvm