//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
28 #include "llvm/CodeGen/ValueTypes.h"
29 #include "llvm/IR/DiagnosticInfo.h"
30 #include "llvm/IR/DiagnosticPrinter.h"
31 #include "llvm/IR/IntrinsicsRISCV.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/KnownBits.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "riscv-lower"
42 
43 STATISTIC(NumTailCalls, "Number of tail calls");
44 
45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
46                                          const RISCVSubtarget &STI)
47     : TargetLowering(TM), Subtarget(STI) {
48 
49   if (Subtarget.isRV32E())
50     report_fatal_error("Codegen not yet implemented for RV32E");
51 
52   RISCVABI::ABI ABI = Subtarget.getTargetABI();
53   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
54 
55   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
56       !Subtarget.hasStdExtF()) {
57     errs() << "Hard-float 'f' ABI can't be used for a target that "
58                 "doesn't support the F instruction set extension (ignoring "
59                           "target-abi)\n";
60     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
61   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
62              !Subtarget.hasStdExtD()) {
63     errs() << "Hard-float 'd' ABI can't be used for a target that "
64               "doesn't support the D instruction set extension (ignoring "
65               "target-abi)\n";
66     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
67   }
68 
69   switch (ABI) {
70   default:
71     report_fatal_error("Don't know how to lower this ABI");
72   case RISCVABI::ABI_ILP32:
73   case RISCVABI::ABI_ILP32F:
74   case RISCVABI::ABI_ILP32D:
75   case RISCVABI::ABI_LP64:
76   case RISCVABI::ABI_LP64F:
77   case RISCVABI::ABI_LP64D:
78     break;
79   }
80 
81   MVT XLenVT = Subtarget.getXLenVT();
82 
83   // Set up the register classes.
84   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
85 
86   if (Subtarget.hasStdExtZfh())
87     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
88   if (Subtarget.hasStdExtF())
89     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
90   if (Subtarget.hasStdExtD())
91     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
92 
93   static const MVT::SimpleValueType BoolVecVTs[] = {
94       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
95       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
96   static const MVT::SimpleValueType IntVecVTs[] = {
97       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
98       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
99       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
100       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
101       MVT::nxv4i64, MVT::nxv8i64};
102   static const MVT::SimpleValueType F16VecVTs[] = {
103       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
104       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
105   static const MVT::SimpleValueType F32VecVTs[] = {
106       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
107   static const MVT::SimpleValueType F64VecVTs[] = {
108       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
109 
110   if (Subtarget.hasStdExtV()) {
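    // Scalable vector types are assigned a register class by the minimum size
    // of their register group: anything up to 64 bits fits in a single VR;
    // 128, 256 and 512 bits require the VRM2, VRM4 and VRM8 groupings.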
111     auto addRegClassForRVV = [this](MVT VT) {
112       unsigned Size = VT.getSizeInBits().getKnownMinValue();
113       assert(Size <= 512 && isPowerOf2_32(Size));
114       const TargetRegisterClass *RC;
115       if (Size <= 64)
116         RC = &RISCV::VRRegClass;
117       else if (Size == 128)
118         RC = &RISCV::VRM2RegClass;
119       else if (Size == 256)
120         RC = &RISCV::VRM4RegClass;
121       else
122         RC = &RISCV::VRM8RegClass;
123 
124       addRegisterClass(VT, RC);
125     };
126 
127     for (MVT VT : BoolVecVTs)
128       addRegClassForRVV(VT);
129     for (MVT VT : IntVecVTs)
130       addRegClassForRVV(VT);
131 
132     if (Subtarget.hasStdExtZfh())
133       for (MVT VT : F16VecVTs)
134         addRegClassForRVV(VT);
135 
136     if (Subtarget.hasStdExtF())
137       for (MVT VT : F32VecVTs)
138         addRegClassForRVV(VT);
139 
140     if (Subtarget.hasStdExtD())
141       for (MVT VT : F64VecVTs)
142         addRegClassForRVV(VT);
143 
144     if (Subtarget.useRVVForFixedLengthVectors()) {
145       auto addRegClassForFixedVectors = [this](MVT VT) {
146         MVT ContainerVT = getContainerForFixedLengthVector(VT);
147         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
148         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
149         addRegisterClass(VT, TRI.getRegClass(RCID));
150       };
151       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
152         if (useRVVForFixedLengthVectorVT(VT))
153           addRegClassForFixedVectors(VT);
154 
155       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
156         if (useRVVForFixedLengthVectorVT(VT))
157           addRegClassForFixedVectors(VT);
158     }
159   }
160 
161   // Compute derived properties from the register classes.
162   computeRegisterProperties(STI.getRegisterInfo());
163 
164   setStackPointerRegisterToSaveRestore(RISCV::X2);
165 
166   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
167     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
168 
169   // TODO: add all necessary setOperationAction calls.
170   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
171 
172   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
173   setOperationAction(ISD::BR_CC, XLenVT, Expand);
174   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
175   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
176 
177   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
178   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
179 
180   setOperationAction(ISD::VASTART, MVT::Other, Custom);
181   setOperationAction(ISD::VAARG, MVT::Other, Expand);
182   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
183   setOperationAction(ISD::VAEND, MVT::Other, Expand);
184 
185   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
186   if (!Subtarget.hasStdExtZbb()) {
187     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
188     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
189   }
190 
191   if (Subtarget.is64Bit()) {
192     setOperationAction(ISD::ADD, MVT::i32, Custom);
193     setOperationAction(ISD::SUB, MVT::i32, Custom);
194     setOperationAction(ISD::SHL, MVT::i32, Custom);
195     setOperationAction(ISD::SRA, MVT::i32, Custom);
196     setOperationAction(ISD::SRL, MVT::i32, Custom);
197 
198     setOperationAction(ISD::UADDO, MVT::i32, Custom);
199     setOperationAction(ISD::USUBO, MVT::i32, Custom);
200     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
201     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
202   }
203 
204   if (!Subtarget.hasStdExtM()) {
205     setOperationAction(ISD::MUL, XLenVT, Expand);
206     setOperationAction(ISD::MULHS, XLenVT, Expand);
207     setOperationAction(ISD::MULHU, XLenVT, Expand);
208     setOperationAction(ISD::SDIV, XLenVT, Expand);
209     setOperationAction(ISD::UDIV, XLenVT, Expand);
210     setOperationAction(ISD::SREM, XLenVT, Expand);
211     setOperationAction(ISD::UREM, XLenVT, Expand);
212   } else {
213     if (Subtarget.is64Bit()) {
214       setOperationAction(ISD::MUL, MVT::i32, Custom);
215       setOperationAction(ISD::MUL, MVT::i128, Custom);
216 
217       setOperationAction(ISD::SDIV, MVT::i8, Custom);
218       setOperationAction(ISD::UDIV, MVT::i8, Custom);
219       setOperationAction(ISD::UREM, MVT::i8, Custom);
220       setOperationAction(ISD::SDIV, MVT::i16, Custom);
221       setOperationAction(ISD::UDIV, MVT::i16, Custom);
222       setOperationAction(ISD::UREM, MVT::i16, Custom);
223       setOperationAction(ISD::SDIV, MVT::i32, Custom);
224       setOperationAction(ISD::UDIV, MVT::i32, Custom);
225       setOperationAction(ISD::UREM, MVT::i32, Custom);
226     } else {
227       setOperationAction(ISD::MUL, MVT::i64, Custom);
228     }
229   }
230 
231   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
232   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
233   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
234   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
235 
236   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
237   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
238   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
239 
240   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
241     if (Subtarget.is64Bit()) {
242       setOperationAction(ISD::ROTL, MVT::i32, Custom);
243       setOperationAction(ISD::ROTR, MVT::i32, Custom);
244     }
245   } else {
246     setOperationAction(ISD::ROTL, XLenVT, Expand);
247     setOperationAction(ISD::ROTR, XLenVT, Expand);
248   }
249 
250   if (Subtarget.hasStdExtZbp()) {
251     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
252     // more combining.
253     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
254     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
255     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
256     // BSWAP i8 doesn't exist.
257     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
258     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
259 
260     if (Subtarget.is64Bit()) {
261       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
262       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
263     }
264   } else {
265     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
266     // pattern match it directly in isel.
267     setOperationAction(ISD::BSWAP, XLenVT,
268                        Subtarget.hasStdExtZbb() ? Legal : Expand);
269   }
270 
271   if (Subtarget.hasStdExtZbb()) {
272     setOperationAction(ISD::SMIN, XLenVT, Legal);
273     setOperationAction(ISD::SMAX, XLenVT, Legal);
274     setOperationAction(ISD::UMIN, XLenVT, Legal);
275     setOperationAction(ISD::UMAX, XLenVT, Legal);
276 
277     if (Subtarget.is64Bit()) {
278       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
279       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
280       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
281       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
282     }
283   } else {
284     setOperationAction(ISD::CTTZ, XLenVT, Expand);
285     setOperationAction(ISD::CTLZ, XLenVT, Expand);
286     setOperationAction(ISD::CTPOP, XLenVT, Expand);
287   }
288 
289   if (Subtarget.hasStdExtZbt()) {
290     setOperationAction(ISD::FSHL, XLenVT, Custom);
291     setOperationAction(ISD::FSHR, XLenVT, Custom);
292     setOperationAction(ISD::SELECT, XLenVT, Legal);
293 
294     if (Subtarget.is64Bit()) {
295       setOperationAction(ISD::FSHL, MVT::i32, Custom);
296       setOperationAction(ISD::FSHR, MVT::i32, Custom);
297     }
298   } else {
299     setOperationAction(ISD::SELECT, XLenVT, Custom);
300   }
301 
302   ISD::CondCode FPCCToExpand[] = {
303       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
304       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
305       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
306 
307   ISD::NodeType FPOpToExpand[] = {
308       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
309       ISD::FP_TO_FP16};
310 
311   if (Subtarget.hasStdExtZfh())
312     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
313 
314   if (Subtarget.hasStdExtZfh()) {
315     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
316     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
317     setOperationAction(ISD::LRINT, MVT::f16, Legal);
318     setOperationAction(ISD::LLRINT, MVT::f16, Legal);
319     setOperationAction(ISD::LROUND, MVT::f16, Legal);
320     setOperationAction(ISD::LLROUND, MVT::f16, Legal);
321     for (auto CC : FPCCToExpand)
322       setCondCodeAction(CC, MVT::f16, Expand);
323     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
324     setOperationAction(ISD::SELECT, MVT::f16, Custom);
325     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
326     for (auto Op : FPOpToExpand)
327       setOperationAction(Op, MVT::f16, Expand);
328   }
329 
330   if (Subtarget.hasStdExtF()) {
331     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
332     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
333     setOperationAction(ISD::LRINT, MVT::f32, Legal);
334     setOperationAction(ISD::LLRINT, MVT::f32, Legal);
335     setOperationAction(ISD::LROUND, MVT::f32, Legal);
336     setOperationAction(ISD::LLROUND, MVT::f32, Legal);
337     for (auto CC : FPCCToExpand)
338       setCondCodeAction(CC, MVT::f32, Expand);
339     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
340     setOperationAction(ISD::SELECT, MVT::f32, Custom);
341     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
342     for (auto Op : FPOpToExpand)
343       setOperationAction(Op, MVT::f32, Expand);
344     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
345     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
346   }
347 
348   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
349     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
350 
351   if (Subtarget.hasStdExtD()) {
352     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
353     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
354     setOperationAction(ISD::LRINT, MVT::f64, Legal);
355     setOperationAction(ISD::LLRINT, MVT::f64, Legal);
356     setOperationAction(ISD::LROUND, MVT::f64, Legal);
357     setOperationAction(ISD::LLROUND, MVT::f64, Legal);
358     for (auto CC : FPCCToExpand)
359       setCondCodeAction(CC, MVT::f64, Expand);
360     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
361     setOperationAction(ISD::SELECT, MVT::f64, Custom);
362     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
363     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
364     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
365     for (auto Op : FPOpToExpand)
366       setOperationAction(Op, MVT::f64, Expand);
367     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
368     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
369   }
370 
371   if (Subtarget.is64Bit()) {
372     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
373     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
374     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
375     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
376   }
377 
378   if (Subtarget.hasStdExtF()) {
379     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
380     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
381   }
382 
383   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
384   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
385   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
386   setOperationAction(ISD::JumpTable, XLenVT, Custom);
387 
388   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
389 
390   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
391   // Unfortunately this can't be determined just from the ISA naming string.
392   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
393                      Subtarget.is64Bit() ? Legal : Custom);
394 
395   setOperationAction(ISD::TRAP, MVT::Other, Legal);
396   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
397   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
398   if (Subtarget.is64Bit())
399     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
400 
401   if (Subtarget.hasStdExtA()) {
402     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
403     setMinCmpXchgSizeInBits(32);
404   } else {
405     setMaxAtomicSizeInBitsSupported(0);
406   }
407 
408   setBooleanContents(ZeroOrOneBooleanContent);
409 
410   if (Subtarget.hasStdExtV()) {
411     setBooleanVectorContents(ZeroOrOneBooleanContent);
412 
413     setOperationAction(ISD::VSCALE, XLenVT, Custom);
414 
415     // RVV intrinsics may have illegal operands.
416     // We also need to custom legalize vmv.x.s.
417     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
418     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
419     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
420     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
421     if (Subtarget.is64Bit()) {
422       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
423     } else {
424       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
425       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
426     }
427 
428     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
429 
430     static unsigned IntegerVPOps[] = {
431         ISD::VP_ADD,  ISD::VP_SUB,  ISD::VP_MUL, ISD::VP_SDIV, ISD::VP_UDIV,
432         ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,   ISD::VP_XOR,
433         ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL};
434 
435     static unsigned FloatingPointVPOps[] = {ISD::VP_FADD, ISD::VP_FSUB,
436                                             ISD::VP_FMUL, ISD::VP_FDIV};
437 
438     if (!Subtarget.is64Bit()) {
439       // We must custom-lower certain vXi64 operations on RV32 due to the vector
440       // element type being illegal.
441       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
442       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
443 
444       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
445       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
446       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
447       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
448       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
449       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
450       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
451       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
452     }
453 
454     for (MVT VT : BoolVecVTs) {
455       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
456 
457       // Mask VTs are custom-expanded into a series of standard nodes
458       setOperationAction(ISD::TRUNCATE, VT, Custom);
459       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
460       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
461       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
462 
463       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
464       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
465 
466       setOperationAction(ISD::SELECT, VT, Custom);
467       setOperationAction(ISD::SELECT_CC, VT, Expand);
468       setOperationAction(ISD::VSELECT, VT, Expand);
469 
470       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
471       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
472       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
473 
474       // RVV has native int->float & float->int conversions where the
475       // element type sizes are within one power-of-two of each other. Any
476       // wider distances between type sizes have to be lowered as sequences
477       // which progressively narrow the gap in stages.
478       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
479       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
480       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
481       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
482 
483       // Expand all extending loads to types larger than this, and truncating
484       // stores from types larger than this.
485       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
486         setTruncStoreAction(OtherVT, VT, Expand);
487         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
488         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
489         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
490       }
491     }
492 
493     for (MVT VT : IntVecVTs) {
494       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
495       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
496 
497       setOperationAction(ISD::SMIN, VT, Legal);
498       setOperationAction(ISD::SMAX, VT, Legal);
499       setOperationAction(ISD::UMIN, VT, Legal);
500       setOperationAction(ISD::UMAX, VT, Legal);
501 
502       setOperationAction(ISD::ROTL, VT, Expand);
503       setOperationAction(ISD::ROTR, VT, Expand);
504 
505       // Custom-lower extensions and truncations from/to mask types.
506       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
507       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
508       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
509 
510       // RVV has native int->float & float->int conversions where the
511       // element type sizes are within one power-of-two of each other. Any
512       // wider distances between type sizes have to be lowered as sequences
513       // which progressively narrow the gap in stages.
514       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
515       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
516       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
517       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
518 
519       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
520       // nodes which truncate by one power of two at a time.
521       setOperationAction(ISD::TRUNCATE, VT, Custom);
522 
523       // Custom-lower insert/extract operations to simplify patterns.
524       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
525       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
526 
527       // Custom-lower reduction operations to set up the corresponding custom
528       // nodes' operands.
529       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
530       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
531       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
532       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
533       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
534       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
535       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
536       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
537 
538       for (unsigned VPOpc : IntegerVPOps)
539         setOperationAction(VPOpc, VT, Custom);
540 
541       setOperationAction(ISD::LOAD, VT, Custom);
542       setOperationAction(ISD::STORE, VT, Custom);
543 
544       setOperationAction(ISD::MLOAD, VT, Custom);
545       setOperationAction(ISD::MSTORE, VT, Custom);
546       setOperationAction(ISD::MGATHER, VT, Custom);
547       setOperationAction(ISD::MSCATTER, VT, Custom);
548 
549       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
550       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
551       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
552 
553       setOperationAction(ISD::SELECT, VT, Custom);
554       setOperationAction(ISD::SELECT_CC, VT, Expand);
555 
556       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
557       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
558 
559       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
560         setTruncStoreAction(VT, OtherVT, Expand);
561         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
562         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
563         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
564       }
565     }
566 
567     // Expand various CCs to best match the RVV ISA, which natively supports UNE
568     // but no other unordered comparisons, and supports all ordered comparisons
569     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
570     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
571     // and we pattern-match those back to the "original", swapping operands once
572     // more. This way we catch both operations and both "vf" and "fv" forms with
573     // fewer patterns.
574     ISD::CondCode VFPCCToExpand[] = {
575         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
576         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
577         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
578     };
579 
580     // Sets common operation actions on RVV floating-point vector types.
581     const auto SetCommonVFPActions = [&](MVT VT) {
582       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
583       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
584       // sizes are within one power-of-two of each other. Therefore conversions
585       // between vXf16 and vXf64 must be lowered as sequences which convert via
586       // vXf32.
587       setOperationAction(ISD::FP_ROUND, VT, Custom);
588       setOperationAction(ISD::FP_EXTEND, VT, Custom);
589       // Custom-lower insert/extract operations to simplify patterns.
590       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
591       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
592       // Expand various condition codes (explained above).
593       for (auto CC : VFPCCToExpand)
594         setCondCodeAction(CC, VT, Expand);
595 
596       setOperationAction(ISD::FMINNUM, VT, Legal);
597       setOperationAction(ISD::FMAXNUM, VT, Legal);
598 
599       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
600       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
601       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
602       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
603       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
604 
605       setOperationAction(ISD::LOAD, VT, Custom);
606       setOperationAction(ISD::STORE, VT, Custom);
607 
608       setOperationAction(ISD::MLOAD, VT, Custom);
609       setOperationAction(ISD::MSTORE, VT, Custom);
610       setOperationAction(ISD::MGATHER, VT, Custom);
611       setOperationAction(ISD::MSCATTER, VT, Custom);
612 
613       setOperationAction(ISD::SELECT, VT, Custom);
614       setOperationAction(ISD::SELECT_CC, VT, Expand);
615 
616       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
617       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
618       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
619 
620       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
621 
622       for (unsigned VPOpc : FloatingPointVPOps)
623         setOperationAction(VPOpc, VT, Custom);
624     };
625 
626     // Sets common extload/truncstore actions on RVV floating-point vector
627     // types.
628     const auto SetCommonVFPExtLoadTruncStoreActions =
629         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
630           for (auto SmallVT : SmallerVTs) {
631             setTruncStoreAction(VT, SmallVT, Expand);
632             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
633           }
634         };
635 
636     if (Subtarget.hasStdExtZfh())
637       for (MVT VT : F16VecVTs)
638         SetCommonVFPActions(VT);
639 
640     for (MVT VT : F32VecVTs) {
641       if (Subtarget.hasStdExtF())
642         SetCommonVFPActions(VT);
643       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
644     }
645 
646     for (MVT VT : F64VecVTs) {
647       if (Subtarget.hasStdExtD())
648         SetCommonVFPActions(VT);
649       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
650       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
651     }
652 
653     if (Subtarget.useRVVForFixedLengthVectors()) {
654       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
655         if (!useRVVForFixedLengthVectorVT(VT))
656           continue;
657 
658         // By default everything must be expanded.
659         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
660           setOperationAction(Op, VT, Expand);
661         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
662           setTruncStoreAction(VT, OtherVT, Expand);
663           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
664           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
665           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
666         }
667 
668         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
669         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
670         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
671 
672         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
673         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
674 
675         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
676         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
677 
678         setOperationAction(ISD::LOAD, VT, Custom);
679         setOperationAction(ISD::STORE, VT, Custom);
680 
681         setOperationAction(ISD::SETCC, VT, Custom);
682 
683         setOperationAction(ISD::SELECT, VT, Custom);
684 
685         setOperationAction(ISD::TRUNCATE, VT, Custom);
686 
687         setOperationAction(ISD::BITCAST, VT, Custom);
688 
689         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
690         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
691         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
692 
693         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
694         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
695         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
696         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
697 
        // Operations below differ between mask vectors and other vectors.
699         if (VT.getVectorElementType() == MVT::i1) {
700           setOperationAction(ISD::AND, VT, Custom);
701           setOperationAction(ISD::OR, VT, Custom);
702           setOperationAction(ISD::XOR, VT, Custom);
703           continue;
704         }
705 
706         // Use SPLAT_VECTOR to prevent type legalization from destroying the
707         // splats when type legalizing i64 scalar on RV32.
708         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
709         // improvements first.
710         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
711           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
712           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
713         }
714 
715         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
716         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
717 
718         setOperationAction(ISD::MLOAD, VT, Custom);
719         setOperationAction(ISD::MSTORE, VT, Custom);
720         setOperationAction(ISD::MGATHER, VT, Custom);
721         setOperationAction(ISD::MSCATTER, VT, Custom);
722         setOperationAction(ISD::ADD, VT, Custom);
723         setOperationAction(ISD::MUL, VT, Custom);
724         setOperationAction(ISD::SUB, VT, Custom);
725         setOperationAction(ISD::AND, VT, Custom);
726         setOperationAction(ISD::OR, VT, Custom);
727         setOperationAction(ISD::XOR, VT, Custom);
728         setOperationAction(ISD::SDIV, VT, Custom);
729         setOperationAction(ISD::SREM, VT, Custom);
730         setOperationAction(ISD::UDIV, VT, Custom);
731         setOperationAction(ISD::UREM, VT, Custom);
732         setOperationAction(ISD::SHL, VT, Custom);
733         setOperationAction(ISD::SRA, VT, Custom);
734         setOperationAction(ISD::SRL, VT, Custom);
735 
736         setOperationAction(ISD::SMIN, VT, Custom);
737         setOperationAction(ISD::SMAX, VT, Custom);
738         setOperationAction(ISD::UMIN, VT, Custom);
739         setOperationAction(ISD::UMAX, VT, Custom);
        setOperationAction(ISD::ABS, VT, Custom);
741 
742         setOperationAction(ISD::MULHS, VT, Custom);
743         setOperationAction(ISD::MULHU, VT, Custom);
744 
745         setOperationAction(ISD::VSELECT, VT, Custom);
746         setOperationAction(ISD::SELECT_CC, VT, Expand);
747 
748         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
749         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
750         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
751 
752         // Custom-lower reduction operations to set up the corresponding custom
753         // nodes' operands.
754         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
755         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
756         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
757         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
758         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
759 
760         for (unsigned VPOpc : IntegerVPOps)
761           setOperationAction(VPOpc, VT, Custom);
762       }
763 
764       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
765         if (!useRVVForFixedLengthVectorVT(VT))
766           continue;
767 
768         // By default everything must be expanded.
769         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
770           setOperationAction(Op, VT, Expand);
771         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
772           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
773           setTruncStoreAction(VT, OtherVT, Expand);
774         }
775 
776         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
777         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
778         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
779 
780         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
781         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
782         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
783         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
784 
785         setOperationAction(ISD::LOAD, VT, Custom);
786         setOperationAction(ISD::STORE, VT, Custom);
787         setOperationAction(ISD::MLOAD, VT, Custom);
788         setOperationAction(ISD::MSTORE, VT, Custom);
789         setOperationAction(ISD::MGATHER, VT, Custom);
790         setOperationAction(ISD::MSCATTER, VT, Custom);
791         setOperationAction(ISD::FADD, VT, Custom);
792         setOperationAction(ISD::FSUB, VT, Custom);
793         setOperationAction(ISD::FMUL, VT, Custom);
794         setOperationAction(ISD::FDIV, VT, Custom);
795         setOperationAction(ISD::FNEG, VT, Custom);
796         setOperationAction(ISD::FABS, VT, Custom);
797         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
798         setOperationAction(ISD::FSQRT, VT, Custom);
799         setOperationAction(ISD::FMA, VT, Custom);
800         setOperationAction(ISD::FMINNUM, VT, Custom);
801         setOperationAction(ISD::FMAXNUM, VT, Custom);
802 
803         setOperationAction(ISD::FP_ROUND, VT, Custom);
804         setOperationAction(ISD::FP_EXTEND, VT, Custom);
805 
806         for (auto CC : VFPCCToExpand)
807           setCondCodeAction(CC, VT, Expand);
808 
809         setOperationAction(ISD::VSELECT, VT, Custom);
810         setOperationAction(ISD::SELECT, VT, Custom);
811         setOperationAction(ISD::SELECT_CC, VT, Expand);
812 
813         setOperationAction(ISD::BITCAST, VT, Custom);
814 
815         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
816         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
817         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
818         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
819 
820         for (unsigned VPOpc : FloatingPointVPOps)
821           setOperationAction(VPOpc, VT, Custom);
822       }
823 
824       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
825       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
826       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
827       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
828       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
829       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
830       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
831       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
832     }
833   }
834 
835   // Function alignments.
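  // The compressed (C) extension only requires 2-byte instruction alignment.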
836   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
837   setMinFunctionAlignment(FunctionAlignment);
838   setPrefFunctionAlignment(FunctionAlignment);
839 
840   setMinimumJumpTableEntries(5);
841 
842   // Jumps are expensive, compared to logic
843   setJumpIsExpensive();
844 
845   // We can use any register for comparisons
846   setHasMultipleConditionRegisters();
847 
848   setTargetDAGCombine(ISD::AND);
849   setTargetDAGCombine(ISD::OR);
850   setTargetDAGCombine(ISD::XOR);
851   setTargetDAGCombine(ISD::ANY_EXTEND);
852   if (Subtarget.hasStdExtV()) {
853     setTargetDAGCombine(ISD::FCOPYSIGN);
854     setTargetDAGCombine(ISD::MGATHER);
855     setTargetDAGCombine(ISD::MSCATTER);
856     setTargetDAGCombine(ISD::SRA);
857     setTargetDAGCombine(ISD::SRL);
858     setTargetDAGCombine(ISD::SHL);
859   }
860 }
861 
862 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
863                                             LLVMContext &Context,
864                                             EVT VT) const {
865   if (!VT.isVector())
866     return getPointerTy(DL);
867   if (Subtarget.hasStdExtV() &&
868       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
869     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
870   return VT.changeVectorElementTypeToInteger();
871 }
872 
873 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
874   return Subtarget.getXLenVT();
875 }
876 
877 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
878                                              const CallInst &I,
879                                              MachineFunction &MF,
880                                              unsigned Intrinsic) const {
881   switch (Intrinsic) {
882   default:
883     return false;
884   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
885   case Intrinsic::riscv_masked_atomicrmw_add_i32:
886   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
887   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
888   case Intrinsic::riscv_masked_atomicrmw_max_i32:
889   case Intrinsic::riscv_masked_atomicrmw_min_i32:
890   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
891   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
892   case Intrinsic::riscv_masked_cmpxchg_i32: {
893     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
894     Info.opc = ISD::INTRINSIC_W_CHAIN;
895     Info.memVT = MVT::getVT(PtrTy->getElementType());
896     Info.ptrVal = I.getArgOperand(0);
897     Info.offset = 0;
898     Info.align = Align(4);
899     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
900                  MachineMemOperand::MOVolatile;
901     return true;
902   }
903   }
904 }
905 
906 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
907                                                 const AddrMode &AM, Type *Ty,
908                                                 unsigned AS,
909                                                 Instruction *I) const {
910   // No global is ever allowed as a base.
911   if (AM.BaseGV)
912     return false;
913 
914   // Require a 12-bit signed offset.
915   if (!isInt<12>(AM.BaseOffs))
916     return false;
917 
918   switch (AM.Scale) {
919   case 0: // "r+i" or just "i", depending on HasBaseReg.
920     break;
921   case 1:
922     if (!AM.HasBaseReg) // allow "r+i".
923       break;
924     return false; // disallow "r+r" or "r+r+i".
925   default:
926     return false;
927   }
928 
929   return true;
930 }
931 
932 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
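  // SLTI/SLTIU compare against a 12-bit signed immediate.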
933   return isInt<12>(Imm);
934 }
935 
936 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
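  // ADDI (and ADDIW on RV64) takes a 12-bit signed immediate.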
937   return isInt<12>(Imm);
938 }
939 
940 // On RV32, 64-bit integers are split into their high and low parts and held
941 // in two different registers, so the trunc is free since the low register can
942 // just be used.
943 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
944   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
945     return false;
946   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
947   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
948   return (SrcBits == 64 && DestBits == 32);
949 }
950 
951 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
952   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
953       !SrcVT.isInteger() || !DstVT.isInteger())
954     return false;
955   unsigned SrcBits = SrcVT.getSizeInBits();
956   unsigned DestBits = DstVT.getSizeInBits();
957   return (SrcBits == 64 && DestBits == 32);
958 }
959 
960 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
961   // Zexts are free if they can be combined with a load.
962   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
963     EVT MemVT = LD->getMemoryVT();
964     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
965          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
966         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
967          LD->getExtensionType() == ISD::ZEXTLOAD))
968       return true;
969   }
970 
971   return TargetLowering::isZExtFree(Val, VT2);
972 }
973 
974 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
975   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
976 }
977 
978 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
979   return Subtarget.hasStdExtZbb();
980 }
981 
982 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
983   return Subtarget.hasStdExtZbb();
984 }
985 
986 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
987                                        bool ForCodeSize) const {
988   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
989     return false;
990   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
991     return false;
992   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
993     return false;
994   if (Imm.isNegZero())
995     return false;
996   return Imm.isZero();
997 }
998 
999 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1000   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1001          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1002          (VT == MVT::f64 && Subtarget.hasStdExtD());
1003 }
1004 
1005 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1006                                                       CallingConv::ID CC,
1007                                                       EVT VT) const {
1008   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
1009   // end up using a GPR but that will be decided based on ABI.
1010   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1011     return MVT::f32;
1012 
1013   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1014 }
1015 
unsigned RISCVTargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
1019   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
1020   // end up using a GPR but that will be decided based on ABI.
1021   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1022     return 1;
1023 
1024   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1025 }
1026 
1027 // Changes the condition code and swaps operands if necessary, so the SetCC
1028 // operation matches one of the comparisons supported directly by branches
1029 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1030 // with 1/-1.
1031 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1032                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1033   // Convert X > -1 to X >= 0.
1034   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1035     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1036     CC = ISD::SETGE;
1037     return;
1038   }
1039   // Convert X < 1 to 0 >= X.
1040   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1041     RHS = LHS;
1042     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1043     CC = ISD::SETGE;
1044     return;
1045   }
1046 
1047   switch (CC) {
1048   default:
1049     break;
1050   case ISD::SETGT:
1051   case ISD::SETLE:
1052   case ISD::SETUGT:
1053   case ISD::SETULE:
1054     CC = ISD::getSetCCSwappedOperands(CC);
1055     std::swap(LHS, RHS);
1056     break;
1057   }
1058 }
1059 
1060 // Return the RISC-V branch opcode that matches the given DAG integer
1061 // condition code. The CondCode must be one of those supported by the RISC-V
1062 // ISA (see translateSetCCForBranch).
1063 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
1064   switch (CC) {
1065   default:
1066     llvm_unreachable("Unsupported CondCode");
1067   case ISD::SETEQ:
1068     return RISCV::BEQ;
1069   case ISD::SETNE:
1070     return RISCV::BNE;
1071   case ISD::SETLT:
1072     return RISCV::BLT;
1073   case ISD::SETGE:
1074     return RISCV::BGE;
1075   case ISD::SETULT:
1076     return RISCV::BLTU;
1077   case ISD::SETUGE:
1078     return RISCV::BGEU;
1079   }
1080 }
1081 
1082 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1083   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1084   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
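  // Mask vectors are sized as if they had i8 elements, so that, for example,
  // nxv1i1 maps to LMUL_F8 and nxv64i1 maps to LMUL_8.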
1085   if (VT.getVectorElementType() == MVT::i1)
1086     KnownSize *= 8;
1087 
1088   switch (KnownSize) {
1089   default:
1090     llvm_unreachable("Invalid LMUL.");
1091   case 8:
1092     return RISCVII::VLMUL::LMUL_F8;
1093   case 16:
1094     return RISCVII::VLMUL::LMUL_F4;
1095   case 32:
1096     return RISCVII::VLMUL::LMUL_F2;
1097   case 64:
1098     return RISCVII::VLMUL::LMUL_1;
1099   case 128:
1100     return RISCVII::VLMUL::LMUL_2;
1101   case 256:
1102     return RISCVII::VLMUL::LMUL_4;
1103   case 512:
1104     return RISCVII::VLMUL::LMUL_8;
1105   }
1106 }
1107 
1108 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1109   switch (LMul) {
1110   default:
1111     llvm_unreachable("Invalid LMUL.");
1112   case RISCVII::VLMUL::LMUL_F8:
1113   case RISCVII::VLMUL::LMUL_F4:
1114   case RISCVII::VLMUL::LMUL_F2:
1115   case RISCVII::VLMUL::LMUL_1:
1116     return RISCV::VRRegClassID;
1117   case RISCVII::VLMUL::LMUL_2:
1118     return RISCV::VRM2RegClassID;
1119   case RISCVII::VLMUL::LMUL_4:
1120     return RISCV::VRM4RegClassID;
1121   case RISCVII::VLMUL::LMUL_8:
1122     return RISCV::VRM8RegClassID;
1123   }
1124 }
1125 
1126 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1127   RISCVII::VLMUL LMUL = getLMUL(VT);
  if (LMUL == RISCVII::VLMUL::LMUL_F8 || LMUL == RISCVII::VLMUL::LMUL_F4 ||
      LMUL == RISCVII::VLMUL::LMUL_F2 || LMUL == RISCVII::VLMUL::LMUL_1) {
1132     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1133                   "Unexpected subreg numbering");
1134     return RISCV::sub_vrm1_0 + Index;
1135   }
1136   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1137     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1138                   "Unexpected subreg numbering");
1139     return RISCV::sub_vrm2_0 + Index;
1140   }
1141   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1142     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1143                   "Unexpected subreg numbering");
1144     return RISCV::sub_vrm4_0 + Index;
1145   }
1146   llvm_unreachable("Invalid vector type.");
1147 }
1148 
1149 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1150   if (VT.getVectorElementType() == MVT::i1)
1151     return RISCV::VRRegClassID;
1152   return getRegClassIDForLMUL(getLMUL(VT));
1153 }
1154 
1155 // Attempt to decompose a subvector insert/extract between VecVT and
1156 // SubVecVT via subregister indices. Returns the subregister index that
1157 // can perform the subvector insert/extract with the given element index, as
1158 // well as the index corresponding to any leftover subvectors that must be
1159 // further inserted/extracted within the register class for SubVecVT.
1160 std::pair<unsigned, unsigned>
1161 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1162     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1163     const RISCVRegisterInfo *TRI) {
1164   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1165                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1166                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1167                 "Register classes not ordered");
1168   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1169   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1170   // Try to compose a subregister index that takes us from the incoming
1171   // LMUL>1 register class down to the outgoing one. At each step we half
1172   // the LMUL:
1173   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1174   // Note that this is not guaranteed to find a subregister index, such as
1175   // when we are extracting from one VR type to another.
1176   unsigned SubRegIdx = RISCV::NoSubRegister;
1177   for (const unsigned RCID :
1178        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1179     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1180       VecVT = VecVT.getHalfNumVectorElementsVT();
1181       bool IsHi =
1182           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1183       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1184                                             getSubregIndexByMVT(VecVT, IsHi));
1185       if (IsHi)
1186         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1187     }
1188   return {SubRegIdx, InsertExtractIdx};
1189 }
1190 
1191 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1192 // stores for those types.
1193 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1194   return !Subtarget.useRVVForFixedLengthVectors() ||
1195          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1196 }
1197 
1198 static bool useRVVForFixedLengthVectorVT(MVT VT,
1199                                          const RISCVSubtarget &Subtarget) {
1200   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1201   if (!Subtarget.useRVVForFixedLengthVectors())
1202     return false;
1203 
1204   // We only support a set of vector types with a consistent maximum fixed size
1205   // across all supported vector element types to avoid legalization issues.
1206   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1207   // fixed-length vector type we support is 1024 bytes.
1208   if (VT.getFixedSizeInBits() > 1024 * 8)
1209     return false;
1210 
1211   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1212 
1213   // Don't use RVV for vectors we cannot scalarize if required.
1214   switch (VT.getVectorElementType().SimpleTy) {
  default:
    return false;
  // i1 is supported but has different rules.
  case MVT::i1:
1219     // Masks can only use a single register.
1220     if (VT.getVectorNumElements() > MinVLen)
1221       return false;
1222     MinVLen /= 8;
1223     break;
1224   case MVT::i8:
1225   case MVT::i16:
1226   case MVT::i32:
1227   case MVT::i64:
1228     break;
1229   case MVT::f16:
1230     if (!Subtarget.hasStdExtZfh())
1231       return false;
1232     break;
1233   case MVT::f32:
1234     if (!Subtarget.hasStdExtF())
1235       return false;
1236     break;
1237   case MVT::f64:
1238     if (!Subtarget.hasStdExtD())
1239       return false;
1240     break;
1241   }
1242 
1243   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
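  // LMul approximates how many vector registers the type needs at the minimum
  // configured VLEN (for mask types, MinVLen was scaled down above).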
1244   // Don't use RVV for types that don't fit.
1245   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1246     return false;
1247 
1248   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1249   // the base fixed length RVV support in place.
1250   if (!VT.isPow2VectorType())
1251     return false;
1252 
1253   return true;
1254 }
1255 
1256 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1257   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1258 }
1259 
// Return the smallest scalable container type that can hold a fixed-length
// vector of type VT, given the subtarget's minimum RVV vector length.
1261 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1262                                             const RISCVSubtarget &Subtarget) {
  // This may be called before legal types are set up.
1264   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1265           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1266          "Expected legal fixed length vector!");
1267 
1268   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1269 
1270   MVT EltVT = VT.getVectorElementType();
1271   switch (EltVT.SimpleTy) {
1272   default:
1273     llvm_unreachable("unexpected element type for RVV container");
1274   case MVT::i1:
1275   case MVT::i8:
1276   case MVT::i16:
1277   case MVT::i32:
1278   case MVT::i64:
1279   case MVT::f16:
1280   case MVT::f32:
1281   case MVT::f64: {
    // We prefer to use LMUL=1 for VLEN-sized types. Use fractional LMULs for
    // narrower types, but we can't have a fractional LMUL with a denominator
    // less than 64/SEW.
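    // For example, with a minimum VLEN of 128, v8i32 becomes nxv4i32 (LMUL=2)
    // and v2i32 becomes nxv1i32 (LMUL=1/2).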
1285     unsigned NumElts =
1286         divideCeil(VT.getVectorNumElements(), MinVLen / RISCV::RVVBitsPerBlock);
1287     return MVT::getScalableVectorVT(EltVT, NumElts);
1288   }
1289   }
1290 }
1291 
1292 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1293                                             const RISCVSubtarget &Subtarget) {
1294   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1295                                           Subtarget);
1296 }
1297 
1298 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1299   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1300 }
1301 
1302 // Grow V to consume an entire RVV register.
1303 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1304                                        const RISCVSubtarget &Subtarget) {
1305   assert(VT.isScalableVector() &&
1306          "Expected to convert into a scalable vector!");
1307   assert(V.getValueType().isFixedLengthVector() &&
1308          "Expected a fixed length vector operand!");
1309   SDLoc DL(V);
1310   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1311   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1312 }
1313 
1314 // Shrink V so it's just big enough to maintain a VT's worth of data.
1315 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1316                                          const RISCVSubtarget &Subtarget) {
1317   assert(VT.isFixedLengthVector() &&
1318          "Expected to convert into a fixed length vector!");
1319   assert(V.getValueType().isScalableVector() &&
1320          "Expected a scalable vector operand!");
1321   SDLoc DL(V);
1322   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1323   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1324 }
1325 
1326 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1327 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1328 // the scalable container type used to operate on it.
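// For a fixed-length VecVT such as v4i32 the VL operand is the constant 4; for
// a scalable VecVT it is X0, which requests the maximum vector length (VLMAX).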
1329 static std::pair<SDValue, SDValue>
1330 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1331                 const RISCVSubtarget &Subtarget) {
1332   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1333   MVT XLenVT = Subtarget.getXLenVT();
1334   SDValue VL = VecVT.isFixedLengthVector()
1335                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1336                    : DAG.getRegister(RISCV::X0, XLenVT);
1337   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1338   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1339   return {Mask, VL};
1340 }
1341 
1342 // As above but assuming the given type is a scalable vector type.
1343 static std::pair<SDValue, SDValue>
1344 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1345                         const RISCVSubtarget &Subtarget) {
1346   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1347   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1348 }
1349 
1350 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1351 // of either are (currently) supported. This can get us into an infinite loop
1352 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1353 // as a ..., etc.
1354 // Until either (or both) of these can reliably lower any node, reporting that
1355 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1356 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1357 // which is not desirable.
1358 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1359     EVT VT, unsigned DefinedValues) const {
1360   return false;
1361 }
1362 
1363 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1364   // Only splats are currently supported.
1365   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1366     return true;
1367 
1368   return false;
1369 }
1370 
1371 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1372                                  const RISCVSubtarget &Subtarget) {
1373   MVT VT = Op.getSimpleValueType();
1374   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1375 
1376   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1377 
1378   SDLoc DL(Op);
1379   SDValue Mask, VL;
1380   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1381 
1382   unsigned Opc =
1383       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1384   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1385   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1386 }
1387 
1388 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1389                                  const RISCVSubtarget &Subtarget) {
1390   MVT VT = Op.getSimpleValueType();
1391   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1392 
1393   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1394 
1395   SDLoc DL(Op);
1396   SDValue Mask, VL;
1397   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1398 
1399   MVT XLenVT = Subtarget.getXLenVT();
1400   unsigned NumElts = Op.getNumOperands();
1401 
1402   if (VT.getVectorElementType() == MVT::i1) {
1403     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1404       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1405       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1406     }
1407 
1408     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1409       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1410       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1411     }
1412 
1413     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1414     // scalar integer chunks whose bit-width depends on the number of mask
1415     // bits and XLEN.
1416     // First, determine the most appropriate scalar integer type to use. This
1417     // is at most XLenVT, but may be shrunk to a smaller vector element type
1418     // according to the size of the final vector - use i8 chunks rather than
1419     // XLenVT if we're producing a v8i1. This results in more consistent
1420     // codegen across RV32 and RV64.
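    // For example, a v4i1 constant <1,0,1,1> is packed into the i8 value
    // 0b1101 and built as a v1i8 vector, which is then bitcast to v8i1 and has
    // its low v4i1 extracted.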
1421     unsigned NumViaIntegerBits =
1422         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1423     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1424       // If we have to use more than one INSERT_VECTOR_ELT then this
1425       // optimization is likely to increase code size; avoid performing it in
1426       // such a case. A load from a constant pool can be used instead.
1427       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1428         return SDValue();
1429       // Now we can create our integer vector type. Note that it may be larger
1430       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1431       MVT IntegerViaVecVT =
1432           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1433                            divideCeil(NumElts, NumViaIntegerBits));
1434 
1435       uint64_t Bits = 0;
1436       unsigned BitPos = 0, IntegerEltIdx = 0;
1437       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1438 
1439       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1440         // Once we accumulate enough bits to fill our scalar type, insert into
1441         // our vector and clear our accumulated data.
1442         if (I != 0 && I % NumViaIntegerBits == 0) {
1443           if (NumViaIntegerBits <= 32)
1444             Bits = SignExtend64(Bits, 32);
1445           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1446           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1447                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1448           Bits = 0;
1449           BitPos = 0;
1450           IntegerEltIdx++;
1451         }
1452         SDValue V = Op.getOperand(I);
1453         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1454         Bits |= ((uint64_t)BitValue << BitPos);
1455       }
1456 
1457       // Insert the (remaining) scalar value into position in our integer
1458       // vector type.
1459       if (NumViaIntegerBits <= 32)
1460         Bits = SignExtend64(Bits, 32);
1461       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1462       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1463                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1464 
1465       if (NumElts < NumViaIntegerBits) {
1466         // If we're producing a smaller vector than our minimum legal integer
1467         // type, bitcast to the equivalent (known-legal) mask type, and extract
1468         // our final mask.
1469         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1470         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1471         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1472                           DAG.getConstant(0, DL, XLenVT));
1473       } else {
1474         // Else we must have produced an integer type with the same size as the
1475         // mask type; bitcast for the final result.
1476         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1477         Vec = DAG.getBitcast(VT, Vec);
1478       }
1479 
1480       return Vec;
1481     }
1482 
1483     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
1484     // vector type, we have a legal equivalently-sized i8 type, so we can use
1485     // that.
1486     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
1487     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
1488 
1489     SDValue WideVec;
1490     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1491       // For a splat, perform a scalar truncate before creating the wider
1492       // vector.
1493       assert(Splat.getValueType() == XLenVT &&
1494              "Unexpected type for i1 splat value");
1495       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
1496                           DAG.getConstant(1, DL, XLenVT));
1497       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
1498     } else {
1499       SmallVector<SDValue, 8> Ops(Op->op_values());
1500       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
1501       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
1502       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
1503     }
1504 
1505     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
1506   }
1507 
1508   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1509     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1510                                         : RISCVISD::VMV_V_X_VL;
1511     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1512     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1513   }
1514 
1515   // Try and match an index sequence, which we can lower directly to the vid
1516   // instruction. An all-undef vector is matched by getSplatValue, above.
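  // For example, <i32 0, i32 1, undef, i32 3> matches and can be lowered as a
  // single vid.v.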
1517   if (VT.isInteger()) {
1518     bool IsVID = true;
1519     for (unsigned I = 0; I < NumElts && IsVID; I++)
1520       IsVID &= Op.getOperand(I).isUndef() ||
1521                (isa<ConstantSDNode>(Op.getOperand(I)) &&
1522                 Op.getConstantOperandVal(I) == I);
1523 
1524     if (IsVID) {
1525       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1526       return convertFromScalableVector(VT, VID, DAG, Subtarget);
1527     }
1528   }
1529 
1530   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1531   // when re-interpreted as a vector with a larger element type. For example,
1532   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
1533   // could be instead splat as
1534   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1535   // TODO: This optimization could also work on non-constant splats, but it
1536   // would require bit-manipulation instructions to construct the splat value.
1537   SmallVector<SDValue> Sequence;
1538   unsigned EltBitSize = VT.getScalarSizeInBits();
1539   const auto *BV = cast<BuildVectorSDNode>(Op);
1540   if (VT.isInteger() && EltBitSize < 64 &&
1541       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1542       BV->getRepeatedSequence(Sequence) &&
1543       (Sequence.size() * EltBitSize) <= 64) {
1544     unsigned SeqLen = Sequence.size();
1545     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1546     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1547     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1548             ViaIntVT == MVT::i64) &&
1549            "Unexpected sequence type");
1550 
1551     unsigned EltIdx = 0;
1552     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1553     uint64_t SplatValue = 0;
1554     // Construct the amalgamated value which can be splatted as this larger
1555     // vector type.
1556     for (const auto &SeqV : Sequence) {
1557       if (!SeqV.isUndef())
1558         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1559                        << (EltIdx * EltBitSize));
1560       EltIdx++;
1561     }
1562 
1563     // On RV64, sign-extend from 32 to 64 bits where possible in order to
1564     // achieve better constant materialization.
1565     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1566       SplatValue = SignExtend64(SplatValue, 32);
1567 
1568     // Since we can't introduce illegal i64 types at this stage, we can only
1569     // perform an i64 splat on RV32 if it is its own sign-extended value. That
1570     // way we can use RVV instructions to splat.
1571     assert((ViaIntVT.bitsLE(XLenVT) ||
1572             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1573            "Unexpected bitcast sequence");
1574     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1575       SDValue ViaVL =
1576           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1577       MVT ViaContainerVT =
1578           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
1579       SDValue Splat =
1580           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1581                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1582       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1583       return DAG.getBitcast(VT, Splat);
1584     }
1585   }
1586 
1587   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1588   // which constitute a large proportion of the elements. In such cases we can
1589   // splat a vector with the dominant element and make up the shortfall with
1590   // INSERT_VECTOR_ELTs.
1591   // Note that this includes vectors of 2 elements by association. The
1592   // upper-most element is the "dominant" one, allowing us to use a splat to
1593   // "insert" the upper element, and an insert of the lower element at position
1594   // 0, which improves codegen.
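  // For example, <i32 0, i32 7, i32 0, i32 0> is lowered as a splat of 0
  // followed by a single INSERT_VECTOR_ELT of 7 at index 1.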
1595   SDValue DominantValue;
1596   unsigned MostCommonCount = 0;
1597   DenseMap<SDValue, unsigned> ValueCounts;
1598   unsigned NumUndefElts =
1599       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1600 
1601   for (SDValue V : Op->op_values()) {
1602     if (V.isUndef())
1603       continue;
1604 
1605     ValueCounts.insert(std::make_pair(V, 0));
1606     unsigned &Count = ValueCounts[V];
1607 
1608     // Is this value dominant? In case of a tie, prefer the highest element as
1609     // it's cheaper to insert near the beginning of a vector than it is at the
1610     // end.
1611     if (++Count >= MostCommonCount) {
1612       DominantValue = V;
1613       MostCommonCount = Count;
1614     }
1615   }
1616 
1617   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
1618   unsigned NumDefElts = NumElts - NumUndefElts;
1619   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1620 
1621   // Don't perform this optimization when optimizing for size, since
1622   // materializing elements and inserting them tends to cause code bloat.
1623   if (!DAG.shouldOptForSize() &&
1624       ((MostCommonCount > DominantValueCountThreshold) ||
1625        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
1626     // Start by splatting the most common element.
1627     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1628 
1629     DenseSet<SDValue> Processed{DominantValue};
1630     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
1631     for (const auto &OpIdx : enumerate(Op->ops())) {
1632       const SDValue &V = OpIdx.value();
1633       if (V.isUndef() || !Processed.insert(V).second)
1634         continue;
1635       if (ValueCounts[V] == 1) {
1636         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
1637                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
1638       } else {
1639         // Blend in all instances of this value using a VSELECT, using a
1640         // mask where each bit signals whether that element is the one
1641         // we're after.
1642         SmallVector<SDValue> Ops;
1643         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
1644           return DAG.getConstant(V == V1, DL, XLenVT);
1645         });
1646         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
1647                           DAG.getBuildVector(SelMaskTy, DL, Ops),
1648                           DAG.getSplatBuildVector(VT, DL, V), Vec);
1649       }
1650     }
1651 
1652     return Vec;
1653   }
1654 
1655   return SDValue();
1656 }
1657 
1658 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
1659                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
1660   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
1661     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
1662     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
1663     // If Hi is just Lo's sign bit replicated, the pair sign-extends from Lo;
1664     // lower this as a custom node to try and match RVV vector/scalar instructions.
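    // For example, (Lo=-1, Hi=-1) and (Lo=5, Hi=0) both pass this check, since
    // in each case the i64 value is just the sign extension of Lo.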
1665     if ((LoC >> 31) == HiC)
1666       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
1667   }
1668 
1669   // Fall back to a stack store and stride x0 vector load.
1670   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
1671 }
1672 
1673 // Called by type legalization to handle splat of i64 on RV32.
1674 // FIXME: We can optimize this when the type has sign or zero bits in one
1675 // of the halves.
1676 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
1677                                    SDValue VL, SelectionDAG &DAG) {
1678   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
1679   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1680                            DAG.getConstant(0, DL, MVT::i32));
1681   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1682                            DAG.getConstant(1, DL, MVT::i32));
1683   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
1684 }
1685 
1686 // This function lowers a splat of a scalar operand Scalar with the vector
1687 // length VL. It ensures the final sequence is type legal, which is useful when
1688 // lowering a splat after type legalization.
1689 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
1690                                 SelectionDAG &DAG,
1691                                 const RISCVSubtarget &Subtarget) {
1692   if (VT.isFloatingPoint())
1693     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
1694 
1695   MVT XLenVT = Subtarget.getXLenVT();
1696 
1697   // Simplest case is that the operand needs to be promoted to XLenVT.
1698   if (Scalar.getValueType().bitsLE(XLenVT)) {
1699     // If the operand is a constant, sign extend to increase our chances
1700     // of being able to use a .vi instruction. ANY_EXTEND would become a
1701     // zero extend and the simm5 check in isel would fail.
1702     // FIXME: Should we ignore the upper bits in isel instead?
1703     unsigned ExtOpc =
1704         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
1705     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
1706     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
1707   }
1708 
1709   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
1710          "Unexpected scalar for splat lowering!");
1711 
1712   // Otherwise use the more complicated splatting algorithm.
1713   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
1714 }
1715 
1716 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1717                                    const RISCVSubtarget &Subtarget) {
1718   SDValue V1 = Op.getOperand(0);
1719   SDValue V2 = Op.getOperand(1);
1720   SDLoc DL(Op);
1721   MVT XLenVT = Subtarget.getXLenVT();
1722   MVT VT = Op.getSimpleValueType();
1723   unsigned NumElts = VT.getVectorNumElements();
1724   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1725 
1726   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1727 
1728   SDValue TrueMask, VL;
1729   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1730 
1731   if (SVN->isSplat()) {
1732     const int Lane = SVN->getSplatIndex();
1733     if (Lane >= 0) {
1734       MVT SVT = VT.getVectorElementType();
1735 
1736       // Turn splatted vector load into a strided load with an X0 stride.
1737       SDValue V = V1;
1738       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
1739       // with undef.
1740       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
1741       int Offset = Lane;
1742       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
1743         int OpElements =
1744             V.getOperand(0).getSimpleValueType().getVectorNumElements();
1745         V = V.getOperand(Offset / OpElements);
1746         Offset %= OpElements;
1747       }
1748 
1749       // We need to ensure the load isn't atomic or volatile.
1750       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
1751         auto *Ld = cast<LoadSDNode>(V);
1752         Offset *= SVT.getStoreSize();
1753         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
1754                                                    TypeSize::Fixed(Offset), DL);
1755 
1756         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
1757         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
1758           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
1759           SDValue IntID =
1760               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1761           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
1762                            DAG.getRegister(RISCV::X0, XLenVT), VL};
1763           SDValue NewLoad = DAG.getMemIntrinsicNode(
1764               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
1765               DAG.getMachineFunction().getMachineMemOperand(
1766                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
1767           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
1768           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
1769         }
1770 
1771         // Otherwise use a scalar load and splat. This will give the best
1772         // opportunity to fold a splat into the operation. ISel can turn it into
1773         // the x0 strided load if we aren't able to fold away the select.
1774         if (SVT.isFloatingPoint())
1775           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
1776                           Ld->getPointerInfo().getWithOffset(Offset),
1777                           Ld->getOriginalAlign(),
1778                           Ld->getMemOperand()->getFlags());
1779         else
1780           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
1781                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
1782                              Ld->getOriginalAlign(),
1783                              Ld->getMemOperand()->getFlags());
1784         DAG.makeEquivalentMemoryOrdering(Ld, V);
1785 
1786         unsigned Opc =
1787             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1788         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
1789         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1790       }
1791 
1792       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1793       assert(Lane < (int)NumElts && "Unexpected lane!");
1794       SDValue Gather =
1795           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1796                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
1797       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1798     }
1799   }
1800 
1801   // Detect shuffles which can be re-expressed as vector selects; these are
1802   // shuffles in which each element in the destination is taken from an element
1803   // at the corresponding index in either source vector.
1804   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
1805     int MaskIndex = MaskIdx.value();
1806     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
1807   });
1808 
1809   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
1810 
1811   SmallVector<SDValue> MaskVals;
1812   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
1813   // merged with a second vrgather.
1814   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
1815 
1816   // By default we preserve the original operand order, and use a mask to
1817   // select LHS as true and RHS as false. However, since RVV vector selects may
1818   // feature splats but only on the LHS, we may choose to invert our mask and
1819   // instead select between RHS and LHS.
1820   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
1821   bool InvertMask = IsSelect == SwapOps;
1822 
1823   // Now construct the mask that will be used by the vselect or blended
1824   // vrgather operation. For vrgathers, construct the appropriate indices into
1825   // each vector.
1826   for (int MaskIndex : SVN->getMask()) {
1827     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
1828     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
1829     if (!IsSelect) {
1830       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
1831       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
1832                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
1833                                      : DAG.getUNDEF(XLenVT));
1834       GatherIndicesRHS.push_back(
1835           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
1836                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
1837     }
1838   }
1839 
1840   if (SwapOps) {
1841     std::swap(V1, V2);
1842     std::swap(GatherIndicesLHS, GatherIndicesRHS);
1843   }
1844 
1845   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
1846   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
1847   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
1848 
1849   if (IsSelect)
1850     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
1851 
1852   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
1853     // On such a large vector we're unable to use i8 as the index type.
1854     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
1855     // may involve vector splitting if we're already at LMUL=8, or our
1856     // user-supplied maximum fixed-length LMUL.
1857     return SDValue();
1858   }
1859 
1860   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
1861   MVT IndexVT = VT.changeTypeToInteger();
1862   // Since we can't introduce illegal index types at this stage, use i16 and
1863   // vrgatherei16 if the corresponding index type for plain vrgather is greater
1864   // than XLenVT.
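  // For example, shuffling a v4i64 on RV32 would otherwise require illegal
  // v4i64 indices; v4i16 indices with vrgatherei16 are used instead.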
1865   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
1866     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
1867     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
1868   }
1869 
1870   MVT IndexContainerVT =
1871       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
1872 
1873   SDValue Gather;
1874   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
1875   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
1876   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
1877     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
1878   } else {
1879     SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
1880     LHSIndices =
1881         convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
1882 
1883     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1884     Gather =
1885         DAG.getNode(GatherOpc, DL, ContainerVT, V1, LHSIndices, TrueMask, VL);
1886   }
1887 
1888   // If a second vector operand is used by this shuffle, blend it in with an
1889   // additional vrgather.
1890   if (!V2.isUndef()) {
1891     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
1892     SelectMask =
1893         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
1894 
1895     SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
1896     RHSIndices =
1897         convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
1898 
1899     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
1900     V2 = DAG.getNode(GatherOpc, DL, ContainerVT, V2, RHSIndices, TrueMask, VL);
1901     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
1902                          Gather, VL);
1903   }
1904 
1905   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1906 }
1907 
1908 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
1909                                      SDLoc DL, SelectionDAG &DAG,
1910                                      const RISCVSubtarget &Subtarget) {
1911   if (VT.isScalableVector())
1912     return DAG.getFPExtendOrRound(Op, DL, VT);
1913   assert(VT.isFixedLengthVector() &&
1914          "Unexpected value type for RVV FP extend/round lowering");
1915   SDValue Mask, VL;
1916   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1917   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
1918                         ? RISCVISD::FP_EXTEND_VL
1919                         : RISCVISD::FP_ROUND_VL;
1920   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
1921 }
1922 
1923 // While RVV has alignment restrictions, we should always be able to load as a
1924 // legal equivalently-sized byte-typed vector instead. This method is
1925 // responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
1926 // the load is already correctly-aligned, it returns SDValue().
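// For example, an underaligned v4i32 load is re-expressed as a v16i8 load
// followed by a bitcast back to v4i32.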
1927 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
1928                                                     SelectionDAG &DAG) const {
1929   auto *Load = cast<LoadSDNode>(Op);
1930   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
1931 
1932   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
1933                                      Load->getMemoryVT(),
1934                                      *Load->getMemOperand()))
1935     return SDValue();
1936 
1937   SDLoc DL(Op);
1938   MVT VT = Op.getSimpleValueType();
1939   unsigned EltSizeBits = VT.getScalarSizeInBits();
1940   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
1941          "Unexpected unaligned RVV load type");
1942   MVT NewVT =
1943       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
1944   assert(NewVT.isValid() &&
1945          "Expecting equally-sized RVV vector types to be legal");
1946   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
1947                           Load->getPointerInfo(), Load->getOriginalAlign(),
1948                           Load->getMemOperand()->getFlags());
1949   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
1950 }
1951 
1952 // While RVV has alignment restrictions, we should always be able to store as a
1953 // legal equivalently-sized byte-typed vector instead. This method is
1954 // responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
1955 // returns SDValue() if the store is already correctly aligned.
1956 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
1957                                                      SelectionDAG &DAG) const {
1958   auto *Store = cast<StoreSDNode>(Op);
1959   assert(Store && Store->getValue().getValueType().isVector() &&
1960          "Expected vector store");
1961 
1962   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
1963                                      Store->getMemoryVT(),
1964                                      *Store->getMemOperand()))
1965     return SDValue();
1966 
1967   SDLoc DL(Op);
1968   SDValue StoredVal = Store->getValue();
1969   MVT VT = StoredVal.getSimpleValueType();
1970   unsigned EltSizeBits = VT.getScalarSizeInBits();
1971   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
1972          "Unexpected unaligned RVV store type");
1973   MVT NewVT =
1974       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
1975   assert(NewVT.isValid() &&
1976          "Expecting equally-sized RVV vector types to be legal");
1977   StoredVal = DAG.getBitcast(NewVT, StoredVal);
1978   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
1979                       Store->getPointerInfo(), Store->getOriginalAlign(),
1980                       Store->getMemOperand()->getFlags());
1981 }
1982 
1983 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
1984                                             SelectionDAG &DAG) const {
1985   switch (Op.getOpcode()) {
1986   default:
1987     report_fatal_error("unimplemented operand");
1988   case ISD::GlobalAddress:
1989     return lowerGlobalAddress(Op, DAG);
1990   case ISD::BlockAddress:
1991     return lowerBlockAddress(Op, DAG);
1992   case ISD::ConstantPool:
1993     return lowerConstantPool(Op, DAG);
1994   case ISD::JumpTable:
1995     return lowerJumpTable(Op, DAG);
1996   case ISD::GlobalTLSAddress:
1997     return lowerGlobalTLSAddress(Op, DAG);
1998   case ISD::SELECT:
1999     return lowerSELECT(Op, DAG);
2000   case ISD::BRCOND:
2001     return lowerBRCOND(Op, DAG);
2002   case ISD::VASTART:
2003     return lowerVASTART(Op, DAG);
2004   case ISD::FRAMEADDR:
2005     return lowerFRAMEADDR(Op, DAG);
2006   case ISD::RETURNADDR:
2007     return lowerRETURNADDR(Op, DAG);
2008   case ISD::SHL_PARTS:
2009     return lowerShiftLeftParts(Op, DAG);
2010   case ISD::SRA_PARTS:
2011     return lowerShiftRightParts(Op, DAG, true);
2012   case ISD::SRL_PARTS:
2013     return lowerShiftRightParts(Op, DAG, false);
2014   case ISD::BITCAST: {
2015     SDLoc DL(Op);
2016     EVT VT = Op.getValueType();
2017     SDValue Op0 = Op.getOperand(0);
2018     EVT Op0VT = Op0.getValueType();
2019     MVT XLenVT = Subtarget.getXLenVT();
2020     if (VT.isFixedLengthVector()) {
2021       // We can handle fixed length vector bitcasts with a simple replacement
2022       // in isel.
2023       if (Op0VT.isFixedLengthVector())
2024         return Op;
2025       // When bitcasting from scalar to fixed-length vector, insert the scalar
2026       // into a one-element vector of the result type, and perform a vector
2027       // bitcast.
2028       if (!Op0VT.isVector()) {
2029         auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2030         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2031                                               DAG.getUNDEF(BVT), Op0,
2032                                               DAG.getConstant(0, DL, XLenVT)));
2033       }
2034       return SDValue();
2035     }
2036     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2037     // thus: bitcast the vector to a one-element vector type whose element type
2038     // is the same as the result type, and extract the first element.
2039     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2040       LLVMContext &Context = *DAG.getContext();
2041       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
2042       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2043                          DAG.getConstant(0, DL, XLenVT));
2044     }
2045     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2046       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2047       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2048       return FPConv;
2049     }
2050     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2051         Subtarget.hasStdExtF()) {
2052       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2053       SDValue FPConv =
2054           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2055       return FPConv;
2056     }
2057     return SDValue();
2058   }
2059   case ISD::INTRINSIC_WO_CHAIN:
2060     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2061   case ISD::INTRINSIC_W_CHAIN:
2062     return LowerINTRINSIC_W_CHAIN(Op, DAG);
2063   case ISD::BSWAP:
2064   case ISD::BITREVERSE: {
2065     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
2066     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
2067     MVT VT = Op.getSimpleValueType();
2068     SDLoc DL(Op);
2069     // Start with the maximum immediate value which is the bitwidth - 1.
2070     unsigned Imm = VT.getSizeInBits() - 1;
2071     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
2072     if (Op.getOpcode() == ISD::BSWAP)
2073       Imm &= ~0x7U;
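    // For example, on RV64 this produces a GREV with immediate 63 for
    // BITREVERSE and 56 for BSWAP (a full byte reversal).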
2074     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
2075                        DAG.getConstant(Imm, DL, VT));
2076   }
2077   case ISD::FSHL:
2078   case ISD::FSHR: {
2079     MVT VT = Op.getSimpleValueType();
2080     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
2081     SDLoc DL(Op);
2082     if (Op.getOperand(2).getOpcode() == ISD::Constant)
2083       return Op;
2084     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
2085     // use log2(XLen) bits. Mask the shift amount accordingly.
2086     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2087     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2088                                 DAG.getConstant(ShAmtWidth, DL, VT));
2089     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
2090     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
2091   }
2092   case ISD::TRUNCATE: {
2093     SDLoc DL(Op);
2094     MVT VT = Op.getSimpleValueType();
2095     // Only custom-lower vector truncates
2096     if (!VT.isVector())
2097       return Op;
2098 
2099     // Truncates to mask types are handled differently
2100     if (VT.getVectorElementType() == MVT::i1)
2101       return lowerVectorMaskTrunc(Op, DAG);
2102 
2103     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2104     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2105     // truncate by one power of two at a time.
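    // For example, a v4i32->v4i8 truncate is emitted as two such nodes:
    // i32->i16 followed by i16->i8.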
2106     MVT DstEltVT = VT.getVectorElementType();
2107 
2108     SDValue Src = Op.getOperand(0);
2109     MVT SrcVT = Src.getSimpleValueType();
2110     MVT SrcEltVT = SrcVT.getVectorElementType();
2111 
2112     assert(DstEltVT.bitsLT(SrcEltVT) &&
2113            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2114            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2115            "Unexpected vector truncate lowering");
2116 
2117     MVT ContainerVT = SrcVT;
2118     if (SrcVT.isFixedLengthVector()) {
2119       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2120       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2121     }
2122 
2123     SDValue Result = Src;
2124     SDValue Mask, VL;
2125     std::tie(Mask, VL) =
2126         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2127     LLVMContext &Context = *DAG.getContext();
2128     const ElementCount Count = ContainerVT.getVectorElementCount();
2129     do {
2130       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2131       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2132       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2133                            Mask, VL);
2134     } while (SrcEltVT != DstEltVT);
2135 
2136     if (SrcVT.isFixedLengthVector())
2137       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2138 
2139     return Result;
2140   }
2141   case ISD::ANY_EXTEND:
2142   case ISD::ZERO_EXTEND:
2143     if (Op.getOperand(0).getValueType().isVector() &&
2144         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2145       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2146     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2147   case ISD::SIGN_EXTEND:
2148     if (Op.getOperand(0).getValueType().isVector() &&
2149         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2150       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2151     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2152   case ISD::SPLAT_VECTOR_PARTS:
2153     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2154   case ISD::INSERT_VECTOR_ELT:
2155     return lowerINSERT_VECTOR_ELT(Op, DAG);
2156   case ISD::EXTRACT_VECTOR_ELT:
2157     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2158   case ISD::VSCALE: {
2159     MVT VT = Op.getSimpleValueType();
2160     SDLoc DL(Op);
2161     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
2162     // We define our scalable vector types for lmul=1 to use a 64 bit known
2163     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
2164     // vscale as VLENB / 8.
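    // For example, with VLEN=128 VLENB is 16, so vscale is 16 >> 3 == 2.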
2165     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
2166     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2167                                  DAG.getConstant(3, DL, VT));
2168     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2169   }
2170   case ISD::FP_EXTEND: {
2171     // RVV can only do fp_extend to types double the size of the source. We
2172     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
2173     // via f32.
2174     SDLoc DL(Op);
2175     MVT VT = Op.getSimpleValueType();
2176     SDValue Src = Op.getOperand(0);
2177     MVT SrcVT = Src.getSimpleValueType();
2178 
2179     // Prepare any fixed-length vector operands.
2180     MVT ContainerVT = VT;
2181     if (SrcVT.isFixedLengthVector()) {
2182       ContainerVT = getContainerForFixedLengthVector(VT);
2183       MVT SrcContainerVT =
2184           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2185       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2186     }
2187 
2188     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2189         SrcVT.getVectorElementType() != MVT::f16) {
2190       // For scalable vectors, we only need to close the gap between
2191       // vXf16->vXf64.
2192       if (!VT.isFixedLengthVector())
2193         return Op;
2194       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2195       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2196       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2197     }
2198 
2199     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2200     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2201     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2202         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2203 
2204     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2205                                            DL, DAG, Subtarget);
2206     if (VT.isFixedLengthVector())
2207       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2208     return Extend;
2209   }
2210   case ISD::FP_ROUND: {
2211     // RVV can only do fp_round to types half the size of the source. We
2212     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2213     // conversion instruction.
2214     SDLoc DL(Op);
2215     MVT VT = Op.getSimpleValueType();
2216     SDValue Src = Op.getOperand(0);
2217     MVT SrcVT = Src.getSimpleValueType();
2218 
2219     // Prepare any fixed-length vector operands.
2220     MVT ContainerVT = VT;
2221     if (VT.isFixedLengthVector()) {
2222       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2223       ContainerVT =
2224           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2225       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2226     }
2227 
2228     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2229         SrcVT.getVectorElementType() != MVT::f64) {
2230       // For scalable vectors, we only need to close the gap between
2231       // vXf64<->vXf16.
2232       if (!VT.isFixedLengthVector())
2233         return Op;
2234       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2235       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2236       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2237     }
2238 
2239     SDValue Mask, VL;
2240     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2241 
2242     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2243     SDValue IntermediateRound =
2244         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2245     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2246                                           DL, DAG, Subtarget);
2247 
2248     if (VT.isFixedLengthVector())
2249       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2250     return Round;
2251   }
2252   case ISD::FP_TO_SINT:
2253   case ISD::FP_TO_UINT:
2254   case ISD::SINT_TO_FP:
2255   case ISD::UINT_TO_FP: {
2256     // RVV can only do fp<->int conversions to types half/double the size of
2257     // the source. We custom-lower any conversions that need two hops into
2258     // sequences.
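    // For example, a v4i8->v4f32 conversion (a 4x widening) is lowered as a
    // sign/zero extension to v4i32 followed by a single int->fp conversion.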
2259     MVT VT = Op.getSimpleValueType();
2260     if (!VT.isVector())
2261       return Op;
2262     SDLoc DL(Op);
2263     SDValue Src = Op.getOperand(0);
2264     MVT EltVT = VT.getVectorElementType();
2265     MVT SrcVT = Src.getSimpleValueType();
2266     MVT SrcEltVT = SrcVT.getVectorElementType();
2267     unsigned EltSize = EltVT.getSizeInBits();
2268     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2269     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2270            "Unexpected vector element types");
2271 
2272     bool IsInt2FP = SrcEltVT.isInteger();
2273     // Widening conversions
2274     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2275       if (IsInt2FP) {
2276         // Do a regular integer sign/zero extension then convert to float.
2277         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2278                                       VT.getVectorElementCount());
2279         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2280                                  ? ISD::ZERO_EXTEND
2281                                  : ISD::SIGN_EXTEND;
2282         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2283         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2284       }
2285       // FP2Int
2286       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2287       // Do one doubling fp_extend then complete the operation by converting
2288       // to int.
2289       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2290       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2291       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2292     }
2293 
2294     // Narrowing conversions
2295     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2296       if (IsInt2FP) {
2297         // One narrowing int_to_fp, then an fp_round.
2298         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2299         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2300         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2301         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2302       }
2303       // FP2Int
2304       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2305       // representable by the integer, the result is poison.
2306       MVT IVecVT =
2307           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2308                            VT.getVectorElementCount());
2309       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2310       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2311     }
2312 
2313     // Scalable vectors can exit here. Patterns will handle equally-sized
2314     // conversions as well as halving/doubling ones.
2315     if (!VT.isFixedLengthVector())
2316       return Op;
2317 
2318     // For fixed-length vectors we lower to a custom "VL" node.
2319     unsigned RVVOpc = 0;
2320     switch (Op.getOpcode()) {
2321     default:
2322       llvm_unreachable("Impossible opcode");
2323     case ISD::FP_TO_SINT:
2324       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2325       break;
2326     case ISD::FP_TO_UINT:
2327       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2328       break;
2329     case ISD::SINT_TO_FP:
2330       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2331       break;
2332     case ISD::UINT_TO_FP:
2333       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2334       break;
2335     }
2336 
2337     MVT ContainerVT, SrcContainerVT;
2338     // Derive the reference container type from the larger vector type.
2339     if (SrcEltSize > EltSize) {
2340       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2341       ContainerVT =
2342           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2343     } else {
2344       ContainerVT = getContainerForFixedLengthVector(VT);
2345       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2346     }
2347 
2348     SDValue Mask, VL;
2349     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2350 
2351     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2352     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2353     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2354   }
2355   case ISD::VECREDUCE_ADD:
2356   case ISD::VECREDUCE_UMAX:
2357   case ISD::VECREDUCE_SMAX:
2358   case ISD::VECREDUCE_UMIN:
2359   case ISD::VECREDUCE_SMIN:
2360     return lowerVECREDUCE(Op, DAG);
2361   case ISD::VECREDUCE_AND:
2362   case ISD::VECREDUCE_OR:
2363   case ISD::VECREDUCE_XOR:
2364     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2365       return lowerVectorMaskVECREDUCE(Op, DAG);
2366     return lowerVECREDUCE(Op, DAG);
2367   case ISD::VECREDUCE_FADD:
2368   case ISD::VECREDUCE_SEQ_FADD:
2369   case ISD::VECREDUCE_FMIN:
2370   case ISD::VECREDUCE_FMAX:
2371     return lowerFPVECREDUCE(Op, DAG);
2372   case ISD::INSERT_SUBVECTOR:
2373     return lowerINSERT_SUBVECTOR(Op, DAG);
2374   case ISD::EXTRACT_SUBVECTOR:
2375     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2376   case ISD::STEP_VECTOR:
2377     return lowerSTEP_VECTOR(Op, DAG);
2378   case ISD::VECTOR_REVERSE:
2379     return lowerVECTOR_REVERSE(Op, DAG);
2380   case ISD::BUILD_VECTOR:
2381     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2382   case ISD::SPLAT_VECTOR:
2383     if (Op.getValueType().getVectorElementType() == MVT::i1)
2384       return lowerVectorMaskSplat(Op, DAG);
2385     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
2386   case ISD::VECTOR_SHUFFLE:
2387     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2388   case ISD::CONCAT_VECTORS: {
2389     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2390     // better than going through the stack, as the default expansion does.
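    // For example, (concat_vectors v4i32:a, v4i32:b) becomes two
    // INSERT_SUBVECTORs into an undef v8i32 at indices 0 and 4.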
2391     SDLoc DL(Op);
2392     MVT VT = Op.getSimpleValueType();
2393     unsigned NumOpElts =
2394         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2395     SDValue Vec = DAG.getUNDEF(VT);
2396     for (const auto &OpIdx : enumerate(Op->ops()))
2397       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2398                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2399     return Vec;
2400   }
2401   case ISD::LOAD:
2402     if (auto V = expandUnalignedRVVLoad(Op, DAG))
2403       return V;
2404     if (Op.getValueType().isFixedLengthVector())
2405       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2406     return Op;
2407   case ISD::STORE:
2408     if (auto V = expandUnalignedRVVStore(Op, DAG))
2409       return V;
2410     if (Op.getOperand(1).getValueType().isFixedLengthVector())
2411       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2412     return Op;
2413   case ISD::MLOAD:
2414     return lowerMLOAD(Op, DAG);
2415   case ISD::MSTORE:
2416     return lowerMSTORE(Op, DAG);
2417   case ISD::SETCC:
2418     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2419   case ISD::ADD:
2420     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2421   case ISD::SUB:
2422     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2423   case ISD::MUL:
2424     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2425   case ISD::MULHS:
2426     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2427   case ISD::MULHU:
2428     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2429   case ISD::AND:
2430     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2431                                               RISCVISD::AND_VL);
2432   case ISD::OR:
2433     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2434                                               RISCVISD::OR_VL);
2435   case ISD::XOR:
2436     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2437                                               RISCVISD::XOR_VL);
2438   case ISD::SDIV:
2439     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2440   case ISD::SREM:
2441     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2442   case ISD::UDIV:
2443     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2444   case ISD::UREM:
2445     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2446   case ISD::SHL:
2447   case ISD::SRA:
2448   case ISD::SRL:
2449     if (Op.getSimpleValueType().isFixedLengthVector())
2450       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
2451     // This can be called for an i32 shift amount that needs to be promoted.
2452     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
2453            "Unexpected custom legalisation");
2454     return SDValue();
2455   case ISD::FADD:
2456     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2457   case ISD::FSUB:
2458     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2459   case ISD::FMUL:
2460     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2461   case ISD::FDIV:
2462     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2463   case ISD::FNEG:
2464     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2465   case ISD::FABS:
2466     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2467   case ISD::FSQRT:
2468     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2469   case ISD::FMA:
2470     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2471   case ISD::SMIN:
2472     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2473   case ISD::SMAX:
2474     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2475   case ISD::UMIN:
2476     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2477   case ISD::UMAX:
2478     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2479   case ISD::FMINNUM:
2480     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
2481   case ISD::FMAXNUM:
2482     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
2483   case ISD::ABS:
2484     return lowerABS(Op, DAG);
2485   case ISD::VSELECT:
2486     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2487   case ISD::FCOPYSIGN:
2488     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2489   case ISD::MGATHER:
2490     return lowerMGATHER(Op, DAG);
2491   case ISD::MSCATTER:
2492     return lowerMSCATTER(Op, DAG);
2493   case ISD::FLT_ROUNDS_:
2494     return lowerGET_ROUNDING(Op, DAG);
2495   case ISD::SET_ROUNDING:
2496     return lowerSET_ROUNDING(Op, DAG);
2497   case ISD::VP_ADD:
2498     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
2499   case ISD::VP_SUB:
2500     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
2501   case ISD::VP_MUL:
2502     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
2503   case ISD::VP_SDIV:
2504     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
2505   case ISD::VP_UDIV:
2506     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
2507   case ISD::VP_SREM:
2508     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
2509   case ISD::VP_UREM:
2510     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
2511   case ISD::VP_AND:
2512     return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
2513   case ISD::VP_OR:
2514     return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
2515   case ISD::VP_XOR:
2516     return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
2517   case ISD::VP_ASHR:
2518     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
2519   case ISD::VP_LSHR:
2520     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
2521   case ISD::VP_SHL:
2522     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
2523   case ISD::VP_FADD:
2524     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
2525   case ISD::VP_FSUB:
2526     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
2527   case ISD::VP_FMUL:
2528     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
2529   case ISD::VP_FDIV:
2530     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
2531   }
2532 }
2533 
2534 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2535                              SelectionDAG &DAG, unsigned Flags) {
2536   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2537 }
2538 
2539 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2540                              SelectionDAG &DAG, unsigned Flags) {
2541   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2542                                    Flags);
2543 }
2544 
2545 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2546                              SelectionDAG &DAG, unsigned Flags) {
2547   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2548                                    N->getOffset(), Flags);
2549 }
2550 
2551 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2552                              SelectionDAG &DAG, unsigned Flags) {
2553   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2554 }
2555 
2556 template <class NodeTy>
2557 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2558                                      bool IsLocal) const {
2559   SDLoc DL(N);
2560   EVT Ty = getPointerTy(DAG.getDataLayout());
2561 
2562   if (isPositionIndependent()) {
2563     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2564     if (IsLocal)
2565       // Use PC-relative addressing to access the symbol. This generates the
2566       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
2567       // %pcrel_lo(auipc)).
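      // For example, this expands to roughly the following sequence (register
      // and label names are illustrative):
      //   .Lpcrel_hi0:
      //     auipc a0, %pcrel_hi(sym)
      //     addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)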
2568       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2569 
2570     // Use PC-relative addressing to access the GOT for this symbol, then load
2571     // the address from the GOT. This generates the pattern (PseudoLA sym),
2572     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
2573     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
2574   }
2575 
2576   switch (getTargetMachine().getCodeModel()) {
2577   default:
2578     report_fatal_error("Unsupported code model for lowering");
2579   case CodeModel::Small: {
2580     // Generate a sequence for accessing addresses within the first 2 GiB of
2581     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
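    // For example (register choice illustrative):
    //   lui  a0, %hi(sym)
    //   addi a0, a0, %lo(sym)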
2582     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
2583     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
2584     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2585     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
2586   }
2587   case CodeModel::Medium: {
2588     // Generate a sequence for accessing addresses within any 2 GiB range within
2589     // the address space. This generates the pattern (PseudoLLA sym), which
2590     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
2591     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2592     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2593   }
2594   }
2595 }
2596 
2597 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
2598                                                 SelectionDAG &DAG) const {
2599   SDLoc DL(Op);
2600   EVT Ty = Op.getValueType();
2601   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2602   int64_t Offset = N->getOffset();
2603   MVT XLenVT = Subtarget.getXLenVT();
2604 
2605   const GlobalValue *GV = N->getGlobal();
2606   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
2607   SDValue Addr = getAddr(N, DAG, IsLocal);
2608 
2609   // In order to maximise the opportunity for common subexpression elimination,
2610   // emit a separate ADD node for the global address offset instead of folding
2611   // it into the global address node. Later peephole optimisations may choose
2612   // to fold it back in when profitable.
2613   if (Offset != 0)
2614     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2615                        DAG.getConstant(Offset, DL, XLenVT));
2616   return Addr;
2617 }
2618 
2619 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
2620                                                SelectionDAG &DAG) const {
2621   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2622 
2623   return getAddr(N, DAG);
2624 }
2625 
2626 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
2627                                                SelectionDAG &DAG) const {
2628   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2629 
2630   return getAddr(N, DAG);
2631 }
2632 
2633 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
2634                                             SelectionDAG &DAG) const {
2635   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2636 
2637   return getAddr(N, DAG);
2638 }
2639 
2640 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
2641                                               SelectionDAG &DAG,
2642                                               bool UseGOT) const {
2643   SDLoc DL(N);
2644   EVT Ty = getPointerTy(DAG.getDataLayout());
2645   const GlobalValue *GV = N->getGlobal();
2646   MVT XLenVT = Subtarget.getXLenVT();
2647 
2648   if (UseGOT) {
2649     // Use PC-relative addressing to access the GOT for this TLS symbol, then
2650     // load the address from the GOT and add the thread pointer. This generates
2651     // the pattern (PseudoLA_TLS_IE sym), which expands to
2652     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
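    // The emitted sequence is roughly (register and label names illustrative):
    //   .Lpcrel_hi0:
    //     auipc a0, %tls_ie_pcrel_hi(sym)
    //     ld    a0, %pcrel_lo(.Lpcrel_hi0)(a0)   (lw on RV32)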
2653     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2654     SDValue Load =
2655         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
2656 
2657     // Add the thread pointer.
2658     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2659     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
2660   }
2661 
2662   // Generate a sequence for accessing the address relative to the thread
2663   // pointer, with the appropriate adjustment for the thread pointer offset.
2664   // This generates the pattern
2665   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
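  // This corresponds roughly to (registers illustrative):
  //   lui  a0, %tprel_hi(sym)
  //   add  a0, a0, tp, %tprel_add(sym)
  //   addi a0, a0, %tprel_lo(sym)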
2666   SDValue AddrHi =
2667       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
2668   SDValue AddrAdd =
2669       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
2670   SDValue AddrLo =
2671       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
2672 
2673   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2674   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2675   SDValue MNAdd = SDValue(
2676       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
2677       0);
2678   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
2679 }
2680 
2681 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
2682                                                SelectionDAG &DAG) const {
2683   SDLoc DL(N);
2684   EVT Ty = getPointerTy(DAG.getDataLayout());
2685   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
2686   const GlobalValue *GV = N->getGlobal();
2687 
2688   // Use a PC-relative addressing mode to access the global dynamic GOT address.
2689   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
2690   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
2691   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2692   SDValue Load =
2693       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
2694 
2695   // Prepare argument list to generate call.
2696   ArgListTy Args;
2697   ArgListEntry Entry;
2698   Entry.Node = Load;
2699   Entry.Ty = CallTy;
2700   Args.push_back(Entry);
2701 
2702   // Set up the call to __tls_get_addr.
2703   TargetLowering::CallLoweringInfo CLI(DAG);
2704   CLI.setDebugLoc(DL)
2705       .setChain(DAG.getEntryNode())
2706       .setLibCallee(CallingConv::C, CallTy,
2707                     DAG.getExternalSymbol("__tls_get_addr", Ty),
2708                     std::move(Args));
2709 
2710   return LowerCallTo(CLI).first;
2711 }
2712 
2713 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2714                                                    SelectionDAG &DAG) const {
2715   SDLoc DL(Op);
2716   EVT Ty = Op.getValueType();
2717   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2718   int64_t Offset = N->getOffset();
2719   MVT XLenVT = Subtarget.getXLenVT();
2720 
2721   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2722 
2723   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2724       CallingConv::GHC)
2725     report_fatal_error("In GHC calling convention TLS is not supported");
2726 
2727   SDValue Addr;
2728   switch (Model) {
2729   case TLSModel::LocalExec:
2730     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2731     break;
2732   case TLSModel::InitialExec:
2733     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2734     break;
2735   case TLSModel::LocalDynamic:
2736   case TLSModel::GeneralDynamic:
2737     Addr = getDynamicTLSAddr(N, DAG);
2738     break;
2739   }
2740 
2741   // In order to maximise the opportunity for common subexpression elimination,
2742   // emit a separate ADD node for the global address offset instead of folding
2743   // it into the global address node. Later peephole optimisations may choose
2744   // to fold it back in when profitable.
2745   if (Offset != 0)
2746     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2747                        DAG.getConstant(Offset, DL, XLenVT));
2748   return Addr;
2749 }
2750 
2751 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2752   SDValue CondV = Op.getOperand(0);
2753   SDValue TrueV = Op.getOperand(1);
2754   SDValue FalseV = Op.getOperand(2);
2755   SDLoc DL(Op);
2756   MVT VT = Op.getSimpleValueType();
2757   MVT XLenVT = Subtarget.getXLenVT();
2758 
2759   // Lower vector SELECTs to VSELECTs by splatting the condition.
2760   if (VT.isVector()) {
2761     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
2762     SDValue CondSplat = VT.isScalableVector()
2763                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
2764                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
2765     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
2766   }
2767 
2768   // If the result type is XLenVT and CondV is the output of a SETCC node
2769   // which also operated on XLenVT inputs, then merge the SETCC node into the
2770   // lowered RISCVISD::SELECT_CC to take advantage of the integer
2771   // compare+branch instructions. i.e.:
2772   // (select (setcc lhs, rhs, cc), truev, falsev)
2773   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
2774   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
2775       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
2776     SDValue LHS = CondV.getOperand(0);
2777     SDValue RHS = CondV.getOperand(1);
2778     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
2779     ISD::CondCode CCVal = CC->get();
2780 
2781     // Special case for a select of 2 constants that differ by 1.
2782     // Normally this is done by DAGCombine, but if the select is introduced by
2783     // type legalization or op legalization, we miss it. Restricting to SETLT
2784     // case for now because that is what signed saturating add/sub need.
2785     // FIXME: We don't need the condition to be SETLT or even a SETCC,
2786     // but we would probably want to swap the true/false values if the condition
2787     // is SETGE/SETLE to avoid an XORI.
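    // As a purely illustrative example, (select (setlt x, y), 4, 3) becomes
    // (add (setlt x, y), 3) and (select (setlt x, y), 3, 4) becomes
    // (sub 4, (setlt x, y)), since the setcc result is known to be 0 or 1.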
2788     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
2789         CCVal == ISD::SETLT) {
2790       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
2791       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
2792       if (TrueVal - 1 == FalseVal)
2793         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
2794       if (TrueVal + 1 == FalseVal)
2795         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
2796     }
2797 
2798     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2799 
2800     SDValue TargetCC = DAG.getTargetConstant(CCVal, DL, XLenVT);
2801     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
2802     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2803   }
2804 
2805   // Otherwise:
2806   // (select condv, truev, falsev)
2807   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
2808   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
2809   SDValue SetNE = DAG.getTargetConstant(ISD::SETNE, DL, XLenVT);
2810 
2811   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
2812 
2813   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2814 }
2815 
2816 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2817   SDValue CondV = Op.getOperand(1);
2818   SDLoc DL(Op);
2819   MVT XLenVT = Subtarget.getXLenVT();
2820 
2821   if (CondV.getOpcode() == ISD::SETCC &&
2822       CondV.getOperand(0).getValueType() == XLenVT) {
2823     SDValue LHS = CondV.getOperand(0);
2824     SDValue RHS = CondV.getOperand(1);
2825     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
2826 
2827     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2828 
2829     SDValue TargetCC = DAG.getCondCode(CCVal);
2830     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2831                        LHS, RHS, TargetCC, Op.getOperand(2));
2832   }
2833 
2834   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2835                      CondV, DAG.getConstant(0, DL, XLenVT),
2836                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
2837 }
2838 
2839 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2840   MachineFunction &MF = DAG.getMachineFunction();
2841   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
2842 
2843   SDLoc DL(Op);
2844   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2845                                  getPointerTy(MF.getDataLayout()));
2846 
2847   // vastart just stores the address of the VarArgsFrameIndex slot into the
2848   // memory location argument.
2849   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2850   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2851                       MachinePointerInfo(SV));
2852 }
2853 
2854 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
2855                                             SelectionDAG &DAG) const {
2856   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2857   MachineFunction &MF = DAG.getMachineFunction();
2858   MachineFrameInfo &MFI = MF.getFrameInfo();
2859   MFI.setFrameAddressIsTaken(true);
2860   Register FrameReg = RI.getFrameRegister(MF);
2861   int XLenInBytes = Subtarget.getXLen() / 8;
2862 
2863   EVT VT = Op.getValueType();
2864   SDLoc DL(Op);
2865   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
2866   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2867   while (Depth--) {
2868     int Offset = -(XLenInBytes * 2);
2869     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
2870                               DAG.getIntPtrConstant(Offset, DL));
2871     FrameAddr =
2872         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2873   }
2874   return FrameAddr;
2875 }
2876 
2877 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
2878                                              SelectionDAG &DAG) const {
2879   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2880   MachineFunction &MF = DAG.getMachineFunction();
2881   MachineFrameInfo &MFI = MF.getFrameInfo();
2882   MFI.setReturnAddressIsTaken(true);
2883   MVT XLenVT = Subtarget.getXLenVT();
2884   int XLenInBytes = Subtarget.getXLen() / 8;
2885 
2886   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2887     return SDValue();
2888 
2889   EVT VT = Op.getValueType();
2890   SDLoc DL(Op);
2891   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2892   if (Depth) {
2893     int Off = -XLenInBytes;
2894     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
2895     SDValue Offset = DAG.getConstant(Off, DL, VT);
2896     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
2897                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
2898                        MachinePointerInfo());
2899   }
2900 
2901   // Return the value of the return address register, marking it an implicit
2902   // live-in.
2903   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
2904   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
2905 }
2906 
2907 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
2908                                                  SelectionDAG &DAG) const {
2909   SDLoc DL(Op);
2910   SDValue Lo = Op.getOperand(0);
2911   SDValue Hi = Op.getOperand(1);
2912   SDValue Shamt = Op.getOperand(2);
2913   EVT VT = Lo.getValueType();
2914 
2915   // if Shamt-XLEN < 0: // Shamt < XLEN
2916   //   Lo = Lo << Shamt
2917   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
2918   // else:
2919   //   Lo = 0
2920   //   Hi = Lo << (Shamt-XLEN)
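  // For example, with XLEN=32 and Shamt=40 the else branch applies: Lo becomes
  // 0 and Hi becomes the original Lo shifted left by 8.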
2921 
2922   SDValue Zero = DAG.getConstant(0, DL, VT);
2923   SDValue One = DAG.getConstant(1, DL, VT);
2924   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2925   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2926   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2927   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2928 
2929   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2930   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
2931   SDValue ShiftRightLo =
2932       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
2933   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2934   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2935   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
2936 
2937   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2938 
2939   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
2940   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2941 
2942   SDValue Parts[2] = {Lo, Hi};
2943   return DAG.getMergeValues(Parts, DL);
2944 }
2945 
2946 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2947                                                   bool IsSRA) const {
2948   SDLoc DL(Op);
2949   SDValue Lo = Op.getOperand(0);
2950   SDValue Hi = Op.getOperand(1);
2951   SDValue Shamt = Op.getOperand(2);
2952   EVT VT = Lo.getValueType();
2953 
2954   // SRA expansion:
2955   //   if Shamt-XLEN < 0: // Shamt < XLEN
2956   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2957   //     Hi = Hi >>s Shamt
2958   //   else:
2959   //     Lo = Hi >>s (Shamt-XLEN);
2960   //     Hi = Hi >>s (XLEN-1)
2961   //
2962   // SRL expansion:
2963   //   if Shamt-XLEN < 0: // Shamt < XLEN
2964   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2965   //     Hi = Hi >>u Shamt
2966   //   else:
2967   //     Lo = Hi >>u (Shamt-XLEN);
2968   //     Hi = 0;
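  // For example, an SRL with XLEN=32 and Shamt=8 takes the first branch:
  //   Lo = (Lo >>u 8) | ((Hi << 1) << 23)
  //   Hi = Hi >>u 8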
2969 
2970   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
2971 
2972   SDValue Zero = DAG.getConstant(0, DL, VT);
2973   SDValue One = DAG.getConstant(1, DL, VT);
2974   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2975   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2976   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2977   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2978 
2979   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2980   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
2981   SDValue ShiftLeftHi =
2982       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
2983   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
2984   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
2985   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
2986   SDValue HiFalse =
2987       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
2988 
2989   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2990 
2991   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
2992   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2993 
2994   SDValue Parts[2] = {Lo, Hi};
2995   return DAG.getMergeValues(Parts, DL);
2996 }
2997 
2998 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
2999 // legal equivalently-sized i8 type, so we can use that as a go-between.
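// For example, a splat of a non-constant i1 value x is lowered roughly as
// (setcc (splat_vector (and x, 1)):vXi8, (splat_vector 0), setne).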
3000 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
3001                                                   SelectionDAG &DAG) const {
3002   SDLoc DL(Op);
3003   MVT VT = Op.getSimpleValueType();
3004   SDValue SplatVal = Op.getOperand(0);
3005   // All-zeros or all-ones splats are handled specially.
3006   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
3007     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3008     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
3009   }
3010   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
3011     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3012     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
3013   }
3014   MVT XLenVT = Subtarget.getXLenVT();
3015   assert(SplatVal.getValueType() == XLenVT &&
3016          "Unexpected type for i1 splat value");
3017   MVT InterVT = VT.changeVectorElementType(MVT::i8);
3018   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
3019                          DAG.getConstant(1, DL, XLenVT));
3020   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
3021   SDValue Zero = DAG.getConstant(0, DL, InterVT);
3022   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
3023 }
3024 
3025 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
3026 // illegal (currently only vXi64 RV32).
3027 // FIXME: We could also catch non-constant sign-extended i32 values and lower
3028 // them to SPLAT_VECTOR_I64
3029 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
3030                                                      SelectionDAG &DAG) const {
3031   SDLoc DL(Op);
3032   MVT VecVT = Op.getSimpleValueType();
3033   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
3034          "Unexpected SPLAT_VECTOR_PARTS lowering");
3035 
3036   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
3037   SDValue Lo = Op.getOperand(0);
3038   SDValue Hi = Op.getOperand(1);
3039 
3040   if (VecVT.isFixedLengthVector()) {
3041     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3042     SDLoc DL(Op);
3043     SDValue Mask, VL;
3044     std::tie(Mask, VL) =
3045         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3046 
3047     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
3048     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
3049   }
3050 
3051   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
3052     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
3053     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
3054     // If the Hi constant is simply Lo's sign bit replicated, lower this as a
3055     // custom node in order to try to match RVV vector/scalar instructions.
3056     if ((LoC >> 31) == HiC)
3057       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3058   }
3059 
3060   // Detect cases where Hi is (SRA Lo, 31), i.e. the value is sign-extended Lo.
3061   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
3062       isa<ConstantSDNode>(Hi.getOperand(1)) &&
3063       Hi.getConstantOperandVal(1) == 31)
3064     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3065 
3066   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
3067   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
3068                      DAG.getRegister(RISCV::X0, MVT::i64));
3069 }
3070 
3071 // Custom-lower extensions from mask vectors by using a vselect either with 1
3072 // for zero/any-extension or -1 for sign-extension:
3073 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
3074 // Note that any-extension is lowered identically to zero-extension.
3075 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
3076                                                 int64_t ExtTrueVal) const {
3077   SDLoc DL(Op);
3078   MVT VecVT = Op.getSimpleValueType();
3079   SDValue Src = Op.getOperand(0);
3080   // Only custom-lower extensions from mask types
3081   assert(Src.getValueType().isVector() &&
3082          Src.getValueType().getVectorElementType() == MVT::i1);
3083 
3084   MVT XLenVT = Subtarget.getXLenVT();
3085   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
3086   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
3087 
3088   if (VecVT.isScalableVector()) {
3089     // Be careful not to introduce illegal scalar types at this stage, and be
3090     // careful also about splatting constants, as on RV32 vXi64 SPLAT_VECTOR is
3091     // illegal and must be expanded. Since we know that the constants are
3092     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
3093     bool IsRV32E64 =
3094         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
3095 
3096     if (!IsRV32E64) {
3097       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
3098       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
3099     } else {
3100       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
3101       SplatTrueVal =
3102           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
3103     }
3104 
3105     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
3106   }
3107 
3108   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3109   MVT I1ContainerVT =
3110       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3111 
3112   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
3113 
3114   SDValue Mask, VL;
3115   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3116 
3117   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3118   SplatTrueVal =
3119       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3120   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3121                                SplatTrueVal, SplatZero, VL);
3122 
3123   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3124 }
3125 
3126 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3127     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3128   MVT ExtVT = Op.getSimpleValueType();
3129   // Only custom-lower extensions from fixed-length vector types.
3130   if (!ExtVT.isFixedLengthVector())
3131     return Op;
3132   MVT VT = Op.getOperand(0).getSimpleValueType();
3133   // Grab the canonical container type for the extended type. Infer the smaller
3134   // type from that to ensure the same number of vector elements, as we know
3135   // the LMUL will be sufficient to hold the smaller type.
3136   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
3137   // Derive the source container type manually so that it has the same number
3138   // of vector elements as the destination container type.
3139   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3140                                      ContainerExtVT.getVectorElementCount());
3141 
3142   SDValue Op1 =
3143       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3144 
3145   SDLoc DL(Op);
3146   SDValue Mask, VL;
3147   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3148 
3149   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3150 
3151   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3152 }
3153 
3154 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3155 // setcc operation:
3156 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3157 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3158                                                   SelectionDAG &DAG) const {
3159   SDLoc DL(Op);
3160   EVT MaskVT = Op.getValueType();
3161   // Only expect to custom-lower truncations to mask types
3162   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3163          "Unexpected type for vector mask lowering");
3164   SDValue Src = Op.getOperand(0);
3165   MVT VecVT = Src.getSimpleValueType();
3166 
3167   // If this is a fixed vector, we need to convert it to a scalable vector.
3168   MVT ContainerVT = VecVT;
3169   if (VecVT.isFixedLengthVector()) {
3170     ContainerVT = getContainerForFixedLengthVector(VecVT);
3171     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3172   }
3173 
3174   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
3175   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
3176 
3177   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
3178   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
3179 
3180   if (VecVT.isScalableVector()) {
3181     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
3182     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
3183   }
3184 
3185   SDValue Mask, VL;
3186   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3187 
3188   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3189   SDValue Trunc =
3190       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3191   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3192                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3193   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3194 }
3195 
3196 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3197 // first position of a vector, and that vector is slid up to the insert index.
3198 // By limiting the active vector length to index+1 and merging with the
3199 // original vector (with an undisturbed tail policy for elements >= VL), we
3200 // achieve the desired result of leaving all elements untouched except the one
3201 // at VL-1, which is replaced with the desired value.
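// For example, inserting at index 2 splats the value into element 0 of a
// temporary vector and then emits a vslideup with an offset of 2 and VL=3, so
// only element 2 of the destination is overwritten.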
3202 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3203                                                     SelectionDAG &DAG) const {
3204   SDLoc DL(Op);
3205   MVT VecVT = Op.getSimpleValueType();
3206   SDValue Vec = Op.getOperand(0);
3207   SDValue Val = Op.getOperand(1);
3208   SDValue Idx = Op.getOperand(2);
3209 
3210   if (VecVT.getVectorElementType() == MVT::i1) {
3211     // FIXME: For now we just promote to an i8 vector and insert into that,
3212     // but this is probably not optimal.
3213     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3214     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3215     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
3216     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
3217   }
3218 
3219   MVT ContainerVT = VecVT;
3220   // If the operand is a fixed-length vector, convert to a scalable one.
3221   if (VecVT.isFixedLengthVector()) {
3222     ContainerVT = getContainerForFixedLengthVector(VecVT);
3223     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3224   }
3225 
3226   MVT XLenVT = Subtarget.getXLenVT();
3227 
3228   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3229   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
3230   // Even i64-element vectors on RV32 can be lowered without scalar
3231   // legalization if the most-significant 32 bits of the value are simply the
3232   // sign-extension of the lower 32 bits.
3233   // TODO: We could also catch sign extensions of a 32-bit value.
3234   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3235     const auto *CVal = cast<ConstantSDNode>(Val);
3236     if (isInt<32>(CVal->getSExtValue())) {
3237       IsLegalInsert = true;
3238       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3239     }
3240   }
3241 
3242   SDValue Mask, VL;
3243   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3244 
3245   SDValue ValInVec;
3246 
3247   if (IsLegalInsert) {
3248     unsigned Opc =
3249         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3250     if (isNullConstant(Idx)) {
3251       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3252       if (!VecVT.isFixedLengthVector())
3253         return Vec;
3254       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3255     }
3256     ValInVec =
3257         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3258   } else {
3259     // On RV32, i64-element vectors must be specially handled to place the
3260     // value at element 0, by using two vslide1up instructions in sequence on
3261     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3262     // this.
3263     SDValue One = DAG.getConstant(1, DL, XLenVT);
3264     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3265     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3266     MVT I32ContainerVT =
3267         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3268     SDValue I32Mask =
3269         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3270     // Limit the active VL to two.
3271     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
3272     // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
3273     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3274     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3275                            InsertI64VL);
3276     // First slide in the hi value, then the lo in underneath it.
3277     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3278                            ValHi, I32Mask, InsertI64VL);
3279     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3280                            ValLo, I32Mask, InsertI64VL);
3281     // Bitcast back to the right container type.
3282     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3283   }
3284 
3285   // Now that the value is in a vector, slide it into position.
3286   SDValue InsertVL =
3287       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3288   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3289                                 ValInVec, Idx, Mask, InsertVL);
3290   if (!VecVT.isFixedLengthVector())
3291     return Slideup;
3292   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3293 }
3294 
3295 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3296 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3297 // types this is done using VMV_X_S to allow us to glean information about the
3298 // sign bits of the result.
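// For example, extracting a non-zero index emits a vslidedown by that index
// with VL=1, followed by a vmv.x.s (integer) or an element-0 extract (FP).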
3299 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3300                                                      SelectionDAG &DAG) const {
3301   SDLoc DL(Op);
3302   SDValue Idx = Op.getOperand(1);
3303   SDValue Vec = Op.getOperand(0);
3304   EVT EltVT = Op.getValueType();
3305   MVT VecVT = Vec.getSimpleValueType();
3306   MVT XLenVT = Subtarget.getXLenVT();
3307 
3308   if (VecVT.getVectorElementType() == MVT::i1) {
3309     // FIXME: For now we just promote to an i8 vector and extract from that,
3310     // but this is probably not optimal.
3311     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3312     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3313     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3314   }
3315 
3316   // If this is a fixed vector, we need to convert it to a scalable vector.
3317   MVT ContainerVT = VecVT;
3318   if (VecVT.isFixedLengthVector()) {
3319     ContainerVT = getContainerForFixedLengthVector(VecVT);
3320     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3321   }
3322 
3323   // If the index is 0, the vector is already in the right position.
3324   if (!isNullConstant(Idx)) {
3325     // Use a VL of 1 to avoid processing more elements than we need.
3326     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3327     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3328     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3329     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3330                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3331   }
3332 
3333   if (!EltVT.isInteger()) {
3334     // Floating-point extracts are handled in TableGen.
3335     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3336                        DAG.getConstant(0, DL, XLenVT));
3337   }
3338 
3339   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3340   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3341 }
3342 
3343 // Some RVV intrinsics may claim that they want an integer operand to be
3344 // promoted or expanded.
3345 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3346                                           const RISCVSubtarget &Subtarget) {
3347   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3348           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3349          "Unexpected opcode");
3350 
3351   if (!Subtarget.hasStdExtV())
3352     return SDValue();
3353 
3354   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3355   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3356   SDLoc DL(Op);
3357 
3358   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3359       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3360   if (!II || !II->SplatOperand)
3361     return SDValue();
3362 
3363   unsigned SplatOp = II->SplatOperand + HasChain;
3364   assert(SplatOp < Op.getNumOperands());
3365 
3366   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3367   SDValue &ScalarOp = Operands[SplatOp];
3368   MVT OpVT = ScalarOp.getSimpleValueType();
3369   MVT XLenVT = Subtarget.getXLenVT();
3370 
3371   // If this isn't a scalar, or its type is XLenVT, we're done.
3372   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3373     return SDValue();
3374 
3375   // Simplest case is that the operand needs to be promoted to XLenVT.
3376   if (OpVT.bitsLT(XLenVT)) {
3377     // If the operand is a constant, sign extend to increase our chances
3378     // of being able to use a .vi instruction. ANY_EXTEND would become a
3379     // zero extend and the simm5 check in isel would fail.
3380     // FIXME: Should we ignore the upper bits in isel instead?
3381     unsigned ExtOpc =
3382         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3383     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3384     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3385   }
3386 
3387   // Use the previous operand to get the vXi64 VT. The result might be a mask
3388   // VT for compares. Using the previous operand assumes that the previous
3389   // operand will never have a smaller element size than a scalar operand and
3390   // that a widening operation never uses SEW=64.
3391   // NOTE: If the assert below ever fails, we can probably just find the
3392   // element count from any operand or result and use it to construct the VT.
3393   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3394   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3395 
3396   // The more complex case is when the scalar is larger than XLenVT.
3397   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3398          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3399 
3400   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3401   // on the instruction to sign-extend since SEW>XLEN.
3402   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3403     if (isInt<32>(CVal->getSExtValue())) {
3404       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3405       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3406     }
3407   }
3408 
3409   // We need to convert the scalar to a splat vector.
3410   // FIXME: Can we implicitly truncate the scalar if it is known to
3411   // be sign extended?
3412   // VL should be the last operand.
3413   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3414   assert(VL.getValueType() == XLenVT);
3415   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3416   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3417 }
3418 
3419 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3420                                                      SelectionDAG &DAG) const {
3421   unsigned IntNo = Op.getConstantOperandVal(0);
3422   SDLoc DL(Op);
3423   MVT XLenVT = Subtarget.getXLenVT();
3424 
3425   switch (IntNo) {
3426   default:
3427     break; // Don't custom lower most intrinsics.
3428   case Intrinsic::thread_pointer: {
3429     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3430     return DAG.getRegister(RISCV::X4, PtrVT);
3431   }
3432   case Intrinsic::riscv_orc_b:
3433     // Lower to the GORCI encoding for orc.b.
3434     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3435                        DAG.getConstant(7, DL, XLenVT));
3436   case Intrinsic::riscv_grev:
3437   case Intrinsic::riscv_gorc: {
3438     unsigned Opc =
3439         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3440     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3441   }
3442   case Intrinsic::riscv_shfl:
3443   case Intrinsic::riscv_unshfl: {
3444     unsigned Opc =
3445         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3446     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3447   }
3448   case Intrinsic::riscv_bcompress:
3449   case Intrinsic::riscv_bdecompress: {
3450     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3451                                                        : RISCVISD::BDECOMPRESS;
3452     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3453   }
3454   case Intrinsic::riscv_vmv_x_s:
3455     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3456     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3457                        Op.getOperand(1));
3458   case Intrinsic::riscv_vmv_v_x:
3459     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3460                             Op.getSimpleValueType(), DL, DAG, Subtarget);
3461   case Intrinsic::riscv_vfmv_v_f:
3462     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3463                        Op.getOperand(1), Op.getOperand(2));
3464   case Intrinsic::riscv_vmv_s_x: {
3465     SDValue Scalar = Op.getOperand(2);
3466 
3467     if (Scalar.getValueType().bitsLE(XLenVT)) {
3468       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3469       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3470                          Op.getOperand(1), Scalar, Op.getOperand(3));
3471     }
3472 
3473     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3474 
3475     // This is an i64 value that lives in two scalar registers. We have to
3476     // insert this in a convoluted way. First we build a vXi64 splat containing
3477     // the two values, which we assemble using some bit math. Next we'll use
3478     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
3479     // to merge element 0 from our splat into the source vector.
3480     // FIXME: This is probably not the best way to do this, but it is
3481     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3482     // point.
3483     //   sw lo, (a0)
3484     //   sw hi, 4(a0)
3485     //   vlse vX, (a0)
3486     //
3487     //   vid.v      vVid
3488     //   vmseq.vx   mMask, vVid, 0
3489     //   vmerge.vvm vDest, vSrc, vVal, mMask
3490     MVT VT = Op.getSimpleValueType();
3491     SDValue Vec = Op.getOperand(1);
3492     SDValue VL = Op.getOperand(3);
3493 
3494     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
3495     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3496                                       DAG.getConstant(0, DL, MVT::i32), VL);
3497 
3498     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3499     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3500     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3501     SDValue SelectCond =
3502         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3503                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
3504     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3505                        Vec, VL);
3506   }
3507   case Intrinsic::riscv_vslide1up:
3508   case Intrinsic::riscv_vslide1down:
3509   case Intrinsic::riscv_vslide1up_mask:
3510   case Intrinsic::riscv_vslide1down_mask: {
3511     // We need to special case these when the scalar is larger than XLen.
3512     unsigned NumOps = Op.getNumOperands();
3513     bool IsMasked = NumOps == 6;
3514     unsigned OpOffset = IsMasked ? 1 : 0;
3515     SDValue Scalar = Op.getOperand(2 + OpOffset);
3516     if (Scalar.getValueType().bitsLE(XLenVT))
3517       break;
3518 
3519     // Splatting a sign extended constant is fine.
3520     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3521       if (isInt<32>(CVal->getSExtValue()))
3522         break;
3523 
3524     MVT VT = Op.getSimpleValueType();
3525     assert(VT.getVectorElementType() == MVT::i64 &&
3526            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3527 
3528     // Convert the vector source to the equivalent nxvXi32 vector.
3529     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3530     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3531 
3532     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3533                                    DAG.getConstant(0, DL, XLenVT));
3534     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3535                                    DAG.getConstant(1, DL, XLenVT));
3536 
3537     // Double the VL since we halved SEW.
3538     SDValue VL = Op.getOperand(NumOps - 1);
3539     SDValue I32VL =
3540         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3541 
3542     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
3543     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
3544 
3545     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3546     // instructions.
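    // For vslide1up the hi word is slid in first and then the lo word, so once
    // the result is bitcast back to SEW=64 the new element 0 holds
    // (hi << 32) | lo; vslide1down mirrors this by pushing lo and then hi.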
3547     if (IntNo == Intrinsic::riscv_vslide1up ||
3548         IntNo == Intrinsic::riscv_vslide1up_mask) {
3549       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3550                         I32Mask, I32VL);
3551       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3552                         I32Mask, I32VL);
3553     } else {
3554       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3555                         I32Mask, I32VL);
3556       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3557                         I32Mask, I32VL);
3558     }
3559 
3560     // Convert back to nxvXi64.
3561     Vec = DAG.getBitcast(VT, Vec);
3562 
3563     if (!IsMasked)
3564       return Vec;
3565 
3566     // Apply mask after the operation.
3567     SDValue Mask = Op.getOperand(NumOps - 2);
3568     SDValue MaskedOff = Op.getOperand(1);
3569     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
3570   }
3571   }
3572 
3573   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3574 }
3575 
3576 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
3577                                                     SelectionDAG &DAG) const {
3578   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3579 }
3580 
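// Return the LMUL=1 scalable container type with the same element type as VT.
// For example, assuming RVVBitsPerBlock is 64, an i32 element type maps to
// nxv2i32.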
3581 static MVT getLMUL1VT(MVT VT) {
3582   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
3583          "Unexpected vector MVT");
3584   return MVT::getScalableVectorVT(
3585       VT.getVectorElementType(),
3586       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
3587 }
3588 
3589 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
3590   switch (ISDOpcode) {
3591   default:
3592     llvm_unreachable("Unhandled reduction");
3593   case ISD::VECREDUCE_ADD:
3594     return RISCVISD::VECREDUCE_ADD_VL;
3595   case ISD::VECREDUCE_UMAX:
3596     return RISCVISD::VECREDUCE_UMAX_VL;
3597   case ISD::VECREDUCE_SMAX:
3598     return RISCVISD::VECREDUCE_SMAX_VL;
3599   case ISD::VECREDUCE_UMIN:
3600     return RISCVISD::VECREDUCE_UMIN_VL;
3601   case ISD::VECREDUCE_SMIN:
3602     return RISCVISD::VECREDUCE_SMIN_VL;
3603   case ISD::VECREDUCE_AND:
3604     return RISCVISD::VECREDUCE_AND_VL;
3605   case ISD::VECREDUCE_OR:
3606     return RISCVISD::VECREDUCE_OR_VL;
3607   case ISD::VECREDUCE_XOR:
3608     return RISCVISD::VECREDUCE_XOR_VL;
3609   }
3610 }
3611 
3612 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op,
3613                                                       SelectionDAG &DAG) const {
3614   SDLoc DL(Op);
3615   SDValue Vec = Op.getOperand(0);
3616   MVT VecVT = Vec.getSimpleValueType();
3617   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
3618           Op.getOpcode() == ISD::VECREDUCE_OR ||
3619           Op.getOpcode() == ISD::VECREDUCE_XOR) &&
3620          "Unexpected reduction lowering");
3621 
3622   MVT XLenVT = Subtarget.getXLenVT();
3623   assert(Op.getValueType() == XLenVT &&
3624          "Expected reduction output to be legalized to XLenVT");
3625 
3626   MVT ContainerVT = VecVT;
3627   if (VecVT.isFixedLengthVector()) {
3628     ContainerVT = getContainerForFixedLengthVector(VecVT);
3629     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3630   }
3631 
3632   SDValue Mask, VL;
3633   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3634   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3635 
3636   switch (Op.getOpcode()) {
3637   default:
3638     llvm_unreachable("Unhandled reduction");
3639   case ISD::VECREDUCE_AND:
3640     // vpopc ~x == 0
3641     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL);
3642     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3643     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ);
3644   case ISD::VECREDUCE_OR:
3645     // vpopc x != 0
3646     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3647     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3648   case ISD::VECREDUCE_XOR: {
3649     // ((vpopc x) & 1) != 0
3650     SDValue One = DAG.getConstant(1, DL, XLenVT);
3651     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3652     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
3653     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3654   }
3655   }
3656 }
3657 
3658 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
3659                                             SelectionDAG &DAG) const {
3660   SDLoc DL(Op);
3661   SDValue Vec = Op.getOperand(0);
3662   EVT VecEVT = Vec.getValueType();
3663 
3664   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
3665 
3666   // Due to type legalization ordering we may have a vector type that needs to
3667   // be split. Do that manually so we can get down to a legal type.
3668   while (getTypeAction(*DAG.getContext(), VecEVT) ==
3669          TargetLowering::TypeSplitVector) {
3670     SDValue Lo, Hi;
3671     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
3672     VecEVT = Lo.getValueType();
3673     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
3674   }
3675 
3676   // TODO: The type may need to be widened rather than split. Or widened before
3677   // it can be split.
3678   if (!isTypeLegal(VecEVT))
3679     return SDValue();
3680 
3681   MVT VecVT = VecEVT.getSimpleVT();
3682   MVT VecEltVT = VecVT.getVectorElementType();
3683   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
3684 
3685   MVT ContainerVT = VecVT;
3686   if (VecVT.isFixedLengthVector()) {
3687     ContainerVT = getContainerForFixedLengthVector(VecVT);
3688     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3689   }
3690 
3691   MVT M1VT = getLMUL1VT(ContainerVT);
3692 
3693   SDValue Mask, VL;
3694   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3695 
3696   // FIXME: This is a VLMAX splat which might be too large and can prevent
3697   // vsetvli removal.
3698   SDValue NeutralElem =
3699       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
3700   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
3701   SDValue Reduction =
3702       DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
3703   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3704                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3705   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
3706 }
3707 
3708 // Given a reduction op, this function returns the matching reduction opcode,
3709 // the vector SDValue and the scalar SDValue required to lower this to a
3710 // RISCVISD node.
3711 static std::tuple<unsigned, SDValue, SDValue>
3712 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
3713   SDLoc DL(Op);
3714   auto Flags = Op->getFlags();
3715   unsigned Opcode = Op.getOpcode();
3716   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
3717   switch (Opcode) {
3718   default:
3719     llvm_unreachable("Unhandled reduction");
3720   case ISD::VECREDUCE_FADD:
3721     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
3722                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3723   case ISD::VECREDUCE_SEQ_FADD:
3724     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
3725                            Op.getOperand(0));
3726   case ISD::VECREDUCE_FMIN:
3727     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
3728                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3729   case ISD::VECREDUCE_FMAX:
3730     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
3731                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3732   }
3733 }
3734 
3735 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
3736                                               SelectionDAG &DAG) const {
3737   SDLoc DL(Op);
3738   MVT VecEltVT = Op.getSimpleValueType();
3739 
3740   unsigned RVVOpcode;
3741   SDValue VectorVal, ScalarVal;
3742   std::tie(RVVOpcode, VectorVal, ScalarVal) =
3743       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
3744   MVT VecVT = VectorVal.getSimpleValueType();
3745 
3746   MVT ContainerVT = VecVT;
3747   if (VecVT.isFixedLengthVector()) {
3748     ContainerVT = getContainerForFixedLengthVector(VecVT);
3749     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
3750   }
3751 
3752   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
3753 
3754   SDValue Mask, VL;
3755   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3756 
3757   // FIXME: This is a VLMAX splat which might be too large and can prevent
3758   // vsetvli removal.
3759   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
3760   SDValue Reduction =
3761       DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
3762   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3763                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3764 }
3765 
3766 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
3767                                                    SelectionDAG &DAG) const {
3768   SDValue Vec = Op.getOperand(0);
3769   SDValue SubVec = Op.getOperand(1);
3770   MVT VecVT = Vec.getSimpleValueType();
3771   MVT SubVecVT = SubVec.getSimpleValueType();
3772 
3773   SDLoc DL(Op);
3774   MVT XLenVT = Subtarget.getXLenVT();
3775   unsigned OrigIdx = Op.getConstantOperandVal(2);
3776   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3777 
3778   // We don't have the ability to slide mask vectors up indexed by their i1
3779   // elements; the smallest we can do is i8. Often we are able to bitcast to
3780   // equivalent i8 vectors. Note that when inserting a fixed-length vector
3781   // into a scalable one, we might not necessarily have enough scalable
3782   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
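  // For example (assuming such types reach this point), inserting nxv8i1 into
  // nxv64i1 at index 16 can instead be done as inserting nxv1i8 into nxv8i8 at
  // index 2.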
3783   if (SubVecVT.getVectorElementType() == MVT::i1 &&
3784       (OrigIdx != 0 || !Vec.isUndef())) {
3785     if (VecVT.getVectorMinNumElements() >= 8 &&
3786         SubVecVT.getVectorMinNumElements() >= 8) {
3787       assert(OrigIdx % 8 == 0 && "Invalid index");
3788       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3789              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3790              "Unexpected mask vector lowering");
3791       OrigIdx /= 8;
3792       SubVecVT =
3793           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3794                            SubVecVT.isScalableVector());
3795       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3796                                VecVT.isScalableVector());
3797       Vec = DAG.getBitcast(VecVT, Vec);
3798       SubVec = DAG.getBitcast(SubVecVT, SubVec);
3799     } else {
3800       // We can't slide this mask vector up indexed by its i1 elements.
3801       // This poses a problem when we wish to insert a scalable vector which
3802       // can't be re-expressed as a larger type. Just choose the slow path and
3803       // extend to a larger type, then truncate back down.
3804       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3805       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3806       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3807       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
3808       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
3809                         Op.getOperand(2));
3810       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
3811       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
3812     }
3813   }
3814 
3815   // If the subvector is a fixed-length type, we cannot use subregister
3816   // manipulation to simplify the codegen; we don't know which register of a
3817   // LMUL group contains the specific subvector as we only know the minimum
3818   // register size. Therefore we must slide the vector group up the full
3819   // amount.
3820   if (SubVecVT.isFixedLengthVector()) {
3821     if (OrigIdx == 0 && Vec.isUndef())
3822       return Op;
3823     MVT ContainerVT = VecVT;
3824     if (VecVT.isFixedLengthVector()) {
3825       ContainerVT = getContainerForFixedLengthVector(VecVT);
3826       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3827     }
3828     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
3829                          DAG.getUNDEF(ContainerVT), SubVec,
3830                          DAG.getConstant(0, DL, XLenVT));
3831     SDValue Mask =
3832         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3833     // Set the vector length to only the number of elements we care about. Note
3834     // that for slideup this includes the offset.
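    // For example, inserting a v2i32 at index 4 of a v8i32 uses a slideup
    // amount of 4 and a VL of 6.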
3835     SDValue VL =
3836         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
3837     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3838     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3839                                   SubVec, SlideupAmt, Mask, VL);
3840     if (VecVT.isFixedLengthVector())
3841       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3842     return DAG.getBitcast(Op.getValueType(), Slideup);
3843   }
3844 
3845   unsigned SubRegIdx, RemIdx;
3846   std::tie(SubRegIdx, RemIdx) =
3847       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3848           VecVT, SubVecVT, OrigIdx, TRI);
3849 
3850   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
3851   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
3852                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
3853                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
3854 
3855   // 1. If the Idx has been completely eliminated and this subvector's size is
3856   // a vector register or a multiple thereof, or the surrounding elements are
3857   // undef, then this is a subvector insert which naturally aligns to a vector
3858   // register. These can easily be handled using subregister manipulation.
3859   // 2. If the subvector is smaller than a vector register, then the insertion
3860   // must preserve the undisturbed elements of the register. We do this by
3861   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
3862   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
3863   // subvector within the vector register, and an INSERT_SUBVECTOR of that
3864   // LMUL=1 type back into the larger vector (resolving to another subregister
3865   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
3866   // to avoid allocating a large register group to hold our subvector.
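  // For example, inserting nxv2i32 at index 2 of an nxv8i32 lands exactly on
  // an LMUL=1 register boundary (RemIdx == 0) and is handled purely with
  // subregisters, whereas inserting nxv1i32 at index 1 leaves RemIdx == 1 and
  // takes the slide-based path below.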
3867   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
3868     return Op;
3869 
3870   // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
3871   // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy
3872   // (in our case undisturbed). This means we can set up a subvector insertion
3873   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
3874   // size of the subvector.
3875   MVT InterSubVT = VecVT;
3876   SDValue AlignedExtract = Vec;
3877   unsigned AlignedIdx = OrigIdx - RemIdx;
3878   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3879     InterSubVT = getLMUL1VT(VecVT);
3880     // Extract a subvector equal to the nearest full vector register type. This
3881     // should resolve to an EXTRACT_SUBREG instruction.
3882     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3883                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
3884   }
3885 
3886   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3887   // For scalable vectors this must be further multiplied by vscale.
3888   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
3889 
3890   SDValue Mask, VL;
3891   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
3892 
3893   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
3894   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
3895   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
3896   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
3897 
3898   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
3899                        DAG.getUNDEF(InterSubVT), SubVec,
3900                        DAG.getConstant(0, DL, XLenVT));
3901 
3902   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
3903                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
3904 
3905   // If required, insert this subvector back into the correct vector register.
3906   // This should resolve to an INSERT_SUBREG instruction.
3907   if (VecVT.bitsGT(InterSubVT))
3908     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
3909                           DAG.getConstant(AlignedIdx, DL, XLenVT));
3910 
3911   // We might have bitcast from a mask type: cast back to the original type if
3912   // required.
3913   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
3914 }
3915 
3916 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
3917                                                     SelectionDAG &DAG) const {
3918   SDValue Vec = Op.getOperand(0);
3919   MVT SubVecVT = Op.getSimpleValueType();
3920   MVT VecVT = Vec.getSimpleValueType();
3921 
3922   SDLoc DL(Op);
3923   MVT XLenVT = Subtarget.getXLenVT();
3924   unsigned OrigIdx = Op.getConstantOperandVal(1);
3925   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3926 
3927   // We don't have the ability to slide mask vectors down indexed by their i1
3928   // elements; the smallest we can do is i8. Often we are able to bitcast to
3929   // equivalent i8 vectors. Note that when extracting a fixed-length vector
3930   // from a scalable one, we might not necessarily have enough scalable
3931   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
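  // For example (assuming such types reach this point), extracting v8i1 at
  // index 8 of an nxv64i1 can instead be done as extracting v1i8 at index 1 of
  // an nxv8i8.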
3932   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
3933     if (VecVT.getVectorMinNumElements() >= 8 &&
3934         SubVecVT.getVectorMinNumElements() >= 8) {
3935       assert(OrigIdx % 8 == 0 && "Invalid index");
3936       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3937              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3938              "Unexpected mask vector lowering");
3939       OrigIdx /= 8;
3940       SubVecVT =
3941           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3942                            SubVecVT.isScalableVector());
3943       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3944                                VecVT.isScalableVector());
3945       Vec = DAG.getBitcast(VecVT, Vec);
3946     } else {
3947       // We can't slide this mask vector down indexed by its i1 elements.
3948       // This poses a problem when we wish to extract a scalable vector which
3949       // can't be re-expressed as a larger type. Just choose the slow path and
3950       // extend to a larger type, then truncate back down.
3951       // TODO: We could probably improve this when extracting a fixed-length
3952       // vector from a fixed-length vector, where we can extract as i8 and shift
3953       // the correct element right to reach the desired subvector.
3954       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3955       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3956       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3957       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
3958                         Op.getOperand(1));
3959       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
3960       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
3961     }
3962   }
3963 
3964   // If the subvector is a fixed-length type, we cannot use subregister
3965   // manipulation to simplify the codegen; we don't know which register of a
3966   // LMUL group contains the specific subvector as we only know the minimum
3967   // register size. Therefore we must slide the vector group down the full
3968   // amount.
3969   if (SubVecVT.isFixedLengthVector()) {
3970     // With an index of 0 this is a cast-like subvector extract, which can be
3971     // performed with subregister operations.
3972     if (OrigIdx == 0)
3973       return Op;
3974     MVT ContainerVT = VecVT;
3975     if (VecVT.isFixedLengthVector()) {
3976       ContainerVT = getContainerForFixedLengthVector(VecVT);
3977       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3978     }
3979     SDValue Mask =
3980         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3981     // Set the vector length to only the number of elements we care about. This
3982     // avoids sliding down elements we're going to discard straight away.
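    // For example, extracting a v2i32 at index 6 of a v8i32 slides the source
    // down by 6 with a VL of 2 and then extracts the result at index 0.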
3983     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
3984     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3985     SDValue Slidedown =
3986         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3987                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
3988     // Now we can use a cast-like subvector extract to get the result.
3989     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3990                             DAG.getConstant(0, DL, XLenVT));
3991     return DAG.getBitcast(Op.getValueType(), Slidedown);
3992   }
3993 
3994   unsigned SubRegIdx, RemIdx;
3995   std::tie(SubRegIdx, RemIdx) =
3996       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3997           VecVT, SubVecVT, OrigIdx, TRI);
3998 
3999   // If the Idx has been completely eliminated then this is a subvector extract
4000   // which naturally aligns to a vector register. These can easily be handled
4001   // using subregister manipulation.
4002   if (RemIdx == 0)
4003     return Op;
4004 
4005   // Else we must shift our vector register directly to extract the subvector.
4006   // Do this using VSLIDEDOWN.
4007 
4008   // If the vector type is an LMUL-group type, extract a subvector equal to the
4009   // nearest full vector register type. This should resolve to an EXTRACT_SUBREG
4010   // instruction.
4011   MVT InterSubVT = VecVT;
4012   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4013     InterSubVT = getLMUL1VT(VecVT);
4014     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4015                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
4016   }
4017 
4018   // Slide this vector register down by the desired number of elements in order
4019   // to place the desired subvector starting at element 0.
4020   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4021   // For scalable vectors this must be further multiplied by vscale.
4022   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
4023 
4024   SDValue Mask, VL;
4025   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
4026   SDValue Slidedown =
4027       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
4028                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
4029 
4030   // Now the vector is in the right position, extract our final subvector. This
4031   // should resolve to a COPY.
4032   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4033                           DAG.getConstant(0, DL, XLenVT));
4034 
4035   // We might have bitcast from a mask type: cast back to the original type if
4036   // required.
4037   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
4038 }
4039 
4040 // Lower step_vector to the vid instruction. Any non-identity step value must
4041 // be accounted for by manual expansion.
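// For example, a step of 4 becomes a vid.v followed by a vector shift left by
// 2, while a non-power-of-two step such as 3 becomes a vid.v followed by a
// vector multiply by the splatted step value.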
4042 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
4043                                               SelectionDAG &DAG) const {
4044   SDLoc DL(Op);
4045   MVT VT = Op.getSimpleValueType();
4046   MVT XLenVT = Subtarget.getXLenVT();
4047   SDValue Mask, VL;
4048   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
4049   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4050   uint64_t StepValImm = Op.getConstantOperandVal(0);
4051   if (StepValImm != 1) {
4052     assert(Op.getOperand(0).getValueType() == XLenVT &&
4053            "Unexpected step value type");
4054     if (isPowerOf2_64(StepValImm)) {
4055       SDValue StepVal =
4056           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4057                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
4058       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
4059     } else {
4060       SDValue StepVal =
4061           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Op.getOperand(0));
4062       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
4063     }
4064   }
4065   return StepVec;
4066 }
4067 
4068 // Implement vector_reverse using vrgather.vv with indices determined by
4069 // subtracting the id of each element from (VLMAX-1). This will convert
4070 // the indices like so:
4071 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
4072 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
4073 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
4074                                                  SelectionDAG &DAG) const {
4075   SDLoc DL(Op);
4076   MVT VecVT = Op.getSimpleValueType();
4077   unsigned EltSize = VecVT.getScalarSizeInBits();
4078   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
4079 
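  // MaxVLMAX is the per-register element count (VectorBitsMax / EltSize)
  // scaled by LMUL (MinSize / RVVBitsPerBlock). For example, with a 256-bit
  // maximum VLEN, an SEW=8 LMUL=2 type gives a MaxVLMAX of 64.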
4080   unsigned MaxVLMAX = 0;
4081   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
4082   if (VectorBitsMax != 0)
4083     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
4084 
4085   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
4086   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
4087 
4088   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
4089   // to use vrgatherei16.vv.
4090   // TODO: It's also possible to use vrgatherei16.vv for other types to
4091   // decrease register width for the index calculation.
4092   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
4093     // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
4094     // Reverse each half, then reassemble them in reverse order.
4095     // NOTE: It's also possible that, after splitting, VLMAX no longer
4096     // requires vrgatherei16.vv.
4097     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
4098       SDValue Lo, Hi;
4099       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
4100       EVT LoVT, HiVT;
4101       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
4102       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
4103       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
4104       // Reassemble the low and high pieces reversed.
4105       // FIXME: This is a CONCAT_VECTORS.
4106       SDValue Res =
4107           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
4108                       DAG.getIntPtrConstant(0, DL));
4109       return DAG.getNode(
4110           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
4111           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
4112     }
4113 
4114     // Just promote the int type to i16 which will double the LMUL.
4115     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
4116     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
4117   }
4118 
4119   MVT XLenVT = Subtarget.getXLenVT();
4120   SDValue Mask, VL;
4121   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4122 
4123   // Calculate VLMAX-1 for the desired SEW.
4124   unsigned MinElts = VecVT.getVectorMinNumElements();
4125   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
4126                               DAG.getConstant(MinElts, DL, XLenVT));
4127   SDValue VLMinus1 =
4128       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
4129 
4130   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
4131   bool IsRV32E64 =
4132       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
4133   SDValue SplatVL;
4134   if (!IsRV32E64)
4135     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
4136   else
4137     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
4138 
4139   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
4140   SDValue Indices =
4141       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
4142 
4143   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
4144 }
4145 
4146 SDValue
4147 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
4148                                                      SelectionDAG &DAG) const {
4149   SDLoc DL(Op);
4150   auto *Load = cast<LoadSDNode>(Op);
4151 
4152   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4153                                         Load->getMemoryVT(),
4154                                         *Load->getMemOperand()) &&
4155          "Expecting a correctly-aligned load");
4156 
4157   MVT VT = Op.getSimpleValueType();
4158   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4159 
4160   SDValue VL =
4161       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4162 
4163   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4164   SDValue NewLoad = DAG.getMemIntrinsicNode(
4165       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
4166       Load->getMemoryVT(), Load->getMemOperand());
4167 
4168   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4169   return DAG.getMergeValues({Result, Load->getChain()}, DL);
4170 }
4171 
4172 SDValue
4173 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
4174                                                       SelectionDAG &DAG) const {
4175   SDLoc DL(Op);
4176   auto *Store = cast<StoreSDNode>(Op);
4177 
4178   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4179                                         Store->getMemoryVT(),
4180                                         *Store->getMemOperand()) &&
4181          "Expecting a correctly-aligned store");
4182 
4183   SDValue StoreVal = Store->getValue();
4184   MVT VT = StoreVal.getSimpleValueType();
4185 
4186   // If the size is less than a byte, we need to pad with zeros to make a byte.
4187   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
4188     VT = MVT::v8i1;
4189     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
4190                            DAG.getConstant(0, DL, VT), StoreVal,
4191                            DAG.getIntPtrConstant(0, DL));
4192   }
4193 
4194   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4195 
4196   SDValue VL =
4197       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4198 
4199   SDValue NewValue =
4200       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
4201   return DAG.getMemIntrinsicNode(
4202       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
4203       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
4204       Store->getMemoryVT(), Store->getMemOperand());
4205 }
4206 
4207 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
4208   auto *Load = cast<MaskedLoadSDNode>(Op);
4209 
4210   SDLoc DL(Op);
4211   MVT VT = Op.getSimpleValueType();
4212   MVT XLenVT = Subtarget.getXLenVT();
4213 
4214   SDValue Mask = Load->getMask();
4215   SDValue PassThru = Load->getPassThru();
4216   SDValue VL;
4217 
4218   MVT ContainerVT = VT;
4219   if (VT.isFixedLengthVector()) {
4220     ContainerVT = getContainerForFixedLengthVector(VT);
4221     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4222 
4223     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4224     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4225     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4226   } else
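    // For scalable vectors, an X0 VL operand requests VLMAX.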
4227     VL = DAG.getRegister(RISCV::X0, XLenVT);
4228 
4229   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4230   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
4231   SDValue Ops[] = {Load->getChain(),   IntID, PassThru,
4232                    Load->getBasePtr(), Mask,  VL};
4233   SDValue Result =
4234       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4235                               Load->getMemoryVT(), Load->getMemOperand());
4236   SDValue Chain = Result.getValue(1);
4237 
4238   if (VT.isFixedLengthVector())
4239     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4240 
4241   return DAG.getMergeValues({Result, Chain}, DL);
4242 }
4243 
4244 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const {
4245   auto *Store = cast<MaskedStoreSDNode>(Op);
4246 
4247   SDLoc DL(Op);
4248   SDValue Val = Store->getValue();
4249   SDValue Mask = Store->getMask();
4250   MVT VT = Val.getSimpleValueType();
4251   MVT XLenVT = Subtarget.getXLenVT();
4252   SDValue VL;
4253 
4254   MVT ContainerVT = VT;
4255   if (VT.isFixedLengthVector()) {
4256     ContainerVT = getContainerForFixedLengthVector(VT);
4257     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4258 
4259     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4260     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4261     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4262   } else
4263     VL = DAG.getRegister(RISCV::X0, XLenVT);
4264 
4265   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
4266   return DAG.getMemIntrinsicNode(
4267       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
4268       {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL},
4269       Store->getMemoryVT(), Store->getMemOperand());
4270 }
4271 
4272 SDValue
4273 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
4274                                                       SelectionDAG &DAG) const {
4275   MVT InVT = Op.getOperand(0).getSimpleValueType();
4276   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
4277 
4278   MVT VT = Op.getSimpleValueType();
4279 
4280   SDValue Op1 =
4281       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4282   SDValue Op2 =
4283       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4284 
4285   SDLoc DL(Op);
4286   SDValue VL =
4287       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4288 
4289   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4290   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4291 
4292   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
4293                             Op.getOperand(2), Mask, VL);
4294 
4295   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
4296 }
4297 
4298 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
4299     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
4300   MVT VT = Op.getSimpleValueType();
4301 
4302   if (VT.getVectorElementType() == MVT::i1)
4303     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
4304 
4305   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
4306 }
4307 
4308 SDValue
4309 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
4310                                                       SelectionDAG &DAG) const {
4311   unsigned Opc;
4312   switch (Op.getOpcode()) {
4313   default: llvm_unreachable("Unexpected opcode!");
4314   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
4315   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
4316   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
4317   }
4318 
4319   return lowerToScalableOp(Op, DAG, Opc);
4320 }
4321 
4322 // Lower vector ABS to smax(X, sub(0, X)).
4323 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
4324   SDLoc DL(Op);
4325   MVT VT = Op.getSimpleValueType();
4326   SDValue X = Op.getOperand(0);
4327 
4328   assert(VT.isFixedLengthVector() && "Unexpected type");
4329 
4330   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4331   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
4332 
4333   SDValue Mask, VL;
4334   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4335 
4336   SDValue SplatZero =
4337       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4338                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4339   SDValue NegX =
4340       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
4341   SDValue Max =
4342       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
4343 
4344   return convertFromScalableVector(VT, Max, DAG, Subtarget);
4345 }
4346 
4347 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
4348     SDValue Op, SelectionDAG &DAG) const {
4349   SDLoc DL(Op);
4350   MVT VT = Op.getSimpleValueType();
4351   SDValue Mag = Op.getOperand(0);
4352   SDValue Sign = Op.getOperand(1);
4353   assert(Mag.getValueType() == Sign.getValueType() &&
4354          "Can only handle COPYSIGN with matching types.");
4355 
4356   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4357   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
4358   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
4359 
4360   SDValue Mask, VL;
4361   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4362 
4363   SDValue CopySign =
4364       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
4365 
4366   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
4367 }
4368 
4369 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
4370     SDValue Op, SelectionDAG &DAG) const {
4371   MVT VT = Op.getSimpleValueType();
4372   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4373 
4374   MVT I1ContainerVT =
4375       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4376 
4377   SDValue CC =
4378       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
4379   SDValue Op1 =
4380       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4381   SDValue Op2 =
4382       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
4383 
4384   SDLoc DL(Op);
4385   SDValue Mask, VL;
4386   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4387 
4388   SDValue Select =
4389       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
4390 
4391   return convertFromScalableVector(VT, Select, DAG, Subtarget);
4392 }
4393 
4394 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
4395                                                unsigned NewOpc,
4396                                                bool HasMask) const {
4397   MVT VT = Op.getSimpleValueType();
4398   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4399 
4400   // Create list of operands by converting existing ones to scalable types.
4401   SmallVector<SDValue, 6> Ops;
4402   for (const SDValue &V : Op->op_values()) {
4403     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4404 
4405     // Pass through non-vector operands.
4406     if (!V.getValueType().isVector()) {
4407       Ops.push_back(V);
4408       continue;
4409     }
4410 
4411     // "cast" fixed length vector to a scalable vector.
4412     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
4413            "Only fixed length vectors are supported!");
4414     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4415   }
4416 
4417   SDLoc DL(Op);
4418   SDValue Mask, VL;
4419   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4420   if (HasMask)
4421     Ops.push_back(Mask);
4422   Ops.push_back(VL);
4423 
4424   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
4425   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
4426 }
4427 
4428 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
4429 // * Operands of each node are assumed to be in the same order.
4430 // * The EVL operand is promoted from i32 to i64 on RV64.
4431 // * Fixed-length vectors are converted to their scalable-vector container
4432 //   types.
4433 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
4434                                        unsigned RISCVISDOpc) const {
4435   SDLoc DL(Op);
4436   MVT VT = Op.getSimpleValueType();
4437   SmallVector<SDValue, 4> Ops;
4438 
4439   for (const auto &OpIdx : enumerate(Op->ops())) {
4440     SDValue V = OpIdx.value();
4441     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4442     // Pass through operands which aren't fixed-length vectors.
4443     if (!V.getValueType().isFixedLengthVector()) {
4444       Ops.push_back(V);
4445       continue;
4446     }
4447     // "cast" fixed length vector to a scalable vector.
4448     MVT OpVT = V.getSimpleValueType();
4449     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
4450     assert(useRVVForFixedLengthVectorVT(OpVT) &&
4451            "Only fixed length vectors are supported!");
4452     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4453   }
4454 
4455   if (!VT.isFixedLengthVector())
4456     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
4457 
4458   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4459 
4460   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
4461 
4462   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
4463 }
4464 
4465 // Custom lower MGATHER to a legalized form for RVV. It will then be matched to
4466 // an RVV indexed load. The RVV indexed load instructions only support the
4467 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4468 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4469 // indexing is extended to the XLEN value type and scaled accordingly.
4470 SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
4471   auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
4472   SDLoc DL(Op);
4473 
4474   SDValue Index = MGN->getIndex();
4475   SDValue Mask = MGN->getMask();
4476   SDValue PassThru = MGN->getPassThru();
4477 
4478   MVT VT = Op.getSimpleValueType();
4479   MVT IndexVT = Index.getSimpleValueType();
4480   MVT XLenVT = Subtarget.getXLenVT();
4481 
4482   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4483          "Unexpected VTs!");
4484   assert(MGN->getBasePtr().getSimpleValueType() == XLenVT &&
4485          "Unexpected pointer type");
4486   // Targets have to explicitly opt-in for extending vector loads.
4487   assert(MGN->getExtensionType() == ISD::NON_EXTLOAD &&
4488          "Unexpected extending MGATHER");
4489 
4490   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4491   // the selection of the masked intrinsics doesn't do this for us.
4492   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4493 
4494   SDValue VL;
4495   MVT ContainerVT = VT;
4496   if (VT.isFixedLengthVector()) {
4497     // We need to use the larger of the result and index type to determine the
4498     // scalable type to use so we don't increase LMUL for any operand/result.
4499     if (VT.bitsGE(IndexVT)) {
4500       ContainerVT = getContainerForFixedLengthVector(VT);
4501       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4502                                  ContainerVT.getVectorElementCount());
4503     } else {
4504       IndexVT = getContainerForFixedLengthVector(IndexVT);
4505       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
4506                                      IndexVT.getVectorElementCount());
4507     }
4508 
4509     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4510 
4511     if (!IsUnmasked) {
4512       MVT MaskVT =
4513           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4514       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4515       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4516     }
4517 
4518     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4519   } else
4520     VL = DAG.getRegister(RISCV::X0, XLenVT);
4521 
4522   unsigned IntID =
4523       IsUnmasked ? Intrinsic::riscv_vloxei : Intrinsic::riscv_vloxei_mask;
4524   SmallVector<SDValue, 8> Ops{MGN->getChain(),
4525                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4526   if (!IsUnmasked)
4527     Ops.push_back(PassThru);
4528   Ops.push_back(MGN->getBasePtr());
4529   Ops.push_back(Index);
4530   if (!IsUnmasked)
4531     Ops.push_back(Mask);
4532   Ops.push_back(VL);
4533 
4534   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4535   SDValue Result =
4536       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4537                               MGN->getMemoryVT(), MGN->getMemOperand());
4538   SDValue Chain = Result.getValue(1);
4539 
4540   if (VT.isFixedLengthVector())
4541     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4542 
4543   return DAG.getMergeValues({Result, Chain}, DL);
4544 }
4545 
4546 // Custom lower MSCATTER to a legalized form for RVV. It will then be matched to
4547 // an RVV indexed store. The RVV indexed store instructions only support the
4548 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4549 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4550 // indexing is extended to the XLEN value type and scaled accordingly.
4551 SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
4552                                            SelectionDAG &DAG) const {
4553   auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
4554   SDLoc DL(Op);
4555   SDValue Index = MSN->getIndex();
4556   SDValue Mask = MSN->getMask();
4557   SDValue Val = MSN->getValue();
4558 
4559   MVT VT = Val.getSimpleValueType();
4560   MVT IndexVT = Index.getSimpleValueType();
4561   MVT XLenVT = Subtarget.getXLenVT();
4562 
4563   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4564          "Unexpected VTs!");
4565   assert(MSN->getBasePtr().getSimpleValueType() == XLenVT &&
4566          "Unexpected pointer type");
4567   // Targets have to explicitly opt-in for extending vector loads and
4568   // truncating vector stores.
4569   assert(!MSN->isTruncatingStore() && "Unexpected truncating MSCATTER");
4570 
4571   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4572   // the selection of the masked intrinsics doesn't do this for us.
4573   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4574 
4575   SDValue VL;
4576   if (VT.isFixedLengthVector()) {
4577     // We need to use the larger of the value and index type to determine the
4578     // scalable type to use so we don't increase LMUL for any operand/result.
4579     MVT ContainerVT;
4580     if (VT.bitsGE(IndexVT)) {
4581       ContainerVT = getContainerForFixedLengthVector(VT);
4582       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4583                                  ContainerVT.getVectorElementCount());
4584     } else {
4585       IndexVT = getContainerForFixedLengthVector(IndexVT);
4586       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4587                                      IndexVT.getVectorElementCount());
4588     }
4589 
4590     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4591     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4592 
4593     if (!IsUnmasked) {
4594       MVT MaskVT =
4595           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4596       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4597     }
4598 
4599     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4600   } else
4601     VL = DAG.getRegister(RISCV::X0, XLenVT);
4602 
4603   unsigned IntID =
4604       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
4605   SmallVector<SDValue, 8> Ops{MSN->getChain(),
4606                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4607   Ops.push_back(Val);
4608   Ops.push_back(MSN->getBasePtr());
4609   Ops.push_back(Index);
4610   if (!IsUnmasked)
4611     Ops.push_back(Mask);
4612   Ops.push_back(VL);
4613 
4614   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
4615                                  MSN->getMemoryVT(), MSN->getMemOperand());
4616 }
4617 
4618 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
4619                                                SelectionDAG &DAG) const {
4620   const MVT XLenVT = Subtarget.getXLenVT();
4621   SDLoc DL(Op);
4622   SDValue Chain = Op->getOperand(0);
4623   SDValue SysRegNo = DAG.getConstant(
4624       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4625   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
4626   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
4627 
4628   // The encoding used for the rounding mode in RISCV differs from the one used
4629   // by FLT_ROUNDS. To convert between them, the RISCV rounding mode is used as
4630   // an index into a table, which consists of a sequence of 4-bit fields, each
4631   // holding the corresponding FLT_ROUNDS mode.
4632   static const int Table =
4633       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
4634       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
4635       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
4636       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
4637       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
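  // For example, an FRM value of RISCVFPRndMode::RTZ shifts the table right by
  // 4 bits, leaving RoundingMode::TowardZero in the low nibble as the
  // FLT_ROUNDS result.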
4638 
4639   SDValue Shift =
4640       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
4641   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4642                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4643   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4644                                DAG.getConstant(7, DL, XLenVT));
4645 
4646   return DAG.getMergeValues({Masked, Chain}, DL);
4647 }
4648 
4649 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
4650                                                SelectionDAG &DAG) const {
4651   const MVT XLenVT = Subtarget.getXLenVT();
4652   SDLoc DL(Op);
4653   SDValue Chain = Op->getOperand(0);
4654   SDValue RMValue = Op->getOperand(1);
4655   SDValue SysRegNo = DAG.getConstant(
4656       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4657 
4658   // The encoding used for the rounding mode in RISCV differs from the one used
4659   // by FLT_ROUNDS. To convert between them, the C rounding mode is used as an
4660   // index into a table, which consists of a sequence of 4-bit fields, each
4661   // holding the corresponding RISCV rounding mode.
4662   static const unsigned Table =
4663       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
4664       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
4665       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
4666       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
4667       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
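  // For example, a FLT_ROUNDS value of RoundingMode::TowardNegative selects
  // the 4-bit field holding RISCVFPRndMode::RDN, which is then written to the
  // FRM CSR.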
4668 
4669   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
4670                               DAG.getConstant(2, DL, XLenVT));
4671   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4672                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4673   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4674                         DAG.getConstant(0x7, DL, XLenVT));
4675   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
4676                      RMValue);
4677 }
4678 
4679 // Returns the opcode of the target-specific SDNode that implements the 32-bit
4680 // form of the given Opcode.
4681 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
4682   switch (Opcode) {
4683   default:
4684     llvm_unreachable("Unexpected opcode");
4685   case ISD::SHL:
4686     return RISCVISD::SLLW;
4687   case ISD::SRA:
4688     return RISCVISD::SRAW;
4689   case ISD::SRL:
4690     return RISCVISD::SRLW;
4691   case ISD::SDIV:
4692     return RISCVISD::DIVW;
4693   case ISD::UDIV:
4694     return RISCVISD::DIVUW;
4695   case ISD::UREM:
4696     return RISCVISD::REMUW;
4697   case ISD::ROTL:
4698     return RISCVISD::ROLW;
4699   case ISD::ROTR:
4700     return RISCVISD::RORW;
4701   case RISCVISD::GREV:
4702     return RISCVISD::GREVW;
4703   case RISCVISD::GORC:
4704     return RISCVISD::GORCW;
4705   }
4706 }
4707 
4708 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
4709 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
4710 // otherwise be promoted to i64, making it difficult to select the
4711 // SLLW/DIVUW/.../*W instructions later on, because the fact that the operation
4712 // was originally of type i8/i16/i32 is lost.
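// For example, an i32 SRA on RV64 is lowered here to
// (i32 (trunc (RISCVISD::SRAW (any_extend x), (any_extend y)))) so that SRAW
// can be selected rather than a promoted 64-bit shift.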
4713 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
4714                                    unsigned ExtOpc0 = ISD::ANY_EXTEND,
4715                                    unsigned ExtOpc1 = ISD::ANY_EXTEND) {
4716   SDLoc DL(N);
4717   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4718   SDValue NewOp0 = DAG.getNode(ExtOpc0, DL, MVT::i64, N->getOperand(0));
4719   SDValue NewOp1 = DAG.getNode(ExtOpc1, DL, MVT::i64, N->getOperand(1));
4720   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4721   // ReplaceNodeResults requires we maintain the same type for the return value.
4722   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
4723 }
4724 
4725 // Converts the given 32-bit operation to an i64 operation with sign-extension
4726 // semantics in order to reduce the number of sign-extension instructions.
4727 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
4728   SDLoc DL(N);
4729   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4730   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4731   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
4732   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
4733                                DAG.getValueType(MVT::i32));
4734   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
4735 }
4736 
4737 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
4738                                              SmallVectorImpl<SDValue> &Results,
4739                                              SelectionDAG &DAG) const {
4740   SDLoc DL(N);
4741   switch (N->getOpcode()) {
4742   default:
4743     llvm_unreachable("Don't know how to custom type legalize this operation!");
4744   case ISD::STRICT_FP_TO_SINT:
4745   case ISD::STRICT_FP_TO_UINT:
4746   case ISD::FP_TO_SINT:
4747   case ISD::FP_TO_UINT: {
4748     bool IsStrict = N->isStrictFPOpcode();
4749     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4750            "Unexpected custom legalisation");
4751     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
4752     // If the FP type needs to be softened, emit a library call using the 'si'
4753     // version. If we left it to default legalization we'd end up with 'di'. If
4754     // the FP type doesn't need to be softened just let generic type
4755     // legalization promote the result type.
4756     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
4757         TargetLowering::TypeSoftenFloat)
4758       return;
4759     RTLIB::Libcall LC;
4760     if (N->getOpcode() == ISD::FP_TO_SINT ||
4761         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
4762       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
4763     else
4764       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
4765     MakeLibCallOptions CallOptions;
4766     EVT OpVT = Op0.getValueType();
4767     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
4768     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
4769     SDValue Result;
4770     std::tie(Result, Chain) =
4771         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
4772     Results.push_back(Result);
4773     if (IsStrict)
4774       Results.push_back(Chain);
4775     break;
4776   }
4777   case ISD::READCYCLECOUNTER: {
4778     assert(!Subtarget.is64Bit() &&
4779            "READCYCLECOUNTER only has custom type legalization on riscv32");
4780 
4781     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4782     SDValue RCW =
4783         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
4784 
4785     Results.push_back(
4786         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
4787     Results.push_back(RCW.getValue(2));
4788     break;
4789   }
4790   case ISD::MUL: {
4791     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
4792     unsigned XLen = Subtarget.getXLen();
4793     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
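    // MULHSU produces the high XLen bits of a signed*unsigned product, so
    // together with MUL for the low half it forms the full double-width
    // result, provided exactly one operand is known unsigned (high half zero)
    // and the other supplies the sign bits.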
4794     if (Size > XLen) {
4795       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
4796       SDValue LHS = N->getOperand(0);
4797       SDValue RHS = N->getOperand(1);
4798       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
4799 
4800       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
4801       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
4802       // We need exactly one side to be unsigned.
4803       if (LHSIsU == RHSIsU)
4804         return;
4805 
4806       auto MakeMULPair = [&](SDValue S, SDValue U) {
4807         MVT XLenVT = Subtarget.getXLenVT();
4808         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
4809         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
4810         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
4811         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
4812         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
4813       };
4814 
4815       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
4816       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
4817 
4818       // The other operand should be signed, but still prefer MULH when
4819       // possible.
4820       if (RHSIsU && LHSIsS && !RHSIsS)
4821         Results.push_back(MakeMULPair(LHS, RHS));
4822       else if (LHSIsU && RHSIsS && !LHSIsS)
4823         Results.push_back(MakeMULPair(RHS, LHS));
4824 
4825       return;
4826     }
4827     LLVM_FALLTHROUGH;
4828   }
4829   case ISD::ADD:
4830   case ISD::SUB:
4831     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4832            "Unexpected custom legalisation");
4833     if (N->getOperand(1).getOpcode() == ISD::Constant)
4834       return;
4835     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
4836     break;
4837   case ISD::SHL:
4838   case ISD::SRA:
4839   case ISD::SRL:
4840     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4841            "Unexpected custom legalisation");
4842     if (N->getOperand(1).getOpcode() == ISD::Constant)
4843       return;
4844     Results.push_back(customLegalizeToWOp(N, DAG));
4845     break;
4846   case ISD::ROTL:
4847   case ISD::ROTR:
4848     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4849            "Unexpected custom legalisation");
4850     Results.push_back(customLegalizeToWOp(N, DAG));
4851     break;
4852   case ISD::CTTZ:
4853   case ISD::CTTZ_ZERO_UNDEF:
4854   case ISD::CTLZ:
4855   case ISD::CTLZ_ZERO_UNDEF: {
4856     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4857            "Unexpected custom legalisation");
4858 
4859     SDValue NewOp0 =
4860         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4861     bool IsCTZ =
4862         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
4863     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
4864     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
4865     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4866     return;
4867   }
4868   case ISD::SDIV:
4869   case ISD::UDIV:
4870   case ISD::UREM: {
4871     MVT VT = N->getSimpleValueType(0);
4872     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
4873            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
4874            "Unexpected custom legalisation");
4875     // Don't promote division/remainder by constant since we should expand those
4876     // to a multiply by a magic constant.
4877     // FIXME: What if the expansion is disabled for minsize?
4878     if (N->getOperand(1).getOpcode() == ISD::Constant)
4879       return;
4880 
4881     // If the input is i32, use ANY_EXTEND since the W instructions don't read
4882     // the upper 32 bits. For other types we need to sign or zero extend
4883     // based on the opcode.
4884     unsigned ExtOpc0 = ISD::ANY_EXTEND, ExtOpc1 = ISD::ANY_EXTEND;
4885     if (VT != MVT::i32) {
4886       ExtOpc0 = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
4887                                             : ISD::ZERO_EXTEND;
4888       ExtOpc1 = ExtOpc0;
4889     } else if (N->getOperand(0).getOpcode() == ISD::Constant) {
4890       // Sign extend i32 constants to improve materialization.
4891       ExtOpc0 = ISD::SIGN_EXTEND;
4892     }
4893 
4894     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc0, ExtOpc1));
4895     break;
4896   }
4897   case ISD::UADDO:
4898   case ISD::USUBO: {
4899     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4900            "Unexpected custom legalisation");
4901     bool IsAdd = N->getOpcode() == ISD::UADDO;
4902     // Create an ADDW or SUBW.
4903     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4904     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4905     SDValue Res =
4906         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
4907     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
4908                       DAG.getValueType(MVT::i32));
4909 
4910     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
4911     // Since the inputs are sign extended from i32, this is equivalent to
4912     // comparing the lower 32 bits.
4913     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4914     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
4915                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
4916 
4917     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4918     Results.push_back(Overflow);
4919     return;
4920   }
4921   case ISD::UADDSAT:
4922   case ISD::USUBSAT: {
4923     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4924            "Unexpected custom legalisation");
4925     if (Subtarget.hasStdExtZbb()) {
4926       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Sign
4927       // extending allows overflow of the lower 32 bits to be detected on the
4928       // promoted size.
4929       SDValue LHS =
4930           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4931       SDValue RHS =
4932           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
4933       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
4934       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4935       return;
4936     }
4937 
4938     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
4939     // promotion for UADDO/USUBO.
4940     Results.push_back(expandAddSubSat(N, DAG));
4941     return;
4942   }
4943   case ISD::BITCAST: {
4944     EVT VT = N->getValueType(0);
4945     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
4946     SDValue Op0 = N->getOperand(0);
4947     EVT Op0VT = Op0.getValueType();
4948     MVT XLenVT = Subtarget.getXLenVT();
4949     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
4950       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
4951       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
4952     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
4953                Subtarget.hasStdExtF()) {
4954       SDValue FPConv =
4955           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
4956       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
4957     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
4958                isTypeLegal(Op0VT)) {
4959       // Custom-legalize bitcasts from fixed-length vector types to illegal
4960       // scalar types in order to improve codegen. Bitcast the vector to a
4961       // one-element vector type whose element type is the same as the result
4962       // type, and extract the first element.
4963       LLVMContext &Context = *DAG.getContext();
4964       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
4965       Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
4966                                     DAG.getConstant(0, DL, XLenVT)));
4967     }
4968     break;
4969   }
4970   case RISCVISD::GREV:
4971   case RISCVISD::GORC: {
4972     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4973            "Unexpected custom legalisation");
4974     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
4975     // This is similar to customLegalizeToWOp, except that we pass the second
4976     // operand (a TargetConstant) straight through: it is already of type
4977     // XLenVT.
4978     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4979     SDValue NewOp0 =
4980         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4981     SDValue NewOp1 =
4982         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4983     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4984     // ReplaceNodeResults requires we maintain the same type for the return
4985     // value.
4986     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4987     break;
4988   }
4989   case RISCVISD::SHFL: {
4990     // There is no SHFLIW instruction, but we can just promote the operation.
4991     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4992            "Unexpected custom legalisation");
4993     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
4994     SDValue NewOp0 =
4995         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4996     SDValue NewOp1 =
4997         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4998     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
4999     // ReplaceNodeResults requires we maintain the same type for the return
5000     // value.
5001     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5002     break;
5003   }
5004   case ISD::BSWAP:
5005   case ISD::BITREVERSE: {
5006     MVT VT = N->getSimpleValueType(0);
5007     MVT XLenVT = Subtarget.getXLenVT();
5008     assert((VT == MVT::i8 || VT == MVT::i16 ||
5009             (VT == MVT::i32 && Subtarget.is64Bit())) &&
5010            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
5011     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
5012     unsigned Imm = VT.getSizeInBits() - 1;
5013     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
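    // For example, for an i16 operand Imm is 15 for BITREVERSE but 8 for
    // BSWAP, so only whole bytes are swapped.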
5014     if (N->getOpcode() == ISD::BSWAP)
5015       Imm &= ~0x7U;
5016     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
5017     SDValue GREVI =
5018         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
5019     // ReplaceNodeResults requires we maintain the same type for the return
5020     // value.
5021     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
5022     break;
5023   }
5024   case ISD::FSHL:
5025   case ISD::FSHR: {
5026     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5027            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
5028     SDValue NewOp0 =
5029         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5030     SDValue NewOp1 =
5031         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5032     SDValue NewOp2 =
5033         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5034     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
5035     // Mask the shift amount to 5 bits.
5036     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5037                          DAG.getConstant(0x1f, DL, MVT::i64));
5038     unsigned Opc =
5039         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
5040     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
5041     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
5042     break;
5043   }
5044   case ISD::EXTRACT_VECTOR_ELT: {
5045     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
5046     // type is illegal (currently only vXi64 RV32).
5047     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
5048     // transferred to the destination register. We issue two of these from the
5049     // upper- and lower- halves of the SEW-bit vector element, slid down to the
5050     // first element.
5051     SDValue Vec = N->getOperand(0);
5052     SDValue Idx = N->getOperand(1);
5053 
    // The vector type hasn't been legalized yet, so if it needs legalization
    // we can't issue target-specific nodes for it.
    // FIXME: We could manually legalize the vector type if it's important.
5057     if (!isTypeLegal(Vec.getValueType()))
5058       return;
5059 
5060     MVT VecVT = Vec.getSimpleValueType();
5061 
5062     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
5063            VecVT.getVectorElementType() == MVT::i64 &&
5064            "Unexpected EXTRACT_VECTOR_ELT legalization");
5065 
5066     // If this is a fixed vector, we need to convert it to a scalable vector.
5067     MVT ContainerVT = VecVT;
5068     if (VecVT.isFixedLengthVector()) {
5069       ContainerVT = getContainerForFixedLengthVector(VecVT);
5070       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5071     }
5072 
5073     MVT XLenVT = Subtarget.getXLenVT();
5074 
5075     // Use a VL of 1 to avoid processing more elements than we need.
5076     MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5077     SDValue VL = DAG.getConstant(1, DL, XLenVT);
5078     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5079 
5080     // Unless the index is known to be 0, we must slide the vector down to get
5081     // the desired element into index 0.
5082     if (!isNullConstant(Idx)) {
5083       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5084                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
5085     }
5086 
5087     // Extract the lower XLEN bits of the correct vector element.
5088     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5089 
5090     // To extract the upper XLEN bits of the vector element, shift the first
5091     // element right by 32 bits and re-extract the lower XLEN bits.
5092     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5093                                      DAG.getConstant(32, DL, XLenVT), VL);
5094     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
5095                                  ThirtyTwoV, Mask, VL);
5096 
5097     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5098 
5099     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5100     break;
5101   }
5102   case ISD::INTRINSIC_WO_CHAIN: {
5103     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5104     switch (IntNo) {
5105     default:
5106       llvm_unreachable(
5107           "Don't know how to custom type legalize this intrinsic!");
5108     case Intrinsic::riscv_orc_b: {
5109       // Lower to the GORCI encoding for orc.b with the operand extended.
5110       SDValue NewOp =
5111           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5112       // If Zbp is enabled, use GORCIW which will sign extend the result.
5113       unsigned Opc =
5114           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
5115       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
5116                                 DAG.getConstant(7, DL, MVT::i64));
5117       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5118       return;
5119     }
5120     case Intrinsic::riscv_grev:
5121     case Intrinsic::riscv_gorc: {
5122       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5123              "Unexpected custom legalisation");
5124       SDValue NewOp1 =
5125           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5126       SDValue NewOp2 =
5127           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5128       unsigned Opc =
5129           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
5130       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5131       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5132       break;
5133     }
5134     case Intrinsic::riscv_shfl:
5135     case Intrinsic::riscv_unshfl: {
5136       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5137              "Unexpected custom legalisation");
5138       SDValue NewOp1 =
5139           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5140       SDValue NewOp2 =
5141           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5142       unsigned Opc =
5143           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
5144       if (isa<ConstantSDNode>(N->getOperand(2))) {
5145         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5146                              DAG.getConstant(0xf, DL, MVT::i64));
5147         Opc =
5148             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
5149       }
5150       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5151       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5152       break;
5153     }
5154     case Intrinsic::riscv_bcompress:
5155     case Intrinsic::riscv_bdecompress: {
5156       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5157              "Unexpected custom legalisation");
5158       SDValue NewOp1 =
5159           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5160       SDValue NewOp2 =
5161           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5162       unsigned Opc = IntNo == Intrinsic::riscv_bcompress
5163                          ? RISCVISD::BCOMPRESSW
5164                          : RISCVISD::BDECOMPRESSW;
5165       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5166       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5167       break;
5168     }
5169     case Intrinsic::riscv_vmv_x_s: {
5170       EVT VT = N->getValueType(0);
5171       MVT XLenVT = Subtarget.getXLenVT();
5172       if (VT.bitsLT(XLenVT)) {
        // Simple case: just extract using vmv.x.s and truncate.
5174         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
5175                                       Subtarget.getXLenVT(), N->getOperand(1));
5176         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
5177         return;
5178       }
5179 
5180       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
5181              "Unexpected custom legalization");
5182 
5183       // We need to do the move in two steps.
5184       SDValue Vec = N->getOperand(1);
5185       MVT VecVT = Vec.getSimpleValueType();
5186 
5187       // First extract the lower XLEN bits of the element.
5188       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5189 
5190       // To extract the upper XLEN bits of the vector element, shift the first
5191       // element right by 32 bits and re-extract the lower XLEN bits.
5192       SDValue VL = DAG.getConstant(1, DL, XLenVT);
5193       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5194       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5195       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
5196                                        DAG.getConstant(32, DL, XLenVT), VL);
5197       SDValue LShr32 =
5198           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
5199       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5200 
5201       Results.push_back(
5202           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5203       break;
5204     }
5205     }
5206     break;
5207   }
5208   case ISD::VECREDUCE_ADD:
5209   case ISD::VECREDUCE_AND:
5210   case ISD::VECREDUCE_OR:
5211   case ISD::VECREDUCE_XOR:
5212   case ISD::VECREDUCE_SMAX:
5213   case ISD::VECREDUCE_UMAX:
5214   case ISD::VECREDUCE_SMIN:
5215   case ISD::VECREDUCE_UMIN:
5216     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
5217       Results.push_back(V);
5218     break;
5219   case ISD::FLT_ROUNDS_: {
5220     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
5221     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
5222     Results.push_back(Res.getValue(0));
5223     Results.push_back(Res.getValue(1));
5224     break;
5225   }
5226   }
5227 }
5228 
5229 // A structure to hold one of the bit-manipulation patterns below. Together, a
5230 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
5231 //   (or (and (shl x, 1), 0xAAAAAAAA),
5232 //       (and (srl x, 1), 0x55555555))
5233 struct RISCVBitmanipPat {
5234   SDValue Op;
5235   unsigned ShAmt;
5236   bool IsSHL;
5237 
5238   bool formsPairWith(const RISCVBitmanipPat &Other) const {
5239     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
5240   }
5241 };
5242 
5243 // Matches patterns of the form
5244 //   (and (shl x, C2), (C1 << C2))
5245 //   (and (srl x, C2), C1)
5246 //   (shl (and x, C1), C2)
5247 //   (srl (and x, (C1 << C2)), C2)
5248 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
5249 // The expected masks for each shift amount are specified in BitmanipMasks where
5250 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
// The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
// whether BitmanipMasks contains 6 or 5 entries, assuming that the maximum
// possible XLen is 64.
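// For example, when matching (and (srl x, 4), C1) against the GREVI masks,
// C1 must equal BitmanipMasks[2] truncated to the operand width, i.e.
// 0x0F0F0F0F for a 32-bit operand.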
5254 static Optional<RISCVBitmanipPat>
5255 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
5256   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
5257          "Unexpected number of masks");
5258   Optional<uint64_t> Mask;
5259   // Optionally consume a mask around the shift operation.
5260   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
5261     Mask = Op.getConstantOperandVal(1);
5262     Op = Op.getOperand(0);
5263   }
5264   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
5265     return None;
5266   bool IsSHL = Op.getOpcode() == ISD::SHL;
5267 
5268   if (!isa<ConstantSDNode>(Op.getOperand(1)))
5269     return None;
5270   uint64_t ShAmt = Op.getConstantOperandVal(1);
5271 
5272   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5273   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
5274     return None;
5275   // If we don't have enough masks for 64 bit, then we must be trying to
5276   // match SHFL so we're only allowed to shift 1/4 of the width.
5277   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
5278     return None;
5279 
5280   SDValue Src = Op.getOperand(0);
5281 
5282   // The expected mask is shifted left when the AND is found around SHL
5283   // patterns.
5284   //   ((x >> 1) & 0x55555555)
5285   //   ((x << 1) & 0xAAAAAAAA)
5286   bool SHLExpMask = IsSHL;
5287 
5288   if (!Mask) {
5289     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
5290     // the mask is all ones: consume that now.
5291     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
5292       Mask = Src.getConstantOperandVal(1);
5293       Src = Src.getOperand(0);
5294       // The expected mask is now in fact shifted left for SRL, so reverse the
5295       // decision.
5296       //   ((x & 0xAAAAAAAA) >> 1)
5297       //   ((x & 0x55555555) << 1)
5298       SHLExpMask = !SHLExpMask;
5299     } else {
5300       // Use a default shifted mask of all-ones if there's no AND, truncated
5301       // down to the expected width. This simplifies the logic later on.
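      // For example, (srl x, 16) on i32 yields a default mask of 0x0000FFFF,
      // which matches the GREVI mask for shift amount 16.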
5302       Mask = maskTrailingOnes<uint64_t>(Width);
5303       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
5304     }
5305   }
5306 
5307   unsigned MaskIdx = Log2_32(ShAmt);
5308   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5309 
5310   if (SHLExpMask)
5311     ExpMask <<= ShAmt;
5312 
5313   if (Mask != ExpMask)
5314     return None;
5315 
5316   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
5317 }
5318 
5319 // Matches any of the following bit-manipulation patterns:
5320 //   (and (shl x, 1), (0x55555555 << 1))
5321 //   (and (srl x, 1), 0x55555555)
5322 //   (shl (and x, 0x55555555), 1)
5323 //   (srl (and x, (0x55555555 << 1)), 1)
5324 // where the shift amount and mask may vary thus:
5325 //   [1]  = 0x55555555 / 0xAAAAAAAA
5326 //   [2]  = 0x33333333 / 0xCCCCCCCC
5327 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
5328 //   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
5330 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
5331 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
5332   // These are the unshifted masks which we use to match bit-manipulation
5333   // patterns. They may be shifted left in certain circumstances.
5334   static const uint64_t BitmanipMasks[] = {
5335       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
5336       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
5337 
5338   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5339 }
5340 
5341 // Match the following pattern as a GREVI(W) operation
5342 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
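// For example, on RV32
//   (or (and (shl x, 4), 0xF0F0F0F0), (and (srl x, 4), 0x0F0F0F0F))
// becomes (GREV x, 4), which swaps adjacent nibbles.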
5343 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
5344                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5346   EVT VT = Op.getValueType();
5347 
5348   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5349     auto LHS = matchGREVIPat(Op.getOperand(0));
5350     auto RHS = matchGREVIPat(Op.getOperand(1));
5351     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
5352       SDLoc DL(Op);
5353       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
5354                          DAG.getConstant(LHS->ShAmt, DL, VT));
5355     }
5356   }
5357   return SDValue();
5358 }
5359 
// Matches any of the following patterns as a GORCI(W) operation
5361 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
5362 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
5363 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
5364 // Note that with the variant of 3.,
5365 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
5366 // the inner pattern will first be matched as GREVI and then the outer
5367 // pattern will be matched to GORC via the first rule above.
5368 // 4.  (or (rotl/rotr x, bitwidth/2), x)
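// For example, on RV32 (or (rotl x, 16), x) becomes (GORC x, 16) via rule 4.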
5369 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
5370                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5372   EVT VT = Op.getValueType();
5373 
5374   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5375     SDLoc DL(Op);
5376     SDValue Op0 = Op.getOperand(0);
5377     SDValue Op1 = Op.getOperand(1);
5378 
5379     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
5380       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
5381           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
5382           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
5383         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
5384       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
5385       if ((Reverse.getOpcode() == ISD::ROTL ||
5386            Reverse.getOpcode() == ISD::ROTR) &&
5387           Reverse.getOperand(0) == X &&
5388           isa<ConstantSDNode>(Reverse.getOperand(1))) {
5389         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
5390         if (RotAmt == (VT.getSizeInBits() / 2))
5391           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
5392                              DAG.getConstant(RotAmt, DL, VT));
5393       }
5394       return SDValue();
5395     };
5396 
5397     // Check for either commutable permutation of (or (GREVI x, shamt), x)
5398     if (SDValue V = MatchOROfReverse(Op0, Op1))
5399       return V;
5400     if (SDValue V = MatchOROfReverse(Op1, Op0))
5401       return V;
5402 
5403     // OR is commutable so canonicalize its OR operand to the left
5404     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
5405       std::swap(Op0, Op1);
5406     if (Op0.getOpcode() != ISD::OR)
5407       return SDValue();
5408     SDValue OrOp0 = Op0.getOperand(0);
5409     SDValue OrOp1 = Op0.getOperand(1);
5410     auto LHS = matchGREVIPat(OrOp0);
5411     // OR is commutable so swap the operands and try again: x might have been
5412     // on the left
5413     if (!LHS) {
5414       std::swap(OrOp0, OrOp1);
5415       LHS = matchGREVIPat(OrOp0);
5416     }
5417     auto RHS = matchGREVIPat(Op1);
5418     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
5419       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
5420                          DAG.getConstant(LHS->ShAmt, DL, VT));
5421     }
5422   }
5423   return SDValue();
5424 }
5425 
5426 // Matches any of the following bit-manipulation patterns:
5427 //   (and (shl x, 1), (0x22222222 << 1))
5428 //   (and (srl x, 1), 0x22222222)
5429 //   (shl (and x, 0x22222222), 1)
5430 //   (srl (and x, (0x22222222 << 1)), 1)
5431 // where the shift amount and mask may vary thus:
5432 //   [1]  = 0x22222222 / 0x44444444
5433 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
5434 //   [4]  = 0x00F000F0 / 0x0F000F00
5435 //   [8]  = 0x0000FF00 / 0x00FF0000
5436 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
5437 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
5438   // These are the unshifted masks which we use to match bit-manipulation
5439   // patterns. They may be shifted left in certain circumstances.
5440   static const uint64_t BitmanipMasks[] = {
5441       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
5442       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
5443 
5444   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5445 }
5446 
// Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
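// For example, on RV32
//   (or (or (and (shl x, 8), 0x00FF0000), (and (srl x, 8), 0x0000FF00)),
//       (and x, 0xFF0000FF))
// becomes (SHFL x, 8).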
5448 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
5449                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5451   EVT VT = Op.getValueType();
5452 
5453   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
5454     return SDValue();
5455 
5456   SDValue Op0 = Op.getOperand(0);
5457   SDValue Op1 = Op.getOperand(1);
5458 
  // OR is commutable, so put the operand that is itself an OR on the LHS.
5460   if (Op0.getOpcode() != ISD::OR)
5461     std::swap(Op0, Op1);
5462   if (Op0.getOpcode() != ISD::OR)
5463     return SDValue();
5464 
5465   // We found an inner OR, so our operands are the operands of the inner OR
5466   // and the other operand of the outer OR.
5467   SDValue A = Op0.getOperand(0);
5468   SDValue B = Op0.getOperand(1);
5469   SDValue C = Op1;
5470 
5471   auto Match1 = matchSHFLPat(A);
5472   auto Match2 = matchSHFLPat(B);
5473 
5474   // If neither matched, we failed.
5475   if (!Match1 && !Match2)
5476     return SDValue();
5477 
  // We had at least one match. If one failed, try the remaining C operand.
5479   if (!Match1) {
5480     std::swap(A, C);
5481     Match1 = matchSHFLPat(A);
5482     if (!Match1)
5483       return SDValue();
5484   } else if (!Match2) {
5485     std::swap(B, C);
5486     Match2 = matchSHFLPat(B);
5487     if (!Match2)
5488       return SDValue();
5489   }
5490   assert(Match1 && Match2);
5491 
5492   // Make sure our matches pair up.
5493   if (!Match1->formsPairWith(*Match2))
5494     return SDValue();
5495 
  // All that remains is to make sure C is an AND with the same input that
  // masks out the bits that are being shuffled.
5498   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
5499       C.getOperand(0) != Match1->Op)
5500     return SDValue();
5501 
5502   uint64_t Mask = C.getConstantOperandVal(1);
5503 
5504   static const uint64_t BitmanipMasks[] = {
5505       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
5506       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
5507   };
5508 
5509   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5510   unsigned MaskIdx = Log2_32(Match1->ShAmt);
5511   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5512 
5513   if (Mask != ExpMask)
5514     return SDValue();
5515 
5516   SDLoc DL(Op);
5517   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
5518                      DAG.getConstant(Match1->ShAmt, DL, VT));
5519 }
5520 
5521 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
5522 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated GORCI
// stage does not undo itself, but it is redundant.
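// For example, (GREVI (GREVI x, 1), 2) -> (GREVI x, 3), while
// (GREVI (GREVI x, 3), 3) -> x and (GORCI (GORCI x, 1), 2) -> (GORCI x, 3).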
5525 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
5526   SDValue Src = N->getOperand(0);
5527 
5528   if (Src.getOpcode() != N->getOpcode())
5529     return SDValue();
5530 
5531   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
5532       !isa<ConstantSDNode>(Src.getOperand(1)))
5533     return SDValue();
5534 
5535   unsigned ShAmt1 = N->getConstantOperandVal(1);
5536   unsigned ShAmt2 = Src.getConstantOperandVal(1);
5537   Src = Src.getOperand(0);
5538 
5539   unsigned CombinedShAmt;
5540   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
5541     CombinedShAmt = ShAmt1 | ShAmt2;
5542   else
5543     CombinedShAmt = ShAmt1 ^ ShAmt2;
5544 
5545   if (CombinedShAmt == 0)
5546     return Src;
5547 
5548   SDLoc DL(N);
5549   return DAG.getNode(
5550       N->getOpcode(), DL, N->getValueType(0), Src,
5551       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
5552 }
5553 
5554 // Combine a constant select operand into its use:
5555 //
5556 // (and (select_cc lhs, rhs, cc, -1, c), x)
5557 //   -> (select_cc lhs, rhs, cc, x, (and, x, c))  [AllOnes=1]
5558 // (or  (select_cc lhs, rhs, cc, 0, c), x)
5559 //   -> (select_cc lhs, rhs, cc, x, (or, x, c))  [AllOnes=0]
5560 // (xor (select_cc lhs, rhs, cc, 0, c), x)
5561 //   -> (select_cc lhs, rhs, cc, x, (xor, x, c))  [AllOnes=0]
5562 static SDValue combineSelectCCAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
5563                                      SelectionDAG &DAG, bool AllOnes) {
5564   EVT VT = N->getValueType(0);
5565 
5566   if (Slct.getOpcode() != RISCVISD::SELECT_CC || !Slct.hasOneUse())
5567     return SDValue();
5568 
5569   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
5570     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
5571   };
5572 
5573   bool SwapSelectOps;
5574   SDValue TrueVal = Slct.getOperand(3);
5575   SDValue FalseVal = Slct.getOperand(4);
5576   SDValue NonConstantVal;
5577   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
5578     SwapSelectOps = false;
5579     NonConstantVal = FalseVal;
5580   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
5581     SwapSelectOps = true;
5582     NonConstantVal = TrueVal;
5583   } else
5584     return SDValue();
5585 
  // Slct is now known to be the desired identity constant when CC is true.
5587   TrueVal = OtherOp;
5588   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
5589   // Unless SwapSelectOps says CC should be false.
5590   if (SwapSelectOps)
5591     std::swap(TrueVal, FalseVal);
5592 
5593   return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
5594                      {Slct.getOperand(0), Slct.getOperand(1),
5595                       Slct.getOperand(2), TrueVal, FalseVal});
5596 }
5597 
// Attempt combineSelectCCAndUse on each operand of a commutative operator N.
5599 static SDValue combineSelectCCAndUseCommutative(SDNode *N, SelectionDAG &DAG,
5600                                                 bool AllOnes) {
5601   SDValue N0 = N->getOperand(0);
5602   SDValue N1 = N->getOperand(1);
5603   if (SDValue Result = combineSelectCCAndUse(N, N0, N1, DAG, AllOnes))
5604     return Result;
5605   if (SDValue Result = combineSelectCCAndUse(N, N1, N0, DAG, AllOnes))
5606     return Result;
5607   return SDValue();
5608 }
5609 
5610 static SDValue performANDCombine(SDNode *N,
5611                                  TargetLowering::DAGCombinerInfo &DCI,
5612                                  const RISCVSubtarget &Subtarget) {
5613   SelectionDAG &DAG = DCI.DAG;
5614 
5615   // fold (and (select_cc lhs, rhs, cc, -1, y), x) ->
5616   //      (select lhs, rhs, cc, x, (and x, y))
5617   return combineSelectCCAndUseCommutative(N, DAG, true);
5618 }
5619 
5620 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5621                                 const RISCVSubtarget &Subtarget) {
5622   SelectionDAG &DAG = DCI.DAG;
5623   if (Subtarget.hasStdExtZbp()) {
5624     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
5625       return GREV;
5626     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
5627       return GORC;
5628     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
5629       return SHFL;
5630   }
5631 
5632   // fold (or (select_cc lhs, rhs, cc, 0, y), x) ->
5633   //      (select lhs, rhs, cc, x, (or x, y))
5634   return combineSelectCCAndUseCommutative(N, DAG, false);
5635 }
5636 
5637 static SDValue performXORCombine(SDNode *N,
5638                                  TargetLowering::DAGCombinerInfo &DCI,
5639                                  const RISCVSubtarget &Subtarget) {
5640   SelectionDAG &DAG = DCI.DAG;
5641 
5642   // fold (xor (select_cc lhs, rhs, cc, 0, y), x) ->
5643   //      (select lhs, rhs, cc, x, (xor x, y))
5644   return combineSelectCCAndUseCommutative(N, DAG, false);
5645 }
5646 
5647 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
5648 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
5649 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
5650 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
5651 // ADDW/SUBW/MULW.
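// For example, if (add i32 a, b) feeds both this ANY_EXTEND and an i32 SETCC,
// rewriting the extend as SIGN_EXTEND lets the add be selected as ADDW and
// the SETCC operands be sign extended to i64 consistently.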
5652 static SDValue performANY_EXTENDCombine(SDNode *N,
5653                                         TargetLowering::DAGCombinerInfo &DCI,
5654                                         const RISCVSubtarget &Subtarget) {
5655   if (!Subtarget.is64Bit())
5656     return SDValue();
5657 
5658   SelectionDAG &DAG = DCI.DAG;
5659 
5660   SDValue Src = N->getOperand(0);
5661   EVT VT = N->getValueType(0);
5662   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
5663     return SDValue();
5664 
5665   // The opcode must be one that can implicitly sign_extend.
5666   // FIXME: Additional opcodes.
5667   switch (Src.getOpcode()) {
5668   default:
5669     return SDValue();
5670   case ISD::MUL:
5671     if (!Subtarget.hasStdExtM())
5672       return SDValue();
5673     LLVM_FALLTHROUGH;
5674   case ISD::ADD:
5675   case ISD::SUB:
5676     break;
5677   }
5678 
5679   SmallVector<SDNode *, 4> SetCCs;
5680   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
5681                             UE = Src.getNode()->use_end();
5682        UI != UE; ++UI) {
5683     SDNode *User = *UI;
5684     if (User == N)
5685       continue;
5686     if (UI.getUse().getResNo() != Src.getResNo())
5687       continue;
5688     // All i32 setccs are legalized by sign extending operands.
5689     if (User->getOpcode() == ISD::SETCC) {
5690       SetCCs.push_back(User);
5691       continue;
5692     }
5693     // We don't know if we can extend this user.
5694     break;
5695   }
5696 
5697   // If we don't have any SetCCs, this isn't worthwhile.
5698   if (SetCCs.empty())
5699     return SDValue();
5700 
5701   SDLoc DL(N);
5702   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
5703   DCI.CombineTo(N, SExt);
5704 
5705   // Promote all the setccs.
5706   for (SDNode *SetCC : SetCCs) {
5707     SmallVector<SDValue, 4> Ops;
5708 
5709     for (unsigned j = 0; j != 2; ++j) {
5710       SDValue SOp = SetCC->getOperand(j);
5711       if (SOp == Src)
5712         Ops.push_back(SExt);
5713       else
5714         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
5715     }
5716 
5717     Ops.push_back(SetCC->getOperand(2));
5718     DCI.CombineTo(SetCC,
5719                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
5720   }
5721   return SDValue(N, 0);
5722 }
5723 
5724 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
5725                                                DAGCombinerInfo &DCI) const {
5726   SelectionDAG &DAG = DCI.DAG;
5727 
5728   switch (N->getOpcode()) {
5729   default:
5730     break;
5731   case RISCVISD::SplitF64: {
5732     SDValue Op0 = N->getOperand(0);
5733     // If the input to SplitF64 is just BuildPairF64 then the operation is
5734     // redundant. Instead, use BuildPairF64's operands directly.
5735     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
5736       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
5737 
5738     SDLoc DL(N);
5739 
5740     // It's cheaper to materialise two 32-bit integers than to load a double
5741     // from the constant pool and transfer it to integer registers through the
5742     // stack.
5743     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
5744       APInt V = C->getValueAPF().bitcastToAPInt();
5745       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
5746       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
5747       return DCI.CombineTo(N, Lo, Hi);
5748     }
5749 
5750     // This is a target-specific version of a DAGCombine performed in
5751     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5752     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5753     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5754     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5755         !Op0.getNode()->hasOneUse())
5756       break;
5757     SDValue NewSplitF64 =
5758         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
5759                     Op0.getOperand(0));
5760     SDValue Lo = NewSplitF64.getValue(0);
5761     SDValue Hi = NewSplitF64.getValue(1);
5762     APInt SignBit = APInt::getSignMask(32);
5763     if (Op0.getOpcode() == ISD::FNEG) {
5764       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
5765                                   DAG.getConstant(SignBit, DL, MVT::i32));
5766       return DCI.CombineTo(N, Lo, NewHi);
5767     }
5768     assert(Op0.getOpcode() == ISD::FABS);
5769     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
5770                                 DAG.getConstant(~SignBit, DL, MVT::i32));
5771     return DCI.CombineTo(N, Lo, NewHi);
5772   }
5773   case RISCVISD::SLLW:
5774   case RISCVISD::SRAW:
5775   case RISCVISD::SRLW:
5776   case RISCVISD::ROLW:
5777   case RISCVISD::RORW: {
5778     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5779     SDValue LHS = N->getOperand(0);
5780     SDValue RHS = N->getOperand(1);
5781     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5782     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5783     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
5784         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
5785       if (N->getOpcode() != ISD::DELETED_NODE)
5786         DCI.AddToWorklist(N);
5787       return SDValue(N, 0);
5788     }
5789     break;
5790   }
5791   case RISCVISD::CLZW:
5792   case RISCVISD::CTZW: {
5793     // Only the lower 32 bits of the first operand are read
5794     SDValue Op0 = N->getOperand(0);
5795     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5796     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
5797       if (N->getOpcode() != ISD::DELETED_NODE)
5798         DCI.AddToWorklist(N);
5799       return SDValue(N, 0);
5800     }
5801     break;
5802   }
5803   case RISCVISD::FSL:
5804   case RISCVISD::FSR: {
    // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
5806     SDValue ShAmt = N->getOperand(2);
5807     unsigned BitWidth = ShAmt.getValueSizeInBits();
5808     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5809     APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
5810     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5811       if (N->getOpcode() != ISD::DELETED_NODE)
5812         DCI.AddToWorklist(N);
5813       return SDValue(N, 0);
5814     }
5815     break;
5816   }
5817   case RISCVISD::FSLW:
5818   case RISCVISD::FSRW: {
    // Only the lower 32 bits of the values and the lower 6 bits of the shift
    // amount are read.
5821     SDValue Op0 = N->getOperand(0);
5822     SDValue Op1 = N->getOperand(1);
5823     SDValue ShAmt = N->getOperand(2);
5824     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5825     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
5826     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
5827         SimplifyDemandedBits(Op1, OpMask, DCI) ||
5828         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5829       if (N->getOpcode() != ISD::DELETED_NODE)
5830         DCI.AddToWorklist(N);
5831       return SDValue(N, 0);
5832     }
5833     break;
5834   }
5835   case RISCVISD::GREV:
5836   case RISCVISD::GORC: {
    // Only the lower log2(Bitwidth) bits of the shift amount are read.
5838     SDValue ShAmt = N->getOperand(1);
5839     unsigned BitWidth = ShAmt.getValueSizeInBits();
5840     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5841     APInt ShAmtMask(BitWidth, BitWidth - 1);
5842     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5843       if (N->getOpcode() != ISD::DELETED_NODE)
5844         DCI.AddToWorklist(N);
5845       return SDValue(N, 0);
5846     }
5847 
5848     return combineGREVI_GORCI(N, DCI.DAG);
5849   }
5850   case RISCVISD::GREVW:
5851   case RISCVISD::GORCW: {
5852     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5853     SDValue LHS = N->getOperand(0);
5854     SDValue RHS = N->getOperand(1);
5855     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5856     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5857     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5858         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5859       if (N->getOpcode() != ISD::DELETED_NODE)
5860         DCI.AddToWorklist(N);
5861       return SDValue(N, 0);
5862     }
5863 
5864     return combineGREVI_GORCI(N, DCI.DAG);
5865   }
5866   case RISCVISD::SHFL:
5867   case RISCVISD::UNSHFL: {
    // Only the lower log2(Bitwidth) bits of the shift amount are read.
5869     SDValue ShAmt = N->getOperand(1);
5870     unsigned BitWidth = ShAmt.getValueSizeInBits();
5871     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5872     APInt ShAmtMask(BitWidth, (BitWidth / 2) - 1);
5873     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5874       if (N->getOpcode() != ISD::DELETED_NODE)
5875         DCI.AddToWorklist(N);
5876       return SDValue(N, 0);
5877     }
5878 
5879     break;
5880   }
5881   case RISCVISD::SHFLW:
5882   case RISCVISD::UNSHFLW: {
    // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
5884     SDValue LHS = N->getOperand(0);
5885     SDValue RHS = N->getOperand(1);
5886     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5887     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
5888     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5889         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5890       if (N->getOpcode() != ISD::DELETED_NODE)
5891         DCI.AddToWorklist(N);
5892       return SDValue(N, 0);
5893     }
5894 
5895     break;
5896   }
5897   case RISCVISD::BCOMPRESSW:
5898   case RISCVISD::BDECOMPRESSW: {
5899     // Only the lower 32 bits of LHS and RHS are read.
5900     SDValue LHS = N->getOperand(0);
5901     SDValue RHS = N->getOperand(1);
5902     APInt Mask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5903     if (SimplifyDemandedBits(LHS, Mask, DCI) ||
5904         SimplifyDemandedBits(RHS, Mask, DCI)) {
5905       if (N->getOpcode() != ISD::DELETED_NODE)
5906         DCI.AddToWorklist(N);
5907       return SDValue(N, 0);
5908     }
5909 
5910     break;
5911   }
5912   case RISCVISD::FMV_X_ANYEXTW_RV64: {
5913     SDLoc DL(N);
5914     SDValue Op0 = N->getOperand(0);
5915     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
5916     // conversion is unnecessary and can be replaced with an ANY_EXTEND
5917     // of the FMV_W_X_RV64 operand.
5918     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
5919       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
5920              "Unexpected value type!");
5921       return Op0.getOperand(0);
5922     }
5923 
5924     // This is a target-specific version of a DAGCombine performed in
5925     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5926     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5927     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5928     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5929         !Op0.getNode()->hasOneUse())
5930       break;
5931     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
5932                                  Op0.getOperand(0));
5933     APInt SignBit = APInt::getSignMask(32).sext(64);
5934     if (Op0.getOpcode() == ISD::FNEG)
5935       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
5936                          DAG.getConstant(SignBit, DL, MVT::i64));
5937 
5938     assert(Op0.getOpcode() == ISD::FABS);
5939     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
5940                        DAG.getConstant(~SignBit, DL, MVT::i64));
5941   }
5942   case ISD::AND:
5943     return performANDCombine(N, DCI, Subtarget);
5944   case ISD::OR:
5945     return performORCombine(N, DCI, Subtarget);
5946   case ISD::XOR:
5947     return performXORCombine(N, DCI, Subtarget);
5948   case ISD::ANY_EXTEND:
5949     return performANY_EXTENDCombine(N, DCI, Subtarget);
5950   case RISCVISD::SELECT_CC: {
    // Try to simplify the comparison operands of this SELECT_CC.
5952     SDValue LHS = N->getOperand(0);
5953     SDValue RHS = N->getOperand(1);
5954     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
5955     if (!ISD::isIntEqualitySetCC(CCVal))
5956       break;
5957 
5958     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
5959     //      (select_cc X, Y, lt, trueV, falseV)
5960     // Sometimes the setcc is introduced after select_cc has been formed.
5961     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5962         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5963       // If we're looking for eq 0 instead of ne 0, we need to invert the
5964       // condition.
5965       bool Invert = CCVal == ISD::SETEQ;
5966       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5967       if (Invert)
5968         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5969 
5970       SDLoc DL(N);
5971       RHS = LHS.getOperand(1);
5972       LHS = LHS.getOperand(0);
5973       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5974 
5975       SDValue TargetCC =
5976           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5977       return DAG.getNode(
5978           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5979           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5980     }
5981 
5982     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
5983     //      (select_cc X, Y, eq/ne, trueV, falseV)
5984     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5985       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
5986                          {LHS.getOperand(0), LHS.getOperand(1),
5987                           N->getOperand(2), N->getOperand(3),
5988                           N->getOperand(4)});
5989     // (select_cc X, 1, setne, trueV, falseV) ->
5990     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
5991     // This can occur when legalizing some floating point comparisons.
5992     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5993     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5994       SDLoc DL(N);
5995       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5996       SDValue TargetCC =
5997           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5998       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5999       return DAG.getNode(
6000           RISCVISD::SELECT_CC, DL, N->getValueType(0),
6001           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
6002     }
6003 
6004     break;
6005   }
6006   case RISCVISD::BR_CC: {
6007     SDValue LHS = N->getOperand(1);
6008     SDValue RHS = N->getOperand(2);
6009     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
6010     if (!ISD::isIntEqualitySetCC(CCVal))
6011       break;
6012 
6013     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
6014     //      (br_cc X, Y, lt, dest)
6015     // Sometimes the setcc is introduced after br_cc has been formed.
6016     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6017         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6018       // If we're looking for eq 0 instead of ne 0, we need to invert the
6019       // condition.
6020       bool Invert = CCVal == ISD::SETEQ;
6021       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6022       if (Invert)
6023         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6024 
6025       SDLoc DL(N);
6026       RHS = LHS.getOperand(1);
6027       LHS = LHS.getOperand(0);
6028       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6029 
6030       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6031                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
6032                          N->getOperand(4));
6033     }
6034 
6035     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
    //      (br_cc X, Y, eq/ne, dest)
6037     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6038       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
6039                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
6040                          N->getOperand(3), N->getOperand(4));
6041 
    // (br_cc X, 1, setne, dest) ->
    // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
6044     // This can occur when legalizing some floating point comparisons.
6045     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6046     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6047       SDLoc DL(N);
6048       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6049       SDValue TargetCC = DAG.getCondCode(CCVal);
6050       RHS = DAG.getConstant(0, DL, LHS.getValueType());
6051       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6052                          N->getOperand(0), LHS, RHS, TargetCC,
6053                          N->getOperand(4));
6054     }
6055     break;
6056   }
6057   case ISD::FCOPYSIGN: {
6058     EVT VT = N->getValueType(0);
6059     if (!VT.isVector())
6060       break;
    // There is a form of VFSGNJ which injects the negated sign of its second
    // operand. Try to bubble any FNEG up after the extend/round to produce
    // this optimized pattern. Avoid modifying cases where the FP_ROUND is
    // lossy (TRUNC=1).
6065     SDValue In2 = N->getOperand(1);
6066     // Avoid cases where the extend/round has multiple uses, as duplicating
6067     // those is typically more expensive than removing a fneg.
6068     if (!In2.hasOneUse())
6069       break;
6070     if (In2.getOpcode() != ISD::FP_EXTEND &&
6071         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
6072       break;
6073     In2 = In2.getOperand(0);
6074     if (In2.getOpcode() != ISD::FNEG)
6075       break;
6076     SDLoc DL(N);
6077     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
6078     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
6079                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
6080   }
6081   case ISD::MGATHER:
6082   case ISD::MSCATTER: {
6083     if (!DCI.isBeforeLegalize())
6084       break;
6085     MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
6086     SDValue Index = MGSN->getIndex();
6087     EVT IndexVT = Index.getValueType();
6088     MVT XLenVT = Subtarget.getXLenVT();
    // RISCV indexed loads and stores only support the "unsigned unscaled"
    // addressing mode, so anything else must be manually legalized.
6091     bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
6092                                 (MGSN->isIndexSigned() &&
6093                                  IndexVT.getVectorElementType().bitsLT(XLenVT));
6094     if (!NeedsIdxLegalization)
6095       break;
6096 
6097     SDLoc DL(N);
6098 
6099     // Any index legalization should first promote to XLenVT, so we don't lose
6100     // bits when scaling. This may create an illegal index type so we let
6101     // LLVM's legalization take care of the splitting.
6102     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
6103       IndexVT = IndexVT.changeVectorElementType(XLenVT);
6104       Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
6105                                                 : ISD::ZERO_EXTEND,
6106                           DL, IndexVT, Index);
6107     }
6108 
6109     unsigned Scale = N->getConstantOperandVal(5);
6110     if (MGSN->isIndexScaled() && Scale != 1) {
6111       // Manually scale the indices by the element size.
6112       // TODO: Sanitize the scale operand here?
6113       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
6114       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
6115       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
6116     }
6117 
6118     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
6119     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
6120       return DAG.getMaskedGather(
6121           N->getVTList(), MGSN->getMemoryVT(), DL,
6122           {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
6123            MGSN->getBasePtr(), Index, MGN->getScale()},
6124           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
6125     }
6126     const auto *MSN = cast<MaskedScatterSDNode>(N);
6127     return DAG.getMaskedScatter(
6128         N->getVTList(), MGSN->getMemoryVT(), DL,
6129         {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
6130          Index, MGSN->getScale()},
6131         MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
6132   }
6133   case RISCVISD::SRA_VL:
6134   case RISCVISD::SRL_VL:
6135   case RISCVISD::SHL_VL: {
6136     SDValue ShAmt = N->getOperand(1);
6137     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6138       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6139       SDLoc DL(N);
6140       SDValue VL = N->getOperand(3);
6141       EVT VT = N->getValueType(0);
6142       ShAmt =
6143           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
6144       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
6145                          N->getOperand(2), N->getOperand(3));
6146     }
6147     break;
6148   }
6149   case ISD::SRA:
6150   case ISD::SRL:
6151   case ISD::SHL: {
6152     SDValue ShAmt = N->getOperand(1);
6153     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6154       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6155       SDLoc DL(N);
6156       EVT VT = N->getValueType(0);
6157       ShAmt =
6158           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
6159       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
6160     }
6161     break;
6162   }
6163   case RISCVISD::MUL_VL: {
6164     // Try to form VWMUL or VWMULU.
6165     // FIXME: Look for splat of extended scalar as well.
6166     // FIXME: Support VWMULSU.
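    // For example, a nxv2i64 MUL_VL whose operands are both VSEXT_VL from
    // nxv2i32 sources becomes a single VWMUL_VL of the nxv2i32 sources.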
6167     SDValue Op0 = N->getOperand(0);
6168     SDValue Op1 = N->getOperand(1);
6169     bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
6170     bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
6171     if ((!IsSignExt && !IsZeroExt) || Op0.getOpcode() != Op1.getOpcode())
6172       return SDValue();
6173 
6174     // Make sure the extends have a single use.
6175     if (!Op0.hasOneUse() || !Op1.hasOneUse())
6176       return SDValue();
6177 
6178     SDValue Mask = N->getOperand(2);
6179     SDValue VL = N->getOperand(3);
6180     if (Op0.getOperand(1) != Mask || Op1.getOperand(1) != Mask ||
6181         Op0.getOperand(2) != VL || Op1.getOperand(2) != VL)
6182       return SDValue();
6183 
6184     Op0 = Op0.getOperand(0);
6185     Op1 = Op1.getOperand(0);
6186 
6187     MVT VT = N->getSimpleValueType(0);
6188     MVT NarrowVT =
6189         MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits() / 2),
6190                          VT.getVectorElementCount());
6191 
6192     SDLoc DL(N);
6193 
6194     // Re-introduce narrower extends if needed.
6195     unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
6196     if (Op0.getValueType() != NarrowVT)
6197       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
6198     if (Op1.getValueType() != NarrowVT)
6199       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
6200 
6201     unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
6202     return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
6203   }
6204   }
6205 
6206   return SDValue();
6207 }
6208 
6209 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
6210     const SDNode *N, CombineLevel Level) const {
6211   // The following folds are only desirable if `(OP _, c1 << c2)` can be
6212   // materialised in fewer instructions than `(OP _, c1)`:
6213   //
6214   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
6215   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
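  //
  // For example, for (shl (add x, 1), 3), 1 << 3 = 8 still fits in an ADDI
  // immediate, so the combine is allowed; for (shl (add x, 2047), 8), 2047
  // fits in an ADDI but 2047 << 8 does not, so the combine is prevented.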
6216   SDValue N0 = N->getOperand(0);
6217   EVT Ty = N0.getValueType();
6218   if (Ty.isScalarInteger() &&
6219       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
6220     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6221     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
6222     if (C1 && C2) {
6223       const APInt &C1Int = C1->getAPIntValue();
6224       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
6225 
6226       // We can materialise `c1 << c2` into an add immediate, so it's "free",
6227       // and the combine should happen, to potentially allow further combines
6228       // later.
6229       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
6230           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
6231         return true;
6232 
6233       // We can materialise `c1` in an add immediate, so it's "free", and the
6234       // combine should be prevented.
6235       if (C1Int.getMinSignedBits() <= 64 &&
6236           isLegalAddImmediate(C1Int.getSExtValue()))
6237         return false;
6238 
6239       // Neither constant will fit into an immediate, so find materialisation
6240       // costs.
6241       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
6242                                               Subtarget.getFeatureBits());
6243       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
6244           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits());
6245 
6246       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
6247       // combine should be prevented.
6248       if (C1Cost < ShiftedC1Cost)
6249         return false;
6250     }
6251   }
6252   return true;
6253 }
6254 
6255 bool RISCVTargetLowering::targetShrinkDemandedConstant(
6256     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
6257     TargetLoweringOpt &TLO) const {
6258   // Delay this optimization as late as possible.
6259   if (!TLO.LegalOps)
6260     return false;
6261 
6262   EVT VT = Op.getValueType();
6263   if (VT.isVector())
6264     return false;
6265 
6266   // Only handle AND for now.
6267   if (Op.getOpcode() != ISD::AND)
6268     return false;
6269 
6270   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6271   if (!C)
6272     return false;
6273 
6274   const APInt &Mask = C->getAPIntValue();
6275 
6276   // Clear all non-demanded bits initially.
6277   APInt ShrunkMask = Mask & DemandedBits;
6278 
6279   // Try to make a smaller immediate by setting undemanded bits.
6280 
6281   APInt ExpandedMask = Mask | ~DemandedBits;
6282 
6283   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
6284     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
6285   };
6286   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
6287     if (NewMask == Mask)
6288       return true;
6289     SDLoc DL(Op);
6290     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
6291     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
6292     return TLO.CombineTo(Op, NewOp);
6293   };
6294 
6295   // If the shrunk mask fits in sign extended 12 bits, let the target
6296   // independent code apply it.
6297   if (ShrunkMask.isSignedIntN(12))
6298     return false;
6299 
6300   // Preserve (and X, 0xffff) when zext.h is supported.
6301   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
6302     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
6303     if (IsLegalMask(NewMask))
6304       return UseMask(NewMask);
6305   }
6306 
6307   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
6308   if (VT == MVT::i64) {
6309     APInt NewMask = APInt(64, 0xffffffff);
6310     if (IsLegalMask(NewMask))
6311       return UseMask(NewMask);
6312   }
6313 
6314   // For the remaining optimizations, we need to be able to make a negative
6315   // number through a combination of mask and undemanded bits.
6316   if (!ExpandedMask.isNegative())
6317     return false;
6318 
  // Compute the fewest number of bits needed to represent the negative number.
6320   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
6321 
  // Try to make a 12-bit negative immediate. If that fails, try to make a
  // 32-bit negative immediate unless the shrunk immediate already fits in 32
  // bits.
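  // e.g. (RV32, illustrative): with Mask = 0x800 and DemandedBits = 0xfff,
  // ShrunkMask = 0x800 is not a signed 12-bit value, but setting the
  // undemanded bits from bit 11 upward gives 0xfffff800 = -2048, which fits
  // in an ANDI immediate.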
6324   APInt NewMask = ShrunkMask;
6325   if (MinSignedBits <= 12)
6326     NewMask.setBitsFrom(11);
6327   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
6328     NewMask.setBitsFrom(31);
6329   else
6330     return false;
6331 
6332   // Sanity check that our new mask is a subset of the demanded mask.
6333   assert(IsLegalMask(NewMask));
6334   return UseMask(NewMask);
6335 }
6336 
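// Apply a generalized bit reverse (GREV) with a constant control value to
// Src: each set bit k of ShAmt swaps adjacent 2^k-bit groups. For example
// (illustrative), ShAmt = 7 bit-reverses each byte and ShAmt = 56
// byte-reverses a full 64-bit value.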
6337 static void computeGREV(APInt &Src, unsigned ShAmt) {
6338   ShAmt &= Src.getBitWidth() - 1;
6339   uint64_t x = Src.getZExtValue();
6340   if (ShAmt & 1)
6341     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
6342   if (ShAmt & 2)
6343     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
6344   if (ShAmt & 4)
6345     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
6346   if (ShAmt & 8)
6347     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
6348   if (ShAmt & 16)
6349     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
6350   if (ShAmt & 32)
6351     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
6352   Src = x;
6353 }
6354 
6355 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
6356                                                         KnownBits &Known,
6357                                                         const APInt &DemandedElts,
6358                                                         const SelectionDAG &DAG,
6359                                                         unsigned Depth) const {
6360   unsigned BitWidth = Known.getBitWidth();
6361   unsigned Opc = Op.getOpcode();
6362   assert((Opc >= ISD::BUILTIN_OP_END ||
6363           Opc == ISD::INTRINSIC_WO_CHAIN ||
6364           Opc == ISD::INTRINSIC_W_CHAIN ||
6365           Opc == ISD::INTRINSIC_VOID) &&
6366          "Should use MaskedValueIsZero if you don't know whether Op"
6367          " is a target node!");
6368 
6369   Known.resetAll();
6370   switch (Opc) {
6371   default: break;
6372   case RISCVISD::SELECT_CC: {
6373     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
6374     // If we don't know any bits, early out.
6375     if (Known.isUnknown())
6376       break;
6377     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
6378 
6379     // Only known if known in both the LHS and RHS.
6380     Known = KnownBits::commonBits(Known, Known2);
6381     break;
6382   }
6383   case RISCVISD::REMUW: {
6384     KnownBits Known2;
6385     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6386     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6387     // We only care about the lower 32 bits.
6388     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
6389     // Restore the original width by sign extending.
6390     Known = Known.sext(BitWidth);
6391     break;
6392   }
6393   case RISCVISD::DIVUW: {
6394     KnownBits Known2;
6395     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6396     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6397     // We only care about the lower 32 bits.
6398     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
6399     // Restore the original width by sign extending.
6400     Known = Known.sext(BitWidth);
6401     break;
6402   }
6403   case RISCVISD::CTZW: {
6404     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6405     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
6406     unsigned LowBits = Log2_32(PossibleTZ) + 1;
6407     Known.Zero.setBitsFrom(LowBits);
6408     break;
6409   }
6410   case RISCVISD::CLZW: {
6411     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6412     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
6413     unsigned LowBits = Log2_32(PossibleLZ) + 1;
6414     Known.Zero.setBitsFrom(LowBits);
6415     break;
6416   }
6417   case RISCVISD::GREV:
6418   case RISCVISD::GREVW: {
6419     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
6420       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6421       if (Opc == RISCVISD::GREVW)
6422         Known = Known.trunc(32);
6423       unsigned ShAmt = C->getZExtValue();
6424       computeGREV(Known.Zero, ShAmt);
6425       computeGREV(Known.One, ShAmt);
6426       if (Opc == RISCVISD::GREVW)
6427         Known = Known.sext(BitWidth);
6428     }
6429     break;
6430   }
6431   case RISCVISD::READ_VLENB:
6432     // We assume VLENB is at least 16 bytes.
6433     Known.Zero.setLowBits(4);
6434     break;
6435   case ISD::INTRINSIC_W_CHAIN: {
6436     unsigned IntNo = Op.getConstantOperandVal(1);
6437     switch (IntNo) {
6438     default:
6439       // We can't do anything for most intrinsics.
6440       break;
6441     case Intrinsic::riscv_vsetvli:
6442     case Intrinsic::riscv_vsetvlimax:
6443       // Assume that VL output is positive and would fit in an int32_t.
6444       // TODO: VLEN might be capped at 16 bits in a future V spec update.
6445       if (BitWidth >= 32)
6446         Known.Zero.setBitsFrom(31);
6447       break;
6448     }
6449     break;
6450   }
6451   }
6452 }
6453 
6454 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
6455     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
6456     unsigned Depth) const {
6457   switch (Op.getOpcode()) {
6458   default:
6459     break;
6460   case RISCVISD::SLLW:
6461   case RISCVISD::SRAW:
6462   case RISCVISD::SRLW:
6463   case RISCVISD::DIVW:
6464   case RISCVISD::DIVUW:
6465   case RISCVISD::REMUW:
6466   case RISCVISD::ROLW:
6467   case RISCVISD::RORW:
6468   case RISCVISD::GREVW:
6469   case RISCVISD::GORCW:
6470   case RISCVISD::FSLW:
6471   case RISCVISD::FSRW:
6472   case RISCVISD::SHFLW:
6473   case RISCVISD::UNSHFLW:
6474   case RISCVISD::BCOMPRESSW:
6475   case RISCVISD::BDECOMPRESSW:
6476     // TODO: As the result is sign-extended, this is conservatively correct. A
6477     // more precise answer could be calculated for SRAW depending on known
6478     // bits in the shift amount.
6479     return 33;
6480   case RISCVISD::SHFL:
6481   case RISCVISD::UNSHFL: {
    // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word
    // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
    // will stay within the upper 32 bits. If there were more than 32 sign
    // bits before, there will be at least 33 sign bits after.
6486     if (Op.getValueType() == MVT::i64 &&
6487         isa<ConstantSDNode>(Op.getOperand(1)) &&
6488         (Op.getConstantOperandVal(1) & 0x10) == 0) {
6489       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
6490       if (Tmp > 32)
6491         return 33;
6492     }
6493     break;
6494   }
6495   case RISCVISD::VMV_X_S:
6496     // The number of sign bits of the scalar result is computed by obtaining the
6497     // element type of the input vector operand, subtracting its width from the
6498     // XLEN, and then adding one (sign bit within the element type). If the
6499     // element type is wider than XLen, the least-significant XLEN bits are
6500     // taken.
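    // For example (illustrative), with XLEN=64 and i32 vector elements the
    // result is known to have at least 64 - 32 + 1 = 33 sign bits.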
6501     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
6502       return 1;
6503     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
6504   }
6505 
6506   return 1;
6507 }
6508 
6509 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
6510                                                   MachineBasicBlock *BB) {
6511   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
6512 
6513   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
6514   // Should the count have wrapped while it was being read, we need to try
6515   // again.
6516   // ...
6517   // read:
6518   // rdcycleh x3 # load high word of cycle
6519   // rdcycle  x2 # load low word of cycle
6520   // rdcycleh x4 # load high word of cycle
6521   // bne x3, x4, read # check if high word reads match, otherwise try again
6522   // ...
6523 
6524   MachineFunction &MF = *BB->getParent();
6525   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6526   MachineFunction::iterator It = ++BB->getIterator();
6527 
6528   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6529   MF.insert(It, LoopMBB);
6530 
6531   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6532   MF.insert(It, DoneMBB);
6533 
6534   // Transfer the remainder of BB and its successor edges to DoneMBB.
6535   DoneMBB->splice(DoneMBB->begin(), BB,
6536                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
6537   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
6538 
6539   BB->addSuccessor(LoopMBB);
6540 
6541   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6542   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6543   Register LoReg = MI.getOperand(0).getReg();
6544   Register HiReg = MI.getOperand(1).getReg();
6545   DebugLoc DL = MI.getDebugLoc();
6546 
6547   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
6548   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
6549       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6550       .addReg(RISCV::X0);
6551   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
6552       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
6553       .addReg(RISCV::X0);
6554   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
6555       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6556       .addReg(RISCV::X0);
6557 
6558   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
6559       .addReg(HiReg)
6560       .addReg(ReadAgainReg)
6561       .addMBB(LoopMBB);
6562 
6563   LoopMBB->addSuccessor(LoopMBB);
6564   LoopMBB->addSuccessor(DoneMBB);
6565 
6566   MI.eraseFromParent();
6567 
6568   return DoneMBB;
6569 }
6570 
6571 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
6572                                              MachineBasicBlock *BB) {
6573   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
6574 
6575   MachineFunction &MF = *BB->getParent();
6576   DebugLoc DL = MI.getDebugLoc();
6577   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6578   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6579   Register LoReg = MI.getOperand(0).getReg();
6580   Register HiReg = MI.getOperand(1).getReg();
6581   Register SrcReg = MI.getOperand(2).getReg();
6582   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
6583   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6584 
6585   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
6586                           RI);
6587   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6588   MachineMemOperand *MMOLo =
6589       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
6590   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6591       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
6592   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
6593       .addFrameIndex(FI)
6594       .addImm(0)
6595       .addMemOperand(MMOLo);
6596   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
6597       .addFrameIndex(FI)
6598       .addImm(4)
6599       .addMemOperand(MMOHi);
6600   MI.eraseFromParent(); // The pseudo instruction is gone now.
6601   return BB;
6602 }
6603 
6604 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
6605                                                  MachineBasicBlock *BB) {
6606   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
6607          "Unexpected instruction");
6608 
6609   MachineFunction &MF = *BB->getParent();
6610   DebugLoc DL = MI.getDebugLoc();
6611   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6612   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6613   Register DstReg = MI.getOperand(0).getReg();
6614   Register LoReg = MI.getOperand(1).getReg();
6615   Register HiReg = MI.getOperand(2).getReg();
6616   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
6617   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6618 
6619   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6620   MachineMemOperand *MMOLo =
6621       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
6622   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6623       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
6624   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6625       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
6626       .addFrameIndex(FI)
6627       .addImm(0)
6628       .addMemOperand(MMOLo);
6629   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6630       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
6631       .addFrameIndex(FI)
6632       .addImm(4)
6633       .addMemOperand(MMOHi);
6634   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
6635   MI.eraseFromParent(); // The pseudo instruction is gone now.
6636   return BB;
6637 }
6638 
6639 static bool isSelectPseudo(MachineInstr &MI) {
6640   switch (MI.getOpcode()) {
6641   default:
6642     return false;
6643   case RISCV::Select_GPR_Using_CC_GPR:
6644   case RISCV::Select_FPR16_Using_CC_GPR:
6645   case RISCV::Select_FPR32_Using_CC_GPR:
6646   case RISCV::Select_FPR64_Using_CC_GPR:
6647     return true;
6648   }
6649 }
6650 
6651 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
6652                                            MachineBasicBlock *BB) {
6653   // To "insert" Select_* instructions, we actually have to insert the triangle
6654   // control-flow pattern.  The incoming instructions know the destination vreg
6655   // to set, the condition code register to branch on, the true/false values to
6656   // select between, and the condcode to use to select the appropriate branch.
6657   //
6658   // We produce the following control flow:
6659   //     HeadMBB
6660   //     |  \
6661   //     |  IfFalseMBB
6662   //     | /
6663   //    TailMBB
6664   //
6665   // When we find a sequence of selects we attempt to optimize their emission
6666   // by sharing the control flow. Currently we only handle cases where we have
6667   // multiple selects with the exact same condition (same LHS, RHS and CC).
6668   // The selects may be interleaved with other instructions if the other
6669   // instructions meet some requirements we deem safe:
6670   // - They are debug instructions. Otherwise,
6671   // - They do not have side-effects, do not access memory and their inputs do
6672   //   not depend on the results of the select pseudo-instructions.
6673   // The TrueV/FalseV operands of the selects cannot depend on the result of
6674   // previous selects in the sequence.
6675   // These conditions could be further relaxed. See the X86 target for a
6676   // related approach and more information.
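  //
  // For example (illustrative), two selects on the same (LHS, RHS, CC) share
  // a single conditional branch and become PHIs in TailMBB:
  //   %r0 = PHI [ %t0, HeadMBB ], [ %f0, IfFalseMBB ]
  //   %r1 = PHI [ %t1, HeadMBB ], [ %f1, IfFalseMBB ]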
6677   Register LHS = MI.getOperand(1).getReg();
6678   Register RHS = MI.getOperand(2).getReg();
6679   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
6680 
6681   SmallVector<MachineInstr *, 4> SelectDebugValues;
6682   SmallSet<Register, 4> SelectDests;
6683   SelectDests.insert(MI.getOperand(0).getReg());
6684 
6685   MachineInstr *LastSelectPseudo = &MI;
6686 
6687   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
6688        SequenceMBBI != E; ++SequenceMBBI) {
6689     if (SequenceMBBI->isDebugInstr())
6690       continue;
6691     else if (isSelectPseudo(*SequenceMBBI)) {
6692       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
6693           SequenceMBBI->getOperand(2).getReg() != RHS ||
6694           SequenceMBBI->getOperand(3).getImm() != CC ||
6695           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
6696           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
6697         break;
6698       LastSelectPseudo = &*SequenceMBBI;
6699       SequenceMBBI->collectDebugValues(SelectDebugValues);
6700       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
6701     } else {
6702       if (SequenceMBBI->hasUnmodeledSideEffects() ||
6703           SequenceMBBI->mayLoadOrStore())
6704         break;
6705       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
6706             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
6707           }))
6708         break;
6709     }
6710   }
6711 
6712   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
6713   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6714   DebugLoc DL = MI.getDebugLoc();
6715   MachineFunction::iterator I = ++BB->getIterator();
6716 
6717   MachineBasicBlock *HeadMBB = BB;
6718   MachineFunction *F = BB->getParent();
6719   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
6720   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
6721 
6722   F->insert(I, IfFalseMBB);
6723   F->insert(I, TailMBB);
6724 
6725   // Transfer debug instructions associated with the selects to TailMBB.
6726   for (MachineInstr *DebugInstr : SelectDebugValues) {
6727     TailMBB->push_back(DebugInstr->removeFromParent());
6728   }
6729 
6730   // Move all instructions after the sequence to TailMBB.
6731   TailMBB->splice(TailMBB->end(), HeadMBB,
6732                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
6733   // Update machine-CFG edges by transferring all successors of the current
6734   // block to the new block which will contain the Phi nodes for the selects.
6735   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
6736   // Set the successors for HeadMBB.
6737   HeadMBB->addSuccessor(IfFalseMBB);
6738   HeadMBB->addSuccessor(TailMBB);
6739 
6740   // Insert appropriate branch.
6741   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
6742 
6743   BuildMI(HeadMBB, DL, TII.get(Opcode))
6744     .addReg(LHS)
6745     .addReg(RHS)
6746     .addMBB(TailMBB);
6747 
6748   // IfFalseMBB just falls through to TailMBB.
6749   IfFalseMBB->addSuccessor(TailMBB);
6750 
6751   // Create PHIs for all of the select pseudo-instructions.
6752   auto SelectMBBI = MI.getIterator();
6753   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
6754   auto InsertionPoint = TailMBB->begin();
6755   while (SelectMBBI != SelectEnd) {
6756     auto Next = std::next(SelectMBBI);
6757     if (isSelectPseudo(*SelectMBBI)) {
6758       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
6759       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
6760               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
6761           .addReg(SelectMBBI->getOperand(4).getReg())
6762           .addMBB(HeadMBB)
6763           .addReg(SelectMBBI->getOperand(5).getReg())
6764           .addMBB(IfFalseMBB);
6765       SelectMBBI->eraseFromParent();
6766     }
6767     SelectMBBI = Next;
6768   }
6769 
6770   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
6771   return TailMBB;
6772 }
6773 
6774 MachineBasicBlock *
6775 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
6776                                                  MachineBasicBlock *BB) const {
6777   switch (MI.getOpcode()) {
6778   default:
6779     llvm_unreachable("Unexpected instr type to insert");
6780   case RISCV::ReadCycleWide:
6781     assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
6783     return emitReadCycleWidePseudo(MI, BB);
6784   case RISCV::Select_GPR_Using_CC_GPR:
6785   case RISCV::Select_FPR16_Using_CC_GPR:
6786   case RISCV::Select_FPR32_Using_CC_GPR:
6787   case RISCV::Select_FPR64_Using_CC_GPR:
6788     return emitSelectPseudo(MI, BB);
6789   case RISCV::BuildPairF64Pseudo:
6790     return emitBuildPairF64Pseudo(MI, BB);
6791   case RISCV::SplitF64Pseudo:
6792     return emitSplitF64Pseudo(MI, BB);
6793   }
6794 }
6795 
6796 // Calling Convention Implementation.
6797 // The expectations for frontend ABI lowering vary from target to target.
6798 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
6799 // details, but this is a longer term goal. For now, we simply try to keep the
6800 // role of the frontend as simple and well-defined as possible. The rules can
6801 // be summarised as:
6802 // * Never split up large scalar arguments. We handle them here.
6803 // * If a hardfloat calling convention is being used, and the struct may be
6804 // passed in a pair of registers (fp+fp, int+fp), and both registers are
6805 // available, then pass as two separate arguments. If either the GPRs or FPRs
6806 // are exhausted, then pass according to the rule below.
6807 // * If a struct could never be passed in registers or directly in a stack
6808 // slot (as it is larger than 2*XLEN and the floating point rules don't
6809 // apply), then pass it using a pointer with the byval attribute.
6810 // * If a struct is less than 2*XLEN, then coerce to either a two-element
6811 // word-sized array or a 2*XLEN scalar (depending on alignment).
6812 // * The frontend can determine whether a struct is returned by reference or
6813 // not based on its size and fields. If it will be returned by reference, the
6814 // frontend must modify the prototype so a pointer with the sret annotation is
6815 // passed as the first argument. This is not necessary for large scalar
6816 // returns.
6817 // * Struct return values and varargs should be coerced to structs containing
6818 // register-size fields in the same situations they would be for fixed
6819 // arguments.
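//
// For example (illustrative), under a hard-float ABI a struct containing one
// double and one int may be passed as two separate arguments (an FPR and a
// GPR) while both kinds of registers are available; once either runs out, it
// is passed according to the rules above.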
6820 
6821 static const MCPhysReg ArgGPRs[] = {
6822   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
6823   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
6824 };
6825 static const MCPhysReg ArgFPR16s[] = {
6826   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
6827   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
6828 };
6829 static const MCPhysReg ArgFPR32s[] = {
6830   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
6831   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
6832 };
6833 static const MCPhysReg ArgFPR64s[] = {
6834   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
6835   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
6836 };
6837 // This is an interim calling convention and it may be changed in the future.
6838 static const MCPhysReg ArgVRs[] = {
6839     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
6840     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
6841     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
6842 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
6843                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
6844                                      RISCV::V20M2, RISCV::V22M2};
6845 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
6846                                      RISCV::V20M4};
6847 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
6848 
6849 // Pass a 2*XLEN argument that has been split into two XLEN values through
6850 // registers or the stack as necessary.
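// For example (illustrative), an i64 argument on RV32 may end up in a pair
// of GPRs, in one GPR plus one stack slot, or entirely on the stack,
// depending on how many argument GPRs are still free.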
6851 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
6852                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
6853                                 MVT ValVT2, MVT LocVT2,
6854                                 ISD::ArgFlagsTy ArgFlags2) {
6855   unsigned XLenInBytes = XLen / 8;
6856   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6857     // At least one half can be passed via register.
6858     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
6859                                      VA1.getLocVT(), CCValAssign::Full));
6860   } else {
6861     // Both halves must be passed on the stack, with proper alignment.
6862     Align StackAlign =
6863         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
6864     State.addLoc(
6865         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
6866                             State.AllocateStack(XLenInBytes, StackAlign),
6867                             VA1.getLocVT(), CCValAssign::Full));
6868     State.addLoc(CCValAssign::getMem(
6869         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6870         LocVT2, CCValAssign::Full));
6871     return false;
6872   }
6873 
6874   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6875     // The second half can also be passed via register.
6876     State.addLoc(
6877         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
6878   } else {
6879     // The second half is passed via the stack, without additional alignment.
6880     State.addLoc(CCValAssign::getMem(
6881         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6882         LocVT2, CCValAssign::Full));
6883   }
6884 
6885   return false;
6886 }
6887 
6888 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
6889                                Optional<unsigned> FirstMaskArgument,
6890                                CCState &State, const RISCVTargetLowering &TLI) {
6891   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
6892   if (RC == &RISCV::VRRegClass) {
6893     // Assign the first mask argument to V0.
6894     // This is an interim calling convention and it may be changed in the
6895     // future.
6896     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
6897       return State.AllocateReg(RISCV::V0);
6898     return State.AllocateReg(ArgVRs);
6899   }
6900   if (RC == &RISCV::VRM2RegClass)
6901     return State.AllocateReg(ArgVRM2s);
6902   if (RC == &RISCV::VRM4RegClass)
6903     return State.AllocateReg(ArgVRM4s);
6904   if (RC == &RISCV::VRM8RegClass)
6905     return State.AllocateReg(ArgVRM8s);
6906   llvm_unreachable("Unhandled register class for ValueType");
6907 }
6908 
6909 // Implements the RISC-V calling convention. Returns true upon failure.
6910 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
6911                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
6912                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
6913                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
6914                      Optional<unsigned> FirstMaskArgument) {
6915   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
6916   assert(XLen == 32 || XLen == 64);
6917   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
6918 
  // Any return value split into more than two values can't be returned
6920   // directly. Vectors are returned via the available vector registers.
6921   if (!LocVT.isVector() && IsRet && ValNo > 1)
6922     return true;
6923 
  // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
  // passing a variadic argument, or if no F16/F32 argument registers are
  // available.
6926   bool UseGPRForF16_F32 = true;
  // UseGPRForF64 is true if targeting soft-float ABIs or an FLEN=32 ABI, if
  // passing a variadic argument, or if no F64 argument registers are
  // available.
6929   bool UseGPRForF64 = true;
6930 
6931   switch (ABI) {
6932   default:
6933     llvm_unreachable("Unexpected ABI");
6934   case RISCVABI::ABI_ILP32:
6935   case RISCVABI::ABI_LP64:
6936     break;
6937   case RISCVABI::ABI_ILP32F:
6938   case RISCVABI::ABI_LP64F:
6939     UseGPRForF16_F32 = !IsFixed;
6940     break;
6941   case RISCVABI::ABI_ILP32D:
6942   case RISCVABI::ABI_LP64D:
6943     UseGPRForF16_F32 = !IsFixed;
6944     UseGPRForF64 = !IsFixed;
6945     break;
6946   }
6947 
6948   // FPR16, FPR32, and FPR64 alias each other.
6949   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
6950     UseGPRForF16_F32 = true;
6951     UseGPRForF64 = true;
6952   }
6953 
6954   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
6955   // similar local variables rather than directly checking against the target
6956   // ABI.
6957 
6958   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
6959     LocVT = XLenVT;
6960     LocInfo = CCValAssign::BCvt;
6961   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
6962     LocVT = MVT::i64;
6963     LocInfo = CCValAssign::BCvt;
6964   }
6965 
6966   // If this is a variadic argument, the RISC-V calling convention requires
6967   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
6968   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
6969   // be used regardless of whether the original argument was split during
6970   // legalisation or not. The argument will not be passed by registers if the
6971   // original type is larger than 2*XLEN, so the register alignment rule does
6972   // not apply.
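  // For example (illustrative), a variadic double on RV32 has 8-byte
  // alignment and 2*XLEN size, so it must start in an aligned register pair
  // such as a2/a3 rather than a1/a2.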
6973   unsigned TwoXLenInBytes = (2 * XLen) / 8;
6974   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
6975       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
6976     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
6977     // Skip 'odd' register if necessary.
6978     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
6979       State.AllocateReg(ArgGPRs);
6980   }
6981 
6982   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
6983   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
6984       State.getPendingArgFlags();
6985 
6986   assert(PendingLocs.size() == PendingArgFlags.size() &&
6987          "PendingLocs and PendingArgFlags out of sync");
6988 
6989   // Handle passing f64 on RV32D with a soft float ABI or when floating point
6990   // registers are exhausted.
6991   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
6992     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
6993            "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
6995     // GPRs, split between a GPR and the stack, or passed completely on the
6996     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
6997     // cases.
6998     Register Reg = State.AllocateReg(ArgGPRs);
6999     LocVT = MVT::i32;
7000     if (!Reg) {
7001       unsigned StackOffset = State.AllocateStack(8, Align(8));
7002       State.addLoc(
7003           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7004       return false;
7005     }
7006     if (!State.AllocateReg(ArgGPRs))
7007       State.AllocateStack(4, Align(4));
7008     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7009     return false;
7010   }
7011 
7012   // Fixed-length vectors are located in the corresponding scalable-vector
7013   // container types.
7014   if (ValVT.isFixedLengthVector())
7015     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7016 
7017   // Split arguments might be passed indirectly, so keep track of the pending
7018   // values. Split vectors are passed via a mix of registers and indirectly, so
7019   // treat them as we would any other argument.
7020   if (!LocVT.isVector() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
7021     LocVT = XLenVT;
7022     LocInfo = CCValAssign::Indirect;
7023     PendingLocs.push_back(
7024         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
7025     PendingArgFlags.push_back(ArgFlags);
7026     if (!ArgFlags.isSplitEnd()) {
7027       return false;
7028     }
7029   }
7030 
7031   // If the split argument only had two elements, it should be passed directly
7032   // in registers or on the stack.
7033   if (!LocVT.isVector() && ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
7034     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
7035     // Apply the normal calling convention rules to the first half of the
7036     // split argument.
7037     CCValAssign VA = PendingLocs[0];
7038     ISD::ArgFlagsTy AF = PendingArgFlags[0];
7039     PendingLocs.clear();
7040     PendingArgFlags.clear();
7041     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
7042                                ArgFlags);
7043   }
7044 
7045   // Allocate to a register if possible, or else a stack slot.
7046   Register Reg;
7047   unsigned StoreSizeBytes = XLen / 8;
7048   Align StackAlign = Align(XLen / 8);
7049 
7050   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
7051     Reg = State.AllocateReg(ArgFPR16s);
7052   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
7053     Reg = State.AllocateReg(ArgFPR32s);
7054   else if (ValVT == MVT::f64 && !UseGPRForF64)
7055     Reg = State.AllocateReg(ArgFPR64s);
7056   else if (ValVT.isVector()) {
7057     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
7058     if (!Reg) {
7059       // For return values, the vector must be passed fully via registers or
7060       // via the stack.
7061       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
7062       // but we're using all of them.
7063       if (IsRet)
7064         return true;
      // Try using a GPR to pass the address.
7066       if ((Reg = State.AllocateReg(ArgGPRs))) {
7067         LocVT = XLenVT;
7068         LocInfo = CCValAssign::Indirect;
7069       } else if (ValVT.isScalableVector()) {
7070         report_fatal_error("Unable to pass scalable vector types on the stack");
7071       } else {
7072         // Pass fixed-length vectors on the stack.
7073         LocVT = ValVT;
7074         StoreSizeBytes = ValVT.getStoreSize();
        // Align vectors to their element sizes, being careful with vXi1
        // vectors.
7077         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7078       }
7079     }
7080   } else {
7081     Reg = State.AllocateReg(ArgGPRs);
7082   }
7083 
7084   unsigned StackOffset =
7085       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
7086 
7087   // If we reach this point and PendingLocs is non-empty, we must be at the
7088   // end of a split argument that must be passed indirectly.
7089   if (!PendingLocs.empty()) {
7090     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
7091     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
7092 
7093     for (auto &It : PendingLocs) {
7094       if (Reg)
7095         It.convertToReg(Reg);
7096       else
7097         It.convertToMem(StackOffset);
7098       State.addLoc(It);
7099     }
7100     PendingLocs.clear();
7101     PendingArgFlags.clear();
7102     return false;
7103   }
7104 
7105   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
7106           (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
7107          "Expected an XLenVT or vector types at this stage");
7108 
7109   if (Reg) {
7110     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7111     return false;
7112   }
7113 
7114   // When a floating-point value is passed on the stack, no bit-conversion is
7115   // needed.
7116   if (ValVT.isFloatingPoint()) {
7117     LocVT = ValVT;
7118     LocInfo = CCValAssign::Full;
7119   }
7120   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7121   return false;
7122 }
7123 
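// Return the index of the first vector mask argument (a vector with i1
// elements), if any. The interim vector calling convention pre-assigns that
// argument to V0 (see allocateRVVReg).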
7124 template <typename ArgTy>
7125 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
7126   for (const auto &ArgIdx : enumerate(Args)) {
7127     MVT ArgVT = ArgIdx.value().VT;
7128     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
7129       return ArgIdx.index();
7130   }
7131   return None;
7132 }
7133 
7134 void RISCVTargetLowering::analyzeInputArgs(
7135     MachineFunction &MF, CCState &CCInfo,
7136     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
7137     RISCVCCAssignFn Fn) const {
7138   unsigned NumArgs = Ins.size();
7139   FunctionType *FType = MF.getFunction().getFunctionType();
7140 
7141   Optional<unsigned> FirstMaskArgument;
7142   if (Subtarget.hasStdExtV())
7143     FirstMaskArgument = preAssignMask(Ins);
7144 
7145   for (unsigned i = 0; i != NumArgs; ++i) {
7146     MVT ArgVT = Ins[i].VT;
7147     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
7148 
7149     Type *ArgTy = nullptr;
7150     if (IsRet)
7151       ArgTy = FType->getReturnType();
7152     else if (Ins[i].isOrigArg())
7153       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
7154 
7155     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7156     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
7157            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
7158            FirstMaskArgument)) {
7159       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
7160                         << EVT(ArgVT).getEVTString() << '\n');
7161       llvm_unreachable(nullptr);
7162     }
7163   }
7164 }
7165 
7166 void RISCVTargetLowering::analyzeOutputArgs(
7167     MachineFunction &MF, CCState &CCInfo,
7168     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
7169     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
7170   unsigned NumArgs = Outs.size();
7171 
7172   Optional<unsigned> FirstMaskArgument;
7173   if (Subtarget.hasStdExtV())
7174     FirstMaskArgument = preAssignMask(Outs);
7175 
7176   for (unsigned i = 0; i != NumArgs; i++) {
7177     MVT ArgVT = Outs[i].VT;
7178     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
7179     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
7180 
7181     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7182     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
7183            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
7184            FirstMaskArgument)) {
7185       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
7186                         << EVT(ArgVT).getEVTString() << "\n");
7187       llvm_unreachable(nullptr);
7188     }
7189   }
7190 }
7191 
7192 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
7193 // values.
7194 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
7195                                    const CCValAssign &VA, const SDLoc &DL,
7196                                    const RISCVSubtarget &Subtarget) {
7197   switch (VA.getLocInfo()) {
7198   default:
7199     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7200   case CCValAssign::Full:
7201     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
7202       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
7203     break;
7204   case CCValAssign::BCvt:
7205     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7206       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
7207     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7208       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
7209     else
7210       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
7211     break;
7212   }
7213   return Val;
7214 }
7215 
7216 // The caller is responsible for loading the full value if the argument is
7217 // passed with CCValAssign::Indirect.
7218 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
7219                                 const CCValAssign &VA, const SDLoc &DL,
7220                                 const RISCVTargetLowering &TLI) {
7221   MachineFunction &MF = DAG.getMachineFunction();
7222   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7223   EVT LocVT = VA.getLocVT();
7224   SDValue Val;
7225   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
7226   Register VReg = RegInfo.createVirtualRegister(RC);
7227   RegInfo.addLiveIn(VA.getLocReg(), VReg);
7228   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
7229 
7230   if (VA.getLocInfo() == CCValAssign::Indirect)
7231     return Val;
7232 
7233   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
7234 }
7235 
7236 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
7237                                    const CCValAssign &VA, const SDLoc &DL,
7238                                    const RISCVSubtarget &Subtarget) {
7239   EVT LocVT = VA.getLocVT();
7240 
7241   switch (VA.getLocInfo()) {
7242   default:
7243     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7244   case CCValAssign::Full:
7245     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
7246       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
7247     break;
7248   case CCValAssign::BCvt:
7249     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7250       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
7251     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7252       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
7253     else
7254       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
7255     break;
7256   }
7257   return Val;
7258 }
7259 
7260 // The caller is responsible for loading the full value if the argument is
7261 // passed with CCValAssign::Indirect.
7262 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
7263                                 const CCValAssign &VA, const SDLoc &DL) {
7264   MachineFunction &MF = DAG.getMachineFunction();
7265   MachineFrameInfo &MFI = MF.getFrameInfo();
7266   EVT LocVT = VA.getLocVT();
7267   EVT ValVT = VA.getValVT();
7268   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
7269   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
7270                                  /*Immutable=*/true);
7271   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7272   SDValue Val;
7273 
7274   ISD::LoadExtType ExtType;
7275   switch (VA.getLocInfo()) {
7276   default:
7277     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7278   case CCValAssign::Full:
7279   case CCValAssign::Indirect:
7280   case CCValAssign::BCvt:
7281     ExtType = ISD::NON_EXTLOAD;
7282     break;
7283   }
7284   Val = DAG.getExtLoad(
7285       ExtType, DL, LocVT, Chain, FIN,
7286       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
7287   return Val;
7288 }
7289 
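// Reassemble an f64 argument that was split for passing in GPRs and/or the
// stack on RV32: the value may arrive in a pair of GPRs, in one GPR plus a
// stack slot, or entirely on the stack.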
7290 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
7291                                        const CCValAssign &VA, const SDLoc &DL) {
7292   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
7293          "Unexpected VA");
7294   MachineFunction &MF = DAG.getMachineFunction();
7295   MachineFrameInfo &MFI = MF.getFrameInfo();
7296   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7297 
7298   if (VA.isMemLoc()) {
7299     // f64 is passed on the stack.
7300     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
7301     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7302     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
7303                        MachinePointerInfo::getFixedStack(MF, FI));
7304   }
7305 
7306   assert(VA.isRegLoc() && "Expected register VA assignment");
7307 
7308   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7309   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
7310   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
7311   SDValue Hi;
7312   if (VA.getLocReg() == RISCV::X17) {
7313     // Second half of f64 is passed on the stack.
7314     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
7315     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7316     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
7317                      MachinePointerInfo::getFixedStack(MF, FI));
7318   } else {
7319     // Second half of f64 is passed in another GPR.
7320     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7321     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
7322     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
7323   }
7324   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
7325 }
7326 
// FastCC gives less than a 1% performance improvement on some particular
// benchmarks, but it may theoretically benefit some other cases.
7329 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
7330                             unsigned ValNo, MVT ValVT, MVT LocVT,
7331                             CCValAssign::LocInfo LocInfo,
7332                             ISD::ArgFlagsTy ArgFlags, CCState &State,
7333                             bool IsFixed, bool IsRet, Type *OrigTy,
7334                             const RISCVTargetLowering &TLI,
7335                             Optional<unsigned> FirstMaskArgument) {
7336 
  // X5 and X6 might be used by the save-restore libcalls.
7338   static const MCPhysReg GPRList[] = {
7339       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
7340       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
7341       RISCV::X29, RISCV::X30, RISCV::X31};
7342 
7343   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7344     if (unsigned Reg = State.AllocateReg(GPRList)) {
7345       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7346       return false;
7347     }
7348   }
7349 
7350   if (LocVT == MVT::f16) {
7351     static const MCPhysReg FPR16List[] = {
7352         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
7353         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
7354         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
7355         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
7356     if (unsigned Reg = State.AllocateReg(FPR16List)) {
7357       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7358       return false;
7359     }
7360   }
7361 
7362   if (LocVT == MVT::f32) {
7363     static const MCPhysReg FPR32List[] = {
7364         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
7365         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
7366         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
7367         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
7368     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7369       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7370       return false;
7371     }
7372   }
7373 
7374   if (LocVT == MVT::f64) {
7375     static const MCPhysReg FPR64List[] = {
7376         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
7377         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
7378         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
7379         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
7380     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7381       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7382       return false;
7383     }
7384   }
7385 
7386   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
7387     unsigned Offset4 = State.AllocateStack(4, Align(4));
7388     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
7389     return false;
7390   }
7391 
7392   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
7393     unsigned Offset5 = State.AllocateStack(8, Align(8));
7394     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
7395     return false;
7396   }
7397 
7398   if (LocVT.isVector()) {
7399     if (unsigned Reg =
7400             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
7401       // Fixed-length vectors are located in the corresponding scalable-vector
7402       // container types.
7403       if (ValVT.isFixedLengthVector())
7404         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7405       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7406     } else {
7407       // Try and pass the address via a "fast" GPR.
7408       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
7409         LocInfo = CCValAssign::Indirect;
7410         LocVT = TLI.getSubtarget().getXLenVT();
7411         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
7412       } else if (ValVT.isFixedLengthVector()) {
7413         auto StackAlign =
7414             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7415         unsigned StackOffset =
7416             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
7417         State.addLoc(
7418             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7419       } else {
7420         // Can't pass scalable vectors on the stack.
7421         return true;
7422       }
7423     }
7424 
7425     return false;
7426   }
7427 
7428   return true; // CC didn't match.
7429 }
7430 
7431 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
7432                          CCValAssign::LocInfo LocInfo,
7433                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
7434 
7435   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7436     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
7437     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
7438     static const MCPhysReg GPRList[] = {
7439         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
7440         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
7441     if (unsigned Reg = State.AllocateReg(GPRList)) {
7442       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7443       return false;
7444     }
7445   }
7446 
7447   if (LocVT == MVT::f32) {
7448     // Pass in STG registers: F1, ..., F6
7449     //                        fs0 ... fs5
7450     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
7451                                           RISCV::F18_F, RISCV::F19_F,
7452                                           RISCV::F20_F, RISCV::F21_F};
7453     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7454       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7455       return false;
7456     }
7457   }
7458 
7459   if (LocVT == MVT::f64) {
7460     // Pass in STG registers: D1, ..., D6
7461     //                        fs6 ... fs11
7462     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
7463                                           RISCV::F24_D, RISCV::F25_D,
7464                                           RISCV::F26_D, RISCV::F27_D};
7465     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7466       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7467       return false;
7468     }
7469   }
7470 
7471   report_fatal_error("No registers left in GHC calling convention");
7472   return true;
7473 }
7474 
7475 // Transform physical registers into virtual registers.
7476 SDValue RISCVTargetLowering::LowerFormalArguments(
7477     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
7478     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
7479     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7480 
7481   MachineFunction &MF = DAG.getMachineFunction();
7482 
7483   switch (CallConv) {
7484   default:
7485     report_fatal_error("Unsupported calling convention");
7486   case CallingConv::C:
7487   case CallingConv::Fast:
7488     break;
7489   case CallingConv::GHC:
7490     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
7491         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
7492       report_fatal_error(
7493         "GHC calling convention requires the F and D instruction set extensions");
7494   }
7495 
7496   const Function &Func = MF.getFunction();
7497   if (Func.hasFnAttribute("interrupt")) {
7498     if (!Func.arg_empty())
7499       report_fatal_error(
7500         "Functions with the interrupt attribute cannot have arguments!");
7501 
7502     StringRef Kind =
7503       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7504 
7505     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
7506       report_fatal_error(
7507         "Function interrupt attribute argument not supported!");
7508   }
7509 
7510   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7511   MVT XLenVT = Subtarget.getXLenVT();
7512   unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
7514   std::vector<SDValue> OutChains;
7515 
7516   // Assign locations to all of the incoming arguments.
7517   SmallVector<CCValAssign, 16> ArgLocs;
7518   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7519 
7520   if (CallConv == CallingConv::GHC)
7521     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
7522   else
7523     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
7524                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7525                                                    : CC_RISCV);
7526 
7527   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7528     CCValAssign &VA = ArgLocs[i];
7529     SDValue ArgValue;
7530     // Passing f64 on RV32D with a soft float ABI must be handled as a special
7531     // case.
7532     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
7533       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
7534     else if (VA.isRegLoc())
7535       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
7536     else
7537       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
7538 
7539     if (VA.getLocInfo() == CCValAssign::Indirect) {
7540       // If the original argument was split and passed by reference (e.g. i128
7541       // on RV32), we need to load all parts of it here (using the same
7542       // address). Vectors may be partly split to registers and partly to the
7543       // stack, in which case the base address is partly offset and subsequent
      // loads are relative to that.
7545       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
7546                                    MachinePointerInfo()));
7547       unsigned ArgIndex = Ins[i].OrigArgIndex;
7548       unsigned ArgPartOffset = Ins[i].PartOffset;
7549       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7550       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
7551         CCValAssign &PartVA = ArgLocs[i + 1];
7552         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
7553         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7554         if (PartVA.getValVT().isScalableVector())
7555           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7556         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
7557         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
7558                                      MachinePointerInfo()));
7559         ++i;
7560       }
7561       continue;
7562     }
7563     InVals.push_back(ArgValue);
7564   }
7565 
7566   if (IsVarArg) {
7567     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
7568     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
7569     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
7570     MachineFrameInfo &MFI = MF.getFrameInfo();
7571     MachineRegisterInfo &RegInfo = MF.getRegInfo();
7572     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
7573 
7574     // Offset of the first variable argument from stack pointer, and size of
7575     // the vararg save area. For now, the varargs save area is either zero or
7576     // large enough to hold a0-a7.
7577     int VaArgOffset, VarArgsSaveSize;
7578 
7579     // If all registers are allocated, then all varargs must be passed on the
7580     // stack and we don't need to save any argregs.
7581     if (ArgRegs.size() == Idx) {
7582       VaArgOffset = CCInfo.getNextStackOffset();
7583       VarArgsSaveSize = 0;
7584     } else {
7585       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
7586       VaArgOffset = -VarArgsSaveSize;
7587     }
7588 
    // Record the frame index of the first variable argument,
    // which is needed when lowering VASTART.
7591     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7592     RVFI->setVarArgsFrameIndex(FI);
7593 
    // If saving an odd number of registers, create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
7597     if (Idx % 2) {
7598       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
7599       VarArgsSaveSize += XLenInBytes;
7600     }
7601 
7602     // Copy the integer registers that may have been used for passing varargs
7603     // to the vararg save area.
7604     for (unsigned I = Idx; I < ArgRegs.size();
7605          ++I, VaArgOffset += XLenInBytes) {
7606       const Register Reg = RegInfo.createVirtualRegister(RC);
7607       RegInfo.addLiveIn(ArgRegs[I], Reg);
7608       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
7609       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7610       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7611       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
7612                                    MachinePointerInfo::getFixedStack(MF, FI));
7613       cast<StoreSDNode>(Store.getNode())
7614           ->getMemOperand()
7615           ->setValue((Value *)nullptr);
7616       OutChains.push_back(Store);
7617     }
7618     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
7619   }
7620 
7621   // All stores are grouped in one node to allow the matching between
7622   // the size of Ins and InVals. This only happens for vararg functions.
7623   if (!OutChains.empty()) {
7624     OutChains.push_back(Chain);
7625     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
7626   }
7627 
7628   return Chain;
7629 }
7630 
7631 /// isEligibleForTailCallOptimization - Check whether the call is eligible
7632 /// for tail call optimization.
7633 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
7634 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
7635     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
7636     const SmallVector<CCValAssign, 16> &ArgLocs) const {
7637 
7638   auto &Callee = CLI.Callee;
7639   auto CalleeCC = CLI.CallConv;
7640   auto &Outs = CLI.Outs;
7641   auto &Caller = MF.getFunction();
7642   auto CallerCC = Caller.getCallingConv();
7643 
7644   // Exception-handling functions need a special set of instructions to
7645   // indicate a return to the hardware. Tail-calling another function would
7646   // probably break this.
7647   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
7648   // should be expanded as new function attributes are introduced.
7649   if (Caller.hasFnAttribute("interrupt"))
7650     return false;
7651 
7652   // Do not tail call opt if the stack is used to pass parameters.
7653   if (CCInfo.getNextStackOffset() != 0)
7654     return false;
7655 
  // Do not tail call opt if any parameters need to be passed indirectly.
  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
  // passed indirectly: the address of the value is passed in a register, or
  // on the stack if no register is free. Passing indirectly usually requires
  // allocating stack space to hold the value itself, so the
  // CCInfo.getNextStackOffset() != 0 check above is not sufficient; we also
  // need to check whether any CCValAssign in ArgLocs is passed
  // CCValAssign::Indirect.
7664   for (auto &VA : ArgLocs)
7665     if (VA.getLocInfo() == CCValAssign::Indirect)
7666       return false;
7667 
7668   // Do not tail call opt if either caller or callee uses struct return
7669   // semantics.
7670   auto IsCallerStructRet = Caller.hasStructRetAttr();
7671   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
7672   if (IsCallerStructRet || IsCalleeStructRet)
7673     return false;
7674 
7675   // Externally-defined functions with weak linkage should not be
7676   // tail-called. The behaviour of branch instructions in this situation (as
7677   // used for tail calls) is implementation-defined, so we cannot rely on the
7678   // linker replacing the tail call with a return.
7679   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
7680     const GlobalValue *GV = G->getGlobal();
7681     if (GV->hasExternalWeakLinkage())
7682       return false;
7683   }
7684 
7685   // The callee has to preserve all registers the caller needs to preserve.
7686   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
7687   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
7688   if (CalleeCC != CallerCC) {
7689     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
7690     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
7691       return false;
7692   }
7693 
7694   // Byval parameters hand the function a pointer directly into the stack area
7695   // we want to reuse during a tail call. Working around this *is* possible
7696   // but less efficient and uglier in LowerCall.
7697   for (auto &Arg : Outs)
7698     if (Arg.Flags.isByVal())
7699       return false;
7700 
7701   return true;
7702 }
7703 
7704 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
7705   return DAG.getDataLayout().getPrefTypeAlign(
7706       VT.getTypeForEVT(*DAG.getContext()));
7707 }
7708 
7709 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
7710 // and output parameter nodes.
7711 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
7712                                        SmallVectorImpl<SDValue> &InVals) const {
7713   SelectionDAG &DAG = CLI.DAG;
7714   SDLoc &DL = CLI.DL;
7715   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
7716   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
7717   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
7718   SDValue Chain = CLI.Chain;
7719   SDValue Callee = CLI.Callee;
7720   bool &IsTailCall = CLI.IsTailCall;
7721   CallingConv::ID CallConv = CLI.CallConv;
7722   bool IsVarArg = CLI.IsVarArg;
7723   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7724   MVT XLenVT = Subtarget.getXLenVT();
7725 
7726   MachineFunction &MF = DAG.getMachineFunction();
7727 
7728   // Analyze the operands of the call, assigning locations to each operand.
7729   SmallVector<CCValAssign, 16> ArgLocs;
7730   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7731 
7732   if (CallConv == CallingConv::GHC)
7733     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
7734   else
7735     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
7736                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7737                                                     : CC_RISCV);
7738 
7739   // Check if it's really possible to do a tail call.
7740   if (IsTailCall)
7741     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
7742 
7743   if (IsTailCall)
7744     ++NumTailCalls;
7745   else if (CLI.CB && CLI.CB->isMustTailCall())
7746     report_fatal_error("failed to perform tail call elimination on a call "
7747                        "site marked musttail");
7748 
7749   // Get a count of how many bytes are to be pushed on the stack.
7750   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
7751 
7752   // Create local copies for byval args
7753   SmallVector<SDValue, 8> ByValArgs;
7754   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7755     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7756     if (!Flags.isByVal())
7757       continue;
7758 
7759     SDValue Arg = OutVals[i];
7760     unsigned Size = Flags.getByValSize();
7761     Align Alignment = Flags.getNonZeroByValAlign();
7762 
7763     int FI =
7764         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
7765     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7766     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
7767 
7768     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
7769                           /*IsVolatile=*/false,
7770                           /*AlwaysInline=*/false, IsTailCall,
7771                           MachinePointerInfo(), MachinePointerInfo());
7772     ByValArgs.push_back(FIPtr);
7773   }
7774 
7775   if (!IsTailCall)
7776     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
7777 
7778   // Copy argument values to their designated locations.
7779   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
7780   SmallVector<SDValue, 8> MemOpChains;
7781   SDValue StackPtr;
7782   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
7783     CCValAssign &VA = ArgLocs[i];
7784     SDValue ArgValue = OutVals[i];
7785     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7786 
7787     // Handle passing f64 on RV32D with a soft float ABI as a special case.
7788     bool IsF64OnRV32DSoftABI =
7789         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
7790     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
7791       SDValue SplitF64 = DAG.getNode(
7792           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
7793       SDValue Lo = SplitF64.getValue(0);
7794       SDValue Hi = SplitF64.getValue(1);
7795 
7796       Register RegLo = VA.getLocReg();
7797       RegsToPass.push_back(std::make_pair(RegLo, Lo));
7798 
7799       if (RegLo == RISCV::X17) {
        // x17 (a7) is the last GPR argument register, so no GPR is left for
        // the second half of the f64; it is passed on the stack instead.
        // Work out the address of the stack slot.
7802         if (!StackPtr.getNode())
7803           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7804         // Emit the store.
7805         MemOpChains.push_back(
7806             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
7807       } else {
7808         // Second half of f64 is passed in another GPR.
7809         assert(RegLo < RISCV::X31 && "Invalid register pair");
7810         Register RegHigh = RegLo + 1;
7811         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
7812       }
7813       continue;
7814     }
7815 
7816     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
7817     // as any other MemLoc.
7818 
7819     // Promote the value if needed.
7820     // For now, only handle fully promoted and indirect arguments.
7821     if (VA.getLocInfo() == CCValAssign::Indirect) {
7822       // Store the argument in a stack slot and pass its address.
7823       Align StackAlign =
7824           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
7825                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
7826       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
7827       // If the original argument was split (e.g. i128), we need
7828       // to store the required parts of it here (and pass just one address).
7829       // Vectors may be partly split to registers and partly to the stack, in
7830       // which case the base address is partly offset and subsequent stores are
7831       // relative to that.
7832       unsigned ArgIndex = Outs[i].OrigArgIndex;
7833       unsigned ArgPartOffset = Outs[i].PartOffset;
7834       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
      // Calculate the total size to store. We only discover what actually
      // needs to be stored by walking the remaining parts and collecting
      // their sizes and alignments.
7838       SmallVector<std::pair<SDValue, SDValue>> Parts;
7839       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
7840         SDValue PartValue = OutVals[i + 1];
7841         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
7842         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7843         EVT PartVT = PartValue.getValueType();
7844         if (PartVT.isScalableVector())
7845           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7846         StoredSize += PartVT.getStoreSize();
7847         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
7848         Parts.push_back(std::make_pair(PartValue, Offset));
7849         ++i;
7850       }
7851       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
7852       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
7853       MemOpChains.push_back(
7854           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
7855                        MachinePointerInfo::getFixedStack(MF, FI)));
7856       for (const auto &Part : Parts) {
7857         SDValue PartValue = Part.first;
7858         SDValue PartOffset = Part.second;
7859         SDValue Address =
7860             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
7861         MemOpChains.push_back(
7862             DAG.getStore(Chain, DL, PartValue, Address,
7863                          MachinePointerInfo::getFixedStack(MF, FI)));
7864       }
7865       ArgValue = SpillSlot;
7866     } else {
7867       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
7868     }
7869 
7870     // Use local copy if it is a byval arg.
7871     if (Flags.isByVal())
7872       ArgValue = ByValArgs[j++];
7873 
7874     if (VA.isRegLoc()) {
7875       // Queue up the argument copies and emit them at the end.
7876       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
7877     } else {
7878       assert(VA.isMemLoc() && "Argument not register or memory");
7879       assert(!IsTailCall && "Tail call not allowed if stack is used "
7880                             "for passing parameters");
7881 
7882       // Work out the address of the stack slot.
7883       if (!StackPtr.getNode())
7884         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7885       SDValue Address =
7886           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
7887                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
7888 
7889       // Emit the store.
7890       MemOpChains.push_back(
7891           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
7892     }
7893   }
7894 
7895   // Join the stores, which are independent of one another.
7896   if (!MemOpChains.empty())
7897     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
7898 
7899   SDValue Glue;
7900 
7901   // Build a sequence of copy-to-reg nodes, chained and glued together.
7902   for (auto &Reg : RegsToPass) {
7903     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
7904     Glue = Chain.getValue(1);
7905   }
7906 
  // Validate that none of the argument registers have been marked as
  // reserved; if any has, report an error. Do the same for the return address
  // register if this is not a tail call.
7910   validateCCReservedRegs(RegsToPass, MF);
7911   if (!IsTailCall &&
7912       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
7913     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7914         MF.getFunction(),
7915         "Return address register required, but has been reserved."});
7916 
  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
  // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
  // split it, and so that the direct call can be matched by PseudoCALL.
7920   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
7921     const GlobalValue *GV = S->getGlobal();
7922 
7923     unsigned OpFlags = RISCVII::MO_CALL;
7924     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
7925       OpFlags = RISCVII::MO_PLT;
7926 
7927     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
7928   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
7929     unsigned OpFlags = RISCVII::MO_CALL;
7930 
7931     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
7932                                                  nullptr))
7933       OpFlags = RISCVII::MO_PLT;
7934 
7935     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
7936   }
7937 
7938   // The first call operand is the chain and the second is the target address.
7939   SmallVector<SDValue, 8> Ops;
7940   Ops.push_back(Chain);
7941   Ops.push_back(Callee);
7942 
7943   // Add argument registers to the end of the list so that they are
7944   // known live into the call.
7945   for (auto &Reg : RegsToPass)
7946     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
7947 
7948   if (!IsTailCall) {
7949     // Add a register mask operand representing the call-preserved registers.
7950     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
7951     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
7952     assert(Mask && "Missing call preserved mask for calling convention");
7953     Ops.push_back(DAG.getRegisterMask(Mask));
7954   }
7955 
7956   // Glue the call to the argument copies, if any.
7957   if (Glue.getNode())
7958     Ops.push_back(Glue);
7959 
7960   // Emit the call.
7961   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7962 
7963   if (IsTailCall) {
7964     MF.getFrameInfo().setHasTailCall();
7965     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
7966   }
7967 
7968   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
7969   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
7970   Glue = Chain.getValue(1);
7971 
7972   // Mark the end of the call, which is glued to the call itself.
7973   Chain = DAG.getCALLSEQ_END(Chain,
7974                              DAG.getConstant(NumBytes, DL, PtrVT, true),
7975                              DAG.getConstant(0, DL, PtrVT, true),
7976                              Glue, DL);
7977   Glue = Chain.getValue(1);
7978 
7979   // Assign locations to each value returned by this call.
7980   SmallVector<CCValAssign, 16> RVLocs;
7981   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
7982   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
7983 
7984   // Copy all of the result registers out of their specified physreg.
7985   for (auto &VA : RVLocs) {
7986     // Copy the value out
7987     SDValue RetValue =
7988         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
7989     // Glue the RetValue to the end of the call sequence
7990     Chain = RetValue.getValue(1);
7991     Glue = RetValue.getValue(2);
7992 
7993     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
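      // On RV32 with a soft f64 ABI, the f64 result comes back split across
      // the a0+a1 pair; fetch the high half from a1 and rebuild the f64.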
7994       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
7995       SDValue RetValue2 =
7996           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
7997       Chain = RetValue2.getValue(1);
7998       Glue = RetValue2.getValue(2);
7999       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
8000                              RetValue2);
8001     }
8002 
8003     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
8004 
8005     InVals.push_back(RetValue);
8006   }
8007 
8008   return Chain;
8009 }
8010 
8011 bool RISCVTargetLowering::CanLowerReturn(
8012     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
8013     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
8014   SmallVector<CCValAssign, 16> RVLocs;
8015   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
8016 
8017   Optional<unsigned> FirstMaskArgument;
8018   if (Subtarget.hasStdExtV())
8019     FirstMaskArgument = preAssignMask(Outs);
8020 
8021   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
8022     MVT VT = Outs[i].VT;
8023     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
8024     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8025     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
8026                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
8027                  *this, FirstMaskArgument))
8028       return false;
8029   }
8030   return true;
8031 }
8032 
8033 SDValue
8034 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
8035                                  bool IsVarArg,
8036                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
8037                                  const SmallVectorImpl<SDValue> &OutVals,
8038                                  const SDLoc &DL, SelectionDAG &DAG) const {
8039   const MachineFunction &MF = DAG.getMachineFunction();
8040   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
8041 
8042   // Stores the assignment of the return value to a location.
8043   SmallVector<CCValAssign, 16> RVLocs;
8044 
8045   // Info about the registers and stack slot.
8046   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
8047                  *DAG.getContext());
8048 
8049   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
8050                     nullptr, CC_RISCV);
8051 
8052   if (CallConv == CallingConv::GHC && !RVLocs.empty())
8053     report_fatal_error("GHC functions return void only");
8054 
8055   SDValue Glue;
8056   SmallVector<SDValue, 4> RetOps(1, Chain);
8057 
8058   // Copy the result values into the output registers.
8059   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
8060     SDValue Val = OutVals[i];
8061     CCValAssign &VA = RVLocs[i];
8062     assert(VA.isRegLoc() && "Can only return in registers!");
8063 
8064     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
8065       // Handle returning f64 on RV32D with a soft float ABI.
8066       assert(VA.isRegLoc() && "Expected return via registers");
8067       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
8068                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
8069       SDValue Lo = SplitF64.getValue(0);
8070       SDValue Hi = SplitF64.getValue(1);
8071       Register RegLo = VA.getLocReg();
8072       assert(RegLo < RISCV::X31 && "Invalid register pair");
8073       Register RegHi = RegLo + 1;
8074 
8075       if (STI.isRegisterReservedByUser(RegLo) ||
8076           STI.isRegisterReservedByUser(RegHi))
8077         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8078             MF.getFunction(),
8079             "Return value register required, but has been reserved."});
8080 
8081       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
8082       Glue = Chain.getValue(1);
8083       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
8084       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
8085       Glue = Chain.getValue(1);
8086       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
8087     } else {
8088       // Handle a 'normal' return.
8089       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
8090       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
8091 
8092       if (STI.isRegisterReservedByUser(VA.getLocReg()))
8093         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8094             MF.getFunction(),
8095             "Return value register required, but has been reserved."});
8096 
8097       // Guarantee that all emitted copies are stuck together.
8098       Glue = Chain.getValue(1);
8099       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
8100     }
8101   }
8102 
8103   RetOps[0] = Chain; // Update chain.
8104 
8105   // Add the glue node if we have it.
8106   if (Glue.getNode()) {
8107     RetOps.push_back(Glue);
8108   }
8109 
8110   unsigned RetOpc = RISCVISD::RET_FLAG;
8111   // Interrupt service routines use different return instructions.
8112   const Function &Func = DAG.getMachineFunction().getFunction();
8113   if (Func.hasFnAttribute("interrupt")) {
8114     if (!Func.getReturnType()->isVoidTy())
8115       report_fatal_error(
8116           "Functions with the interrupt attribute must have void return type!");
8117 
8118     MachineFunction &MF = DAG.getMachineFunction();
8119     StringRef Kind =
8120       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
8121 
8122     if (Kind == "user")
8123       RetOpc = RISCVISD::URET_FLAG;
8124     else if (Kind == "supervisor")
8125       RetOpc = RISCVISD::SRET_FLAG;
8126     else
8127       RetOpc = RISCVISD::MRET_FLAG;
8128   }
8129 
8130   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
8131 }
8132 
8133 void RISCVTargetLowering::validateCCReservedRegs(
8134     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
8135     MachineFunction &MF) const {
8136   const Function &F = MF.getFunction();
8137   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
8138 
8139   if (llvm::any_of(Regs, [&STI](auto Reg) {
8140         return STI.isRegisterReservedByUser(Reg.first);
8141       }))
8142     F.getContext().diagnose(DiagnosticInfoUnsupported{
8143         F, "Argument register required, but has been reserved."});
8144 }
8145 
8146 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
8147   return CI->isTailCall();
8148 }
8149 
8150 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
8151 #define NODE_NAME_CASE(NODE)                                                   \
8152   case RISCVISD::NODE:                                                         \
8153     return "RISCVISD::" #NODE;
8154   // clang-format off
8155   switch ((RISCVISD::NodeType)Opcode) {
8156   case RISCVISD::FIRST_NUMBER:
8157     break;
8158   NODE_NAME_CASE(RET_FLAG)
8159   NODE_NAME_CASE(URET_FLAG)
8160   NODE_NAME_CASE(SRET_FLAG)
8161   NODE_NAME_CASE(MRET_FLAG)
8162   NODE_NAME_CASE(CALL)
8163   NODE_NAME_CASE(SELECT_CC)
8164   NODE_NAME_CASE(BR_CC)
8165   NODE_NAME_CASE(BuildPairF64)
8166   NODE_NAME_CASE(SplitF64)
8167   NODE_NAME_CASE(TAIL)
8168   NODE_NAME_CASE(MULHSU)
8169   NODE_NAME_CASE(SLLW)
8170   NODE_NAME_CASE(SRAW)
8171   NODE_NAME_CASE(SRLW)
8172   NODE_NAME_CASE(DIVW)
8173   NODE_NAME_CASE(DIVUW)
8174   NODE_NAME_CASE(REMUW)
8175   NODE_NAME_CASE(ROLW)
8176   NODE_NAME_CASE(RORW)
8177   NODE_NAME_CASE(CLZW)
8178   NODE_NAME_CASE(CTZW)
8179   NODE_NAME_CASE(FSLW)
8180   NODE_NAME_CASE(FSRW)
8181   NODE_NAME_CASE(FSL)
8182   NODE_NAME_CASE(FSR)
8183   NODE_NAME_CASE(FMV_H_X)
8184   NODE_NAME_CASE(FMV_X_ANYEXTH)
8185   NODE_NAME_CASE(FMV_W_X_RV64)
8186   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
8187   NODE_NAME_CASE(READ_CYCLE_WIDE)
8188   NODE_NAME_CASE(GREV)
8189   NODE_NAME_CASE(GREVW)
8190   NODE_NAME_CASE(GORC)
8191   NODE_NAME_CASE(GORCW)
8192   NODE_NAME_CASE(SHFL)
8193   NODE_NAME_CASE(SHFLW)
8194   NODE_NAME_CASE(UNSHFL)
8195   NODE_NAME_CASE(UNSHFLW)
8196   NODE_NAME_CASE(BCOMPRESS)
8197   NODE_NAME_CASE(BCOMPRESSW)
8198   NODE_NAME_CASE(BDECOMPRESS)
8199   NODE_NAME_CASE(BDECOMPRESSW)
8200   NODE_NAME_CASE(VMV_V_X_VL)
8201   NODE_NAME_CASE(VFMV_V_F_VL)
8202   NODE_NAME_CASE(VMV_X_S)
8203   NODE_NAME_CASE(VMV_S_X_VL)
8204   NODE_NAME_CASE(VFMV_S_F_VL)
8205   NODE_NAME_CASE(SPLAT_VECTOR_I64)
8206   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
8207   NODE_NAME_CASE(READ_VLENB)
8208   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
8209   NODE_NAME_CASE(VSLIDEUP_VL)
8210   NODE_NAME_CASE(VSLIDE1UP_VL)
8211   NODE_NAME_CASE(VSLIDEDOWN_VL)
8212   NODE_NAME_CASE(VSLIDE1DOWN_VL)
8213   NODE_NAME_CASE(VID_VL)
8214   NODE_NAME_CASE(VFNCVT_ROD_VL)
8215   NODE_NAME_CASE(VECREDUCE_ADD_VL)
8216   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
8217   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
8218   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
8219   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
8220   NODE_NAME_CASE(VECREDUCE_AND_VL)
8221   NODE_NAME_CASE(VECREDUCE_OR_VL)
8222   NODE_NAME_CASE(VECREDUCE_XOR_VL)
8223   NODE_NAME_CASE(VECREDUCE_FADD_VL)
8224   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
8225   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
8226   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
8227   NODE_NAME_CASE(ADD_VL)
8228   NODE_NAME_CASE(AND_VL)
8229   NODE_NAME_CASE(MUL_VL)
8230   NODE_NAME_CASE(OR_VL)
8231   NODE_NAME_CASE(SDIV_VL)
8232   NODE_NAME_CASE(SHL_VL)
8233   NODE_NAME_CASE(SREM_VL)
8234   NODE_NAME_CASE(SRA_VL)
8235   NODE_NAME_CASE(SRL_VL)
8236   NODE_NAME_CASE(SUB_VL)
8237   NODE_NAME_CASE(UDIV_VL)
8238   NODE_NAME_CASE(UREM_VL)
8239   NODE_NAME_CASE(XOR_VL)
8240   NODE_NAME_CASE(FADD_VL)
8241   NODE_NAME_CASE(FSUB_VL)
8242   NODE_NAME_CASE(FMUL_VL)
8243   NODE_NAME_CASE(FDIV_VL)
8244   NODE_NAME_CASE(FNEG_VL)
8245   NODE_NAME_CASE(FABS_VL)
8246   NODE_NAME_CASE(FSQRT_VL)
8247   NODE_NAME_CASE(FMA_VL)
8248   NODE_NAME_CASE(FCOPYSIGN_VL)
8249   NODE_NAME_CASE(SMIN_VL)
8250   NODE_NAME_CASE(SMAX_VL)
8251   NODE_NAME_CASE(UMIN_VL)
8252   NODE_NAME_CASE(UMAX_VL)
8253   NODE_NAME_CASE(FMINNUM_VL)
8254   NODE_NAME_CASE(FMAXNUM_VL)
8255   NODE_NAME_CASE(MULHS_VL)
8256   NODE_NAME_CASE(MULHU_VL)
8257   NODE_NAME_CASE(FP_TO_SINT_VL)
8258   NODE_NAME_CASE(FP_TO_UINT_VL)
8259   NODE_NAME_CASE(SINT_TO_FP_VL)
8260   NODE_NAME_CASE(UINT_TO_FP_VL)
8261   NODE_NAME_CASE(FP_EXTEND_VL)
8262   NODE_NAME_CASE(FP_ROUND_VL)
8263   NODE_NAME_CASE(VWMUL_VL)
8264   NODE_NAME_CASE(VWMULU_VL)
8265   NODE_NAME_CASE(SETCC_VL)
8266   NODE_NAME_CASE(VSELECT_VL)
8267   NODE_NAME_CASE(VMAND_VL)
8268   NODE_NAME_CASE(VMOR_VL)
8269   NODE_NAME_CASE(VMXOR_VL)
8270   NODE_NAME_CASE(VMCLR_VL)
8271   NODE_NAME_CASE(VMSET_VL)
8272   NODE_NAME_CASE(VRGATHER_VX_VL)
8273   NODE_NAME_CASE(VRGATHER_VV_VL)
8274   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
8275   NODE_NAME_CASE(VSEXT_VL)
8276   NODE_NAME_CASE(VZEXT_VL)
8277   NODE_NAME_CASE(VPOPC_VL)
8278   NODE_NAME_CASE(VLE_VL)
8279   NODE_NAME_CASE(VSE_VL)
8280   NODE_NAME_CASE(READ_CSR)
8281   NODE_NAME_CASE(WRITE_CSR)
8282   NODE_NAME_CASE(SWAP_CSR)
8283   }
8284   // clang-format on
8285   return nullptr;
8286 #undef NODE_NAME_CASE
8287 }
8288 
8289 /// getConstraintType - Given a constraint letter, return the type of
8290 /// constraint it is for this target.
8291 RISCVTargetLowering::ConstraintType
8292 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
8293   if (Constraint.size() == 1) {
8294     switch (Constraint[0]) {
8295     default:
8296       break;
8297     case 'f':
8298     case 'v':
8299       return C_RegisterClass;
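    // 'I', 'J' and 'K' are immediates; they are validated and materialized
    // in LowerAsmOperandForConstraint below.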
8300     case 'I':
8301     case 'J':
8302     case 'K':
8303       return C_Immediate;
8304     case 'A':
8305       return C_Memory;
8306     case 'S': // A symbolic address
8307       return C_Other;
8308     }
8309   }
8310   return TargetLowering::getConstraintType(Constraint);
8311 }
8312 
8313 std::pair<unsigned, const TargetRegisterClass *>
8314 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
8315                                                   StringRef Constraint,
8316                                                   MVT VT) const {
8317   // First, see if this is a constraint that directly corresponds to a
8318   // RISCV register class.
8319   if (Constraint.size() == 1) {
8320     switch (Constraint[0]) {
8321     case 'r':
8322       return std::make_pair(0U, &RISCV::GPRRegClass);
8323     case 'f':
8324       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
8325         return std::make_pair(0U, &RISCV::FPR16RegClass);
8326       if (Subtarget.hasStdExtF() && VT == MVT::f32)
8327         return std::make_pair(0U, &RISCV::FPR32RegClass);
8328       if (Subtarget.hasStdExtD() && VT == MVT::f64)
8329         return std::make_pair(0U, &RISCV::FPR64RegClass);
8330       break;
8331     case 'v':
8332       for (const auto *RC :
8333            {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
8334             &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8335         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
8336           return std::make_pair(0U, RC);
8337       }
8338       break;
8339     default:
8340       break;
8341     }
8342   }
8343 
8344   // Clang will correctly decode the usage of register name aliases into their
8345   // official names. However, other frontends like `rustc` do not. This allows
8346   // users of these frontends to use the ABI names for registers in LLVM-style
8347   // register constraints.
8348   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
8349                                .Case("{zero}", RISCV::X0)
8350                                .Case("{ra}", RISCV::X1)
8351                                .Case("{sp}", RISCV::X2)
8352                                .Case("{gp}", RISCV::X3)
8353                                .Case("{tp}", RISCV::X4)
8354                                .Case("{t0}", RISCV::X5)
8355                                .Case("{t1}", RISCV::X6)
8356                                .Case("{t2}", RISCV::X7)
8357                                .Cases("{s0}", "{fp}", RISCV::X8)
8358                                .Case("{s1}", RISCV::X9)
8359                                .Case("{a0}", RISCV::X10)
8360                                .Case("{a1}", RISCV::X11)
8361                                .Case("{a2}", RISCV::X12)
8362                                .Case("{a3}", RISCV::X13)
8363                                .Case("{a4}", RISCV::X14)
8364                                .Case("{a5}", RISCV::X15)
8365                                .Case("{a6}", RISCV::X16)
8366                                .Case("{a7}", RISCV::X17)
8367                                .Case("{s2}", RISCV::X18)
8368                                .Case("{s3}", RISCV::X19)
8369                                .Case("{s4}", RISCV::X20)
8370                                .Case("{s5}", RISCV::X21)
8371                                .Case("{s6}", RISCV::X22)
8372                                .Case("{s7}", RISCV::X23)
8373                                .Case("{s8}", RISCV::X24)
8374                                .Case("{s9}", RISCV::X25)
8375                                .Case("{s10}", RISCV::X26)
8376                                .Case("{s11}", RISCV::X27)
8377                                .Case("{t3}", RISCV::X28)
8378                                .Case("{t4}", RISCV::X29)
8379                                .Case("{t5}", RISCV::X30)
8380                                .Case("{t6}", RISCV::X31)
8381                                .Default(RISCV::NoRegister);
8382   if (XRegFromAlias != RISCV::NoRegister)
8383     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
8384 
  // TargetLowering::getRegForInlineAsmConstraint chooses registers for
  // InlineAsm constraints by the name of the TableGen record rather than the
  // AsmName, and we also want to match these names to the widest floating
  // point register type available, so manually select floating point
  // registers here.
  //
  // The second name in each pair of cases is the ABI name of the register,
  // so that frontends can also use the ABI names in register constraint
  // lists.
8392   if (Subtarget.hasStdExtF()) {
8393     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
8394                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
8395                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
8396                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
8397                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
8398                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
8399                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
8400                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
8401                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
8402                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
8403                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
8404                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
8405                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
8406                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
8407                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
8408                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
8409                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
8410                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
8411                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
8412                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
8413                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
8414                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
8415                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
8416                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
8417                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
8418                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
8419                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
8420                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
8421                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
8422                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
8423                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
8424                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
8425                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
8426                         .Default(RISCV::NoRegister);
8427     if (FReg != RISCV::NoRegister) {
8428       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
8429       if (Subtarget.hasStdExtD()) {
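        // Prefer the 64-bit register when the D extension is available, so
        // the constraint can hold the widest supported floating point type.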
8430         unsigned RegNo = FReg - RISCV::F0_F;
8431         unsigned DReg = RISCV::F0_D + RegNo;
8432         return std::make_pair(DReg, &RISCV::FPR64RegClass);
8433       }
8434       return std::make_pair(FReg, &RISCV::FPR32RegClass);
8435     }
8436   }
8437 
8438   if (Subtarget.hasStdExtV()) {
8439     Register VReg = StringSwitch<Register>(Constraint.lower())
8440                         .Case("{v0}", RISCV::V0)
8441                         .Case("{v1}", RISCV::V1)
8442                         .Case("{v2}", RISCV::V2)
8443                         .Case("{v3}", RISCV::V3)
8444                         .Case("{v4}", RISCV::V4)
8445                         .Case("{v5}", RISCV::V5)
8446                         .Case("{v6}", RISCV::V6)
8447                         .Case("{v7}", RISCV::V7)
8448                         .Case("{v8}", RISCV::V8)
8449                         .Case("{v9}", RISCV::V9)
8450                         .Case("{v10}", RISCV::V10)
8451                         .Case("{v11}", RISCV::V11)
8452                         .Case("{v12}", RISCV::V12)
8453                         .Case("{v13}", RISCV::V13)
8454                         .Case("{v14}", RISCV::V14)
8455                         .Case("{v15}", RISCV::V15)
8456                         .Case("{v16}", RISCV::V16)
8457                         .Case("{v17}", RISCV::V17)
8458                         .Case("{v18}", RISCV::V18)
8459                         .Case("{v19}", RISCV::V19)
8460                         .Case("{v20}", RISCV::V20)
8461                         .Case("{v21}", RISCV::V21)
8462                         .Case("{v22}", RISCV::V22)
8463                         .Case("{v23}", RISCV::V23)
8464                         .Case("{v24}", RISCV::V24)
8465                         .Case("{v25}", RISCV::V25)
8466                         .Case("{v26}", RISCV::V26)
8467                         .Case("{v27}", RISCV::V27)
8468                         .Case("{v28}", RISCV::V28)
8469                         .Case("{v29}", RISCV::V29)
8470                         .Case("{v30}", RISCV::V30)
8471                         .Case("{v31}", RISCV::V31)
8472                         .Default(RISCV::NoRegister);
8473     if (VReg != RISCV::NoRegister) {
8474       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
8475         return std::make_pair(VReg, &RISCV::VMRegClass);
8476       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
8477         return std::make_pair(VReg, &RISCV::VRRegClass);
8478       for (const auto *RC :
8479            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8480         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
8481           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
8482           return std::make_pair(VReg, RC);
8483         }
8484       }
8485     }
8486   }
8487 
8488   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
8489 }
8490 
8491 unsigned
8492 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
8493   // Currently only support length 1 constraints.
8494   if (ConstraintCode.size() == 1) {
8495     switch (ConstraintCode[0]) {
8496     case 'A':
8497       return InlineAsm::Constraint_A;
8498     default:
8499       break;
8500     }
8501   }
8502 
8503   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
8504 }
8505 
8506 void RISCVTargetLowering::LowerAsmOperandForConstraint(
8507     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
8508     SelectionDAG &DAG) const {
8509   // Currently only support length 1 constraints.
8510   if (Constraint.length() == 1) {
8511     switch (Constraint[0]) {
8512     case 'I':
8513       // Validate & create a 12-bit signed immediate operand.
8514       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8515         uint64_t CVal = C->getSExtValue();
8516         if (isInt<12>(CVal))
8517           Ops.push_back(
8518               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8519       }
8520       return;
8521     case 'J':
8522       // Validate & create an integer zero operand.
8523       if (auto *C = dyn_cast<ConstantSDNode>(Op))
8524         if (C->getZExtValue() == 0)
8525           Ops.push_back(
8526               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
8527       return;
8528     case 'K':
8529       // Validate & create a 5-bit unsigned immediate operand.
8530       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8531         uint64_t CVal = C->getZExtValue();
8532         if (isUInt<5>(CVal))
8533           Ops.push_back(
8534               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8535       }
8536       return;
8537     case 'S':
8538       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
8539         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
8540                                                  GA->getValueType(0)));
8541       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
8542         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
8543                                                 BA->getValueType(0)));
8544       }
8545       return;
8546     default:
8547       break;
8548     }
8549   }
8550   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
8551 }
8552 
8553 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
8554                                                    Instruction *Inst,
8555                                                    AtomicOrdering Ord) const {
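  // Emit a leading fence before sequentially consistent loads and before
  // release-or-stronger stores; emitTrailingFence below emits the trailing
  // fence after acquire-or-stronger loads. Other orderings need no fence.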
8556   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
8557     return Builder.CreateFence(Ord);
8558   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
8559     return Builder.CreateFence(AtomicOrdering::Release);
8560   return nullptr;
8561 }
8562 
8563 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
8564                                                     Instruction *Inst,
8565                                                     AtomicOrdering Ord) const {
8566   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
8567     return Builder.CreateFence(AtomicOrdering::Acquire);
8568   return nullptr;
8569 }
8570 
8571 TargetLowering::AtomicExpansionKind
8572 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
8573   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
8574   // point operations can't be used in an lr/sc sequence without breaking the
8575   // forward-progress guarantee.
8576   if (AI->isFloatingPointOperation())
8577     return AtomicExpansionKind::CmpXChg;
8578 
8579   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
8580   if (Size == 8 || Size == 16)
8581     return AtomicExpansionKind::MaskedIntrinsic;
8582   return AtomicExpansionKind::None;
8583 }
8584 
8585 static Intrinsic::ID
8586 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
8587   if (XLen == 32) {
8588     switch (BinOp) {
8589     default:
8590       llvm_unreachable("Unexpected AtomicRMW BinOp");
8591     case AtomicRMWInst::Xchg:
8592       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
8593     case AtomicRMWInst::Add:
8594       return Intrinsic::riscv_masked_atomicrmw_add_i32;
8595     case AtomicRMWInst::Sub:
8596       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
8597     case AtomicRMWInst::Nand:
8598       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
8599     case AtomicRMWInst::Max:
8600       return Intrinsic::riscv_masked_atomicrmw_max_i32;
8601     case AtomicRMWInst::Min:
8602       return Intrinsic::riscv_masked_atomicrmw_min_i32;
8603     case AtomicRMWInst::UMax:
8604       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
8605     case AtomicRMWInst::UMin:
8606       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
8607     }
8608   }
8609 
8610   if (XLen == 64) {
8611     switch (BinOp) {
8612     default:
8613       llvm_unreachable("Unexpected AtomicRMW BinOp");
8614     case AtomicRMWInst::Xchg:
8615       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
8616     case AtomicRMWInst::Add:
8617       return Intrinsic::riscv_masked_atomicrmw_add_i64;
8618     case AtomicRMWInst::Sub:
8619       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
8620     case AtomicRMWInst::Nand:
8621       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
8622     case AtomicRMWInst::Max:
8623       return Intrinsic::riscv_masked_atomicrmw_max_i64;
8624     case AtomicRMWInst::Min:
8625       return Intrinsic::riscv_masked_atomicrmw_min_i64;
8626     case AtomicRMWInst::UMax:
8627       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
8628     case AtomicRMWInst::UMin:
8629       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
8630     }
8631   }
8632 
8633   llvm_unreachable("Unexpected XLen\n");
8634 }
8635 
8636 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
8637     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
8638     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
8639   unsigned XLen = Subtarget.getXLen();
8640   Value *Ordering =
8641       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
8642   Type *Tys[] = {AlignedAddr->getType()};
8643   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
8644       AI->getModule(),
8645       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
8646 
8647   if (XLen == 64) {
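    // The masked atomic intrinsics take XLen-wide operands on RV64, so
    // sign-extend the i32 values produced by the generic expansion; the
    // result is truncated back to i32 below.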
8648     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
8649     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8650     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
8651   }
8652 
8653   Value *Result;
8654 
8655   // Must pass the shift amount needed to sign extend the loaded value prior
8656   // to performing a signed comparison for min/max. ShiftAmt is the number of
8657   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
8658   // is the number of bits to left+right shift the value in order to
8659   // sign-extend.
8660   if (AI->getOperation() == AtomicRMWInst::Min ||
8661       AI->getOperation() == AtomicRMWInst::Max) {
8662     const DataLayout &DL = AI->getModule()->getDataLayout();
8663     unsigned ValWidth =
8664         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
8665     Value *SextShamt =
8666         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
8667     Result = Builder.CreateCall(LrwOpScwLoop,
8668                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
8669   } else {
8670     Result =
8671         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
8672   }
8673 
8674   if (XLen == 64)
8675     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8676   return Result;
8677 }
8678 
8679 TargetLowering::AtomicExpansionKind
8680 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
8681     AtomicCmpXchgInst *CI) const {
8682   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
8683   if (Size == 8 || Size == 16)
8684     return AtomicExpansionKind::MaskedIntrinsic;
8685   return AtomicExpansionKind::None;
8686 }
8687 
8688 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
8689     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
8690     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
8691   unsigned XLen = Subtarget.getXLen();
8692   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
8693   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
8694   if (XLen == 64) {
8695     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
8696     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
8697     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8698     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
8699   }
8700   Type *Tys[] = {AlignedAddr->getType()};
8701   Function *MaskedCmpXchg =
8702       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
8703   Value *Result = Builder.CreateCall(
8704       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
8705   if (XLen == 64)
8706     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8707   return Result;
8708 }
8709 
8710 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
8711   return false;
8712 }
8713 
8714 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
8715                                                      EVT VT) const {
8716   VT = VT.getScalarType();
8717 
8718   if (!VT.isSimple())
8719     return false;
8720 
8721   switch (VT.getSimpleVT().SimpleTy) {
8722   case MVT::f16:
8723     return Subtarget.hasStdExtZfh();
8724   case MVT::f32:
8725     return Subtarget.hasStdExtF();
8726   case MVT::f64:
8727     return Subtarget.hasStdExtD();
8728   default:
8729     break;
8730   }
8731 
8732   return false;
8733 }
8734 
8735 Register RISCVTargetLowering::getExceptionPointerRegister(
8736     const Constant *PersonalityFn) const {
8737   return RISCV::X10;
8738 }
8739 
8740 Register RISCVTargetLowering::getExceptionSelectorRegister(
8741     const Constant *PersonalityFn) const {
8742   return RISCV::X11;
8743 }
8744 
8745 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions if the libcall argument
  // or return value is an f32 under the LP64 ABI.
8748   RISCVABI::ABI ABI = Subtarget.getTargetABI();
8749   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
8750     return false;
8751 
8752   return true;
8753 }
8754 
bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
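  // On RV64, i32 values are kept sign-extended in registers, so i32 libcall
  // arguments should be sign-extended regardless of the value's own
  // signedness.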
8756   if (Subtarget.is64Bit() && Type == MVT::i32)
8757     return true;
8758 
8759   return IsSigned;
8760 }
8761 
8762 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
8763                                                  SDValue C) const {
8764   // Check integral scalar types.
8765   if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
8768     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
8769       return false;
8770     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
8771       // Break the MUL to a SLLI and an ADD/SUB.
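      // For example, x * 9 becomes (x << 3) + x and x * 7 becomes
      // (x << 3) - x.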
8772       const APInt &Imm = ConstNode->getAPIntValue();
8773       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
8774           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
8775         return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size >= XLen.
8778       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
8779         return false;
8780       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
8781       // a pair of LUI/ADDI.
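      // For example, x * 4608 (0x1200) becomes ((x << 3) + x) << 9.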
8782       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
8783         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
8784         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
8785             (1 - ImmS).isPowerOf2())
          return true;
8787       }
8788     }
8789   }
8790 
8791   return false;
8792 }
8793 
8794 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
8795     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
8796     bool *Fast) const {
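  // Only vector types are reported as supporting misaligned accesses here:
  // vector accesses need only element alignment, so any access aligned to at
  // least the element size is allowed and treated as fast.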
8797   if (!VT.isVector())
8798     return false;
8799 
8800   EVT ElemVT = VT.getVectorElementType();
8801   if (Alignment >= ElemVT.getStoreSize()) {
8802     if (Fast)
8803       *Fast = true;
8804     return true;
8805   }
8806 
8807   return false;
8808 }
8809 
8810 bool RISCVTargetLowering::splitValueIntoRegisterParts(
8811     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
8812     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
8813   bool IsABIRegCopy = CC.hasValue();
8814   EVT ValueVT = Val.getValueType();
8815   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Cast the f16 to i16, extend to i32, pad with ones to make a float NaN,
    // and cast to f32.
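    // This matches the NaN-boxing used when a narrower FP value is held in a
    // wider FP register.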
8818     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
8819     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
8820     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
8821                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
8822     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
8823     Parts[0] = Val;
8824     return true;
8825   }
8826 
8827   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8828     LLVMContext &Context = *DAG.getContext();
8829     EVT ValueEltVT = ValueVT.getVectorElementType();
8830     EVT PartEltVT = PartVT.getVectorElementType();
8831     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8832     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8833     if (PartVTBitSize % ValueVTBitSize == 0) {
      // If the element types are different, bitcast to a vector type with the
      // same element type as PartVT first.
8836       if (ValueEltVT != PartEltVT) {
8837         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
8839         EVT SameEltTypeVT =
8840             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8841         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
8842       }
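      // Insert the value into the low elements of an undef vector of PartVT.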
8843       Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
8844                         Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8845       Parts[0] = Val;
8846       return true;
8847     }
8848   }
8849   return false;
8850 }
8851 
8852 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
8853     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
8854     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
8855   bool IsABIRegCopy = CC.hasValue();
8856   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8857     SDValue Val = Parts[0];
8858 
8859     // Cast the f32 to i32, truncate to i16, and cast back to f16.
8860     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
8861     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
8862     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
8863     return Val;
8864   }
8865 
8866   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8867     LLVMContext &Context = *DAG.getContext();
8868     SDValue Val = Parts[0];
8869     EVT ValueEltVT = ValueVT.getVectorElementType();
8870     EVT PartEltVT = PartVT.getVectorElementType();
8871     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8872     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8873     if (PartVTBitSize % ValueVTBitSize == 0) {
8874       EVT SameEltTypeVT = ValueVT;
      // If the element types are different, convert to a vector type with the
      // same element type as PartVT.
8877       if (ValueEltVT != PartEltVT) {
8878         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
8880         SameEltTypeVT =
8881             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8882       }
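      // The value occupies the low elements of the part; extract it and
      // bitcast back to ValueVT if the element types differ.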
8883       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
8884                         DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8885       if (ValueEltVT != PartEltVT)
8886         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
8887       return Val;
8888     }
8889   }
8890   return SDValue();
8891 }
8892 
8893 #define GET_REGISTER_MATCHER
8894 #include "RISCVGenAsmMatcher.inc"
8895 
8896 Register
8897 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
8898                                        const MachineFunction &MF) const {
8899   Register Reg = MatchRegisterAltName(RegName);
8900   if (Reg == RISCV::NoRegister)
8901     Reg = MatchRegisterName(RegName);
8902   if (Reg == RISCV::NoRegister)
8903     report_fatal_error(
8904         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
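  // Only reserved registers (either always reserved or reserved by the user)
  // may be accessed by name, since otherwise the register allocator could
  // clobber them.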
8905   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
8906   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
8907     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
8908                              StringRef(RegName) + "\"."));
8909   return Reg;
8910 }
8911 
8912 namespace llvm {
8913 namespace RISCVVIntrinsicsTable {
8914 
8915 #define GET_RISCVVIntrinsicsTable_IMPL
8916 #include "RISCVGenSearchableTables.inc"
8917 
8918 } // namespace RISCVVIntrinsicsTable
8919 
8920 } // namespace llvm
8921