1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
28 #include "llvm/CodeGen/ValueTypes.h"
29 #include "llvm/IR/DiagnosticInfo.h"
30 #include "llvm/IR/DiagnosticPrinter.h"
31 #include "llvm/IR/IntrinsicsRISCV.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/KnownBits.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "riscv-lower"
42 
43 STATISTIC(NumTailCalls, "Number of tail calls");
44 
45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
46                                          const RISCVSubtarget &STI)
47     : TargetLowering(TM), Subtarget(STI) {
48 
49   if (Subtarget.isRV32E())
50     report_fatal_error("Codegen not yet implemented for RV32E");
51 
52   RISCVABI::ABI ABI = Subtarget.getTargetABI();
53   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
54 
55   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
56       !Subtarget.hasStdExtF()) {
57     errs() << "Hard-float 'f' ABI can't be used for a target that "
58               "doesn't support the F instruction set extension (ignoring "
59               "target-abi)\n";
60     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
61   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
62              !Subtarget.hasStdExtD()) {
63     errs() << "Hard-float 'd' ABI can't be used for a target that "
64               "doesn't support the D instruction set extension (ignoring "
65               "target-abi)\n";
66     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
67   }
68 
69   switch (ABI) {
70   default:
71     report_fatal_error("Don't know how to lower this ABI");
72   case RISCVABI::ABI_ILP32:
73   case RISCVABI::ABI_ILP32F:
74   case RISCVABI::ABI_ILP32D:
75   case RISCVABI::ABI_LP64:
76   case RISCVABI::ABI_LP64F:
77   case RISCVABI::ABI_LP64D:
78     break;
79   }
80 
81   MVT XLenVT = Subtarget.getXLenVT();
82 
83   // Set up the register classes.
84   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
85 
86   if (Subtarget.hasStdExtZfh())
87     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
88   if (Subtarget.hasStdExtF())
89     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
90   if (Subtarget.hasStdExtD())
91     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
92 
93   static const MVT::SimpleValueType BoolVecVTs[] = {
94       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
95       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
96   static const MVT::SimpleValueType IntVecVTs[] = {
97       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
98       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
99       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
100       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
101       MVT::nxv4i64, MVT::nxv8i64};
102   static const MVT::SimpleValueType F16VecVTs[] = {
103       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
104       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
105   static const MVT::SimpleValueType F32VecVTs[] = {
106       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
107   static const MVT::SimpleValueType F64VecVTs[] = {
108       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
109 
110   if (Subtarget.hasStdExtV()) {
111     auto addRegClassForRVV = [this](MVT VT) {
112       unsigned Size = VT.getSizeInBits().getKnownMinValue();
113       assert(Size <= 512 && isPowerOf2_32(Size));
114       const TargetRegisterClass *RC;
115       if (Size <= 64)
116         RC = &RISCV::VRRegClass;
117       else if (Size == 128)
118         RC = &RISCV::VRM2RegClass;
119       else if (Size == 256)
120         RC = &RISCV::VRM4RegClass;
121       else
122         RC = &RISCV::VRM8RegClass;
123 
124       addRegisterClass(VT, RC);
125     };
126 
127     for (MVT VT : BoolVecVTs)
128       addRegClassForRVV(VT);
129     for (MVT VT : IntVecVTs)
130       addRegClassForRVV(VT);
131 
132     if (Subtarget.hasStdExtZfh())
133       for (MVT VT : F16VecVTs)
134         addRegClassForRVV(VT);
135 
136     if (Subtarget.hasStdExtF())
137       for (MVT VT : F32VecVTs)
138         addRegClassForRVV(VT);
139 
140     if (Subtarget.hasStdExtD())
141       for (MVT VT : F64VecVTs)
142         addRegClassForRVV(VT);
143 
144     if (Subtarget.useRVVForFixedLengthVectors()) {
145       auto addRegClassForFixedVectors = [this](MVT VT) {
146         MVT ContainerVT = getContainerForFixedLengthVector(VT);
147         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
148         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
149         addRegisterClass(VT, TRI.getRegClass(RCID));
150       };
151       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
152         if (useRVVForFixedLengthVectorVT(VT))
153           addRegClassForFixedVectors(VT);
154 
155       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
156         if (useRVVForFixedLengthVectorVT(VT))
157           addRegClassForFixedVectors(VT);
158     }
159   }
160 
161   // Compute derived properties from the register classes.
162   computeRegisterProperties(STI.getRegisterInfo());
163 
164   setStackPointerRegisterToSaveRestore(RISCV::X2);
165 
166   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
167     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
168 
169   // TODO: add all necessary setOperationAction calls.
170   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
171 
172   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
173   setOperationAction(ISD::BR_CC, XLenVT, Expand);
174   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
175   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
176 
177   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
178   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
179 
180   setOperationAction(ISD::VASTART, MVT::Other, Custom);
181   setOperationAction(ISD::VAARG, MVT::Other, Expand);
182   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
183   setOperationAction(ISD::VAEND, MVT::Other, Expand);
184 
185   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
186   if (!Subtarget.hasStdExtZbb()) {
187     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
188     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
189   }
190 
191   if (Subtarget.is64Bit()) {
192     setOperationAction(ISD::ADD, MVT::i32, Custom);
193     setOperationAction(ISD::SUB, MVT::i32, Custom);
194     setOperationAction(ISD::SHL, MVT::i32, Custom);
195     setOperationAction(ISD::SRA, MVT::i32, Custom);
196     setOperationAction(ISD::SRL, MVT::i32, Custom);
197 
198     setOperationAction(ISD::UADDO, MVT::i32, Custom);
199     setOperationAction(ISD::USUBO, MVT::i32, Custom);
200     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
201     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
202   }
203 
204   if (!Subtarget.hasStdExtM()) {
205     setOperationAction(ISD::MUL, XLenVT, Expand);
206     setOperationAction(ISD::MULHS, XLenVT, Expand);
207     setOperationAction(ISD::MULHU, XLenVT, Expand);
208     setOperationAction(ISD::SDIV, XLenVT, Expand);
209     setOperationAction(ISD::UDIV, XLenVT, Expand);
210     setOperationAction(ISD::SREM, XLenVT, Expand);
211     setOperationAction(ISD::UREM, XLenVT, Expand);
212   } else {
213     if (Subtarget.is64Bit()) {
214       setOperationAction(ISD::MUL, MVT::i32, Custom);
215       setOperationAction(ISD::MUL, MVT::i128, Custom);
216 
217       setOperationAction(ISD::SDIV, MVT::i8, Custom);
218       setOperationAction(ISD::UDIV, MVT::i8, Custom);
219       setOperationAction(ISD::UREM, MVT::i8, Custom);
220       setOperationAction(ISD::SDIV, MVT::i16, Custom);
221       setOperationAction(ISD::UDIV, MVT::i16, Custom);
222       setOperationAction(ISD::UREM, MVT::i16, Custom);
223       setOperationAction(ISD::SDIV, MVT::i32, Custom);
224       setOperationAction(ISD::UDIV, MVT::i32, Custom);
225       setOperationAction(ISD::UREM, MVT::i32, Custom);
226     } else {
227       setOperationAction(ISD::MUL, MVT::i64, Custom);
228     }
229   }
230 
231   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
232   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
233   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
234   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
235 
236   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
237   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
238   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
239 
240   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
241     if (Subtarget.is64Bit()) {
242       setOperationAction(ISD::ROTL, MVT::i32, Custom);
243       setOperationAction(ISD::ROTR, MVT::i32, Custom);
244     }
245   } else {
246     setOperationAction(ISD::ROTL, XLenVT, Expand);
247     setOperationAction(ISD::ROTR, XLenVT, Expand);
248   }
249 
250   if (Subtarget.hasStdExtZbp()) {
251     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
252     // more combining.
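    // (For example, on RV32 a 32-bit bswap corresponds to GREVI with shamt 24
    // and a 32-bit bitreverse to GREVI with shamt 31.)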
253     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
254     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
255     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
256     // BSWAP i8 doesn't exist.
257     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
258     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
259 
260     if (Subtarget.is64Bit()) {
261       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
262       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
263     }
264   } else {
265     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
266     // pattern match it directly in isel.
267     setOperationAction(ISD::BSWAP, XLenVT,
268                        Subtarget.hasStdExtZbb() ? Legal : Expand);
269   }
270 
271   if (Subtarget.hasStdExtZbb()) {
272     setOperationAction(ISD::SMIN, XLenVT, Legal);
273     setOperationAction(ISD::SMAX, XLenVT, Legal);
274     setOperationAction(ISD::UMIN, XLenVT, Legal);
275     setOperationAction(ISD::UMAX, XLenVT, Legal);
276 
277     if (Subtarget.is64Bit()) {
278       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
279       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
280       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
281       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
282     }
283   } else {
284     setOperationAction(ISD::CTTZ, XLenVT, Expand);
285     setOperationAction(ISD::CTLZ, XLenVT, Expand);
286     setOperationAction(ISD::CTPOP, XLenVT, Expand);
287   }
288 
289   if (Subtarget.hasStdExtZbt()) {
290     setOperationAction(ISD::FSHL, XLenVT, Custom);
291     setOperationAction(ISD::FSHR, XLenVT, Custom);
292     setOperationAction(ISD::SELECT, XLenVT, Legal);
293 
294     if (Subtarget.is64Bit()) {
295       setOperationAction(ISD::FSHL, MVT::i32, Custom);
296       setOperationAction(ISD::FSHR, MVT::i32, Custom);
297     }
298   } else {
299     setOperationAction(ISD::SELECT, XLenVT, Custom);
300   }
301 
302   ISD::CondCode FPCCToExpand[] = {
303       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
304       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
305       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
306 
307   ISD::NodeType FPOpToExpand[] = {
308       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
309       ISD::FP_TO_FP16};
310 
311   if (Subtarget.hasStdExtZfh())
312     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
313 
314   if (Subtarget.hasStdExtZfh()) {
315     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
316     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
317     setOperationAction(ISD::LRINT, MVT::f16, Legal);
318     setOperationAction(ISD::LLRINT, MVT::f16, Legal);
319     setOperationAction(ISD::LROUND, MVT::f16, Legal);
320     setOperationAction(ISD::LLROUND, MVT::f16, Legal);
321     for (auto CC : FPCCToExpand)
322       setCondCodeAction(CC, MVT::f16, Expand);
323     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
324     setOperationAction(ISD::SELECT, MVT::f16, Custom);
325     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
326     for (auto Op : FPOpToExpand)
327       setOperationAction(Op, MVT::f16, Expand);
328   }
329 
330   if (Subtarget.hasStdExtF()) {
331     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
332     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
333     setOperationAction(ISD::LRINT, MVT::f32, Legal);
334     setOperationAction(ISD::LLRINT, MVT::f32, Legal);
335     setOperationAction(ISD::LROUND, MVT::f32, Legal);
336     setOperationAction(ISD::LLROUND, MVT::f32, Legal);
337     for (auto CC : FPCCToExpand)
338       setCondCodeAction(CC, MVT::f32, Expand);
339     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
340     setOperationAction(ISD::SELECT, MVT::f32, Custom);
341     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
342     for (auto Op : FPOpToExpand)
343       setOperationAction(Op, MVT::f32, Expand);
344     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
345     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
346   }
347 
348   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
349     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
350 
351   if (Subtarget.hasStdExtD()) {
352     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
353     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
354     setOperationAction(ISD::LRINT, MVT::f64, Legal);
355     setOperationAction(ISD::LLRINT, MVT::f64, Legal);
356     setOperationAction(ISD::LROUND, MVT::f64, Legal);
357     setOperationAction(ISD::LLROUND, MVT::f64, Legal);
358     for (auto CC : FPCCToExpand)
359       setCondCodeAction(CC, MVT::f64, Expand);
360     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
361     setOperationAction(ISD::SELECT, MVT::f64, Custom);
362     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
363     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
364     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
365     for (auto Op : FPOpToExpand)
366       setOperationAction(Op, MVT::f64, Expand);
367     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
368     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
369   }
370 
371   if (Subtarget.is64Bit()) {
372     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
373     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
374     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
375     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
376   }
377 
378   if (Subtarget.hasStdExtF()) {
379     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
380     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
381   }
382 
383   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
384   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
385   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
386   setOperationAction(ISD::JumpTable, XLenVT, Custom);
387 
388   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
389 
390   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
391   // Unfortunately this can't be determined just from the ISA naming string.
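  // On RV32 the 64-bit cycle counter cannot be read with a single CSR access,
  // so it is custom-lowered (a cycle/cycleh read pair guarded against rollover
  // between the two reads).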
392   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
393                      Subtarget.is64Bit() ? Legal : Custom);
394 
395   setOperationAction(ISD::TRAP, MVT::Other, Legal);
396   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
397   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
398   if (Subtarget.is64Bit())
399     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
400 
401   if (Subtarget.hasStdExtA()) {
402     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
403     setMinCmpXchgSizeInBits(32);
404   } else {
405     setMaxAtomicSizeInBitsSupported(0);
406   }
407 
408   setBooleanContents(ZeroOrOneBooleanContent);
409 
410   if (Subtarget.hasStdExtV()) {
411     setBooleanVectorContents(ZeroOrOneBooleanContent);
412 
413     setOperationAction(ISD::VSCALE, XLenVT, Custom);
414 
415     // RVV intrinsics may have illegal operands.
416     // We also need to custom legalize vmv.x.s.
417     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
418     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
419     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
420     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
421     if (Subtarget.is64Bit()) {
422       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
423     } else {
424       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
425       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
426     }
427 
428     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
429 
430     static unsigned IntegerVPOps[] = {
431         ISD::VP_ADD,  ISD::VP_SUB,  ISD::VP_MUL, ISD::VP_SDIV, ISD::VP_UDIV,
432         ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,   ISD::VP_XOR,
433         ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL};
434 
435     static unsigned FloatingPointVPOps[] = {ISD::VP_FADD, ISD::VP_FSUB,
436                                             ISD::VP_FMUL, ISD::VP_FDIV};
437 
438     if (!Subtarget.is64Bit()) {
439       // We must custom-lower certain vXi64 operations on RV32 due to the vector
440       // element type being illegal.
441       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
442       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
443 
444       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
445       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
446       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
447       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
448       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
449       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
450       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
451       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
452     }
453 
454     for (MVT VT : BoolVecVTs) {
455       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
456 
457       // Mask VTs are custom-expanded into a series of standard nodes
458       setOperationAction(ISD::TRUNCATE, VT, Custom);
459       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
460       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
461       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
462 
463       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
464       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
465 
466       setOperationAction(ISD::SELECT, VT, Custom);
467       setOperationAction(ISD::SELECT_CC, VT, Expand);
468       setOperationAction(ISD::VSELECT, VT, Expand);
469 
470       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
471       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
472       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
473 
474       // RVV has native int->float & float->int conversions where the
475       // element type sizes are within one power-of-two of each other. Any
476       // wider distances between type sizes have to be lowered as sequences
477       // which progressively narrow the gap in stages.
478       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
479       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
480       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
481       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
482 
483       // Expand all extending loads to types larger than this, and truncating
484       // stores from types larger than this.
485       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
486         setTruncStoreAction(OtherVT, VT, Expand);
487         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
488         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
489         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
490       }
491     }
492 
493     for (MVT VT : IntVecVTs) {
494       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
495       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
496 
497       setOperationAction(ISD::SMIN, VT, Legal);
498       setOperationAction(ISD::SMAX, VT, Legal);
499       setOperationAction(ISD::UMIN, VT, Legal);
500       setOperationAction(ISD::UMAX, VT, Legal);
501 
502       setOperationAction(ISD::ROTL, VT, Expand);
503       setOperationAction(ISD::ROTR, VT, Expand);
504 
505       // Custom-lower extensions and truncations from/to mask types.
506       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
507       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
508       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
509 
510       // RVV has native int->float & float->int conversions where the
511       // element type sizes are within one power-of-two of each other. Any
512       // wider distances between type sizes have to be lowered as sequences
513       // which progressively narrow the gap in stages.
514       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
515       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
516       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
517       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
518 
519       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
520       // nodes which truncate by one power of two at a time.
521       setOperationAction(ISD::TRUNCATE, VT, Custom);
522 
523       // Custom-lower insert/extract operations to simplify patterns.
524       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
525       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
526 
527       // Custom-lower reduction operations to set up the corresponding custom
528       // nodes' operands.
529       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
530       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
531       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
532       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
533       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
534       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
535       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
536       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
537 
538       for (unsigned VPOpc : IntegerVPOps)
539         setOperationAction(VPOpc, VT, Custom);
540 
541       setOperationAction(ISD::LOAD, VT, Custom);
542       setOperationAction(ISD::STORE, VT, Custom);
543 
544       setOperationAction(ISD::MLOAD, VT, Custom);
545       setOperationAction(ISD::MSTORE, VT, Custom);
546       setOperationAction(ISD::MGATHER, VT, Custom);
547       setOperationAction(ISD::MSCATTER, VT, Custom);
548 
549       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
550       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
551       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
552 
553       setOperationAction(ISD::SELECT, VT, Custom);
554       setOperationAction(ISD::SELECT_CC, VT, Expand);
555 
556       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
557       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
558 
559       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
560         setTruncStoreAction(VT, OtherVT, Expand);
561         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
562         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
563         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
564       }
565     }
566 
567     // Expand various CCs to best match the RVV ISA, which natively supports UNE
568     // but no other unordered comparisons, and supports all ordered comparisons
569     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
570     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
571     // and we pattern-match those back to the "original", swapping operands once
572     // more. This way we catch both operations and both "vf" and "fv" forms with
573     // fewer patterns.
574     ISD::CondCode VFPCCToExpand[] = {
575         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
576         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
577         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
578     };
579 
580     // Sets common operation actions on RVV floating-point vector types.
581     const auto SetCommonVFPActions = [&](MVT VT) {
582       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
583       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
584       // sizes are within one power-of-two of each other. Therefore conversions
585       // between vXf16 and vXf64 must be lowered as sequences which convert via
586       // vXf32.
587       setOperationAction(ISD::FP_ROUND, VT, Custom);
588       setOperationAction(ISD::FP_EXTEND, VT, Custom);
589       // Custom-lower insert/extract operations to simplify patterns.
590       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
591       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
592       // Expand various condition codes (explained above).
593       for (auto CC : VFPCCToExpand)
594         setCondCodeAction(CC, VT, Expand);
595 
596       setOperationAction(ISD::FMINNUM, VT, Legal);
597       setOperationAction(ISD::FMAXNUM, VT, Legal);
598 
599       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
600       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
601       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
602       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
603       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
604 
605       setOperationAction(ISD::LOAD, VT, Custom);
606       setOperationAction(ISD::STORE, VT, Custom);
607 
608       setOperationAction(ISD::MLOAD, VT, Custom);
609       setOperationAction(ISD::MSTORE, VT, Custom);
610       setOperationAction(ISD::MGATHER, VT, Custom);
611       setOperationAction(ISD::MSCATTER, VT, Custom);
612 
613       setOperationAction(ISD::SELECT, VT, Custom);
614       setOperationAction(ISD::SELECT_CC, VT, Expand);
615 
616       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
617       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
618       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
619 
620       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
621 
622       for (unsigned VPOpc : FloatingPointVPOps)
623         setOperationAction(VPOpc, VT, Custom);
624     };
625 
626     // Sets common extload/truncstore actions on RVV floating-point vector
627     // types.
628     const auto SetCommonVFPExtLoadTruncStoreActions =
629         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
630           for (auto SmallVT : SmallerVTs) {
631             setTruncStoreAction(VT, SmallVT, Expand);
632             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
633           }
634         };
635 
636     if (Subtarget.hasStdExtZfh())
637       for (MVT VT : F16VecVTs)
638         SetCommonVFPActions(VT);
639 
640     for (MVT VT : F32VecVTs) {
641       if (Subtarget.hasStdExtF())
642         SetCommonVFPActions(VT);
643       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
644     }
645 
646     for (MVT VT : F64VecVTs) {
647       if (Subtarget.hasStdExtD())
648         SetCommonVFPActions(VT);
649       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
650       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
651     }
652 
653     if (Subtarget.useRVVForFixedLengthVectors()) {
654       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
655         if (!useRVVForFixedLengthVectorVT(VT))
656           continue;
657 
658         // By default everything must be expanded.
659         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
660           setOperationAction(Op, VT, Expand);
661         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
662           setTruncStoreAction(VT, OtherVT, Expand);
663           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
664           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
665           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
666         }
667 
668         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
669         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
670         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
671 
672         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
673         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
674 
675         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
676         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
677 
678         setOperationAction(ISD::LOAD, VT, Custom);
679         setOperationAction(ISD::STORE, VT, Custom);
680 
681         setOperationAction(ISD::SETCC, VT, Custom);
682 
683         setOperationAction(ISD::SELECT, VT, Custom);
684 
685         setOperationAction(ISD::TRUNCATE, VT, Custom);
686 
687         setOperationAction(ISD::BITCAST, VT, Custom);
688 
689         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
690         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
691         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
692 
693         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
694         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
695         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
696         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
697 
698         // Operations below differ between mask vectors and other vectors.
699         if (VT.getVectorElementType() == MVT::i1) {
700           setOperationAction(ISD::AND, VT, Custom);
701           setOperationAction(ISD::OR, VT, Custom);
702           setOperationAction(ISD::XOR, VT, Custom);
703           continue;
704         }
705 
706         // Use SPLAT_VECTOR to prevent type legalization from destroying the
707         // splats when type legalizing i64 scalar on RV32.
708         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
709         // improvements first.
710         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
711           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
712           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
713         }
714 
715         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
716         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
717 
718         setOperationAction(ISD::MLOAD, VT, Custom);
719         setOperationAction(ISD::MSTORE, VT, Custom);
720         setOperationAction(ISD::MGATHER, VT, Custom);
721         setOperationAction(ISD::MSCATTER, VT, Custom);
722         setOperationAction(ISD::ADD, VT, Custom);
723         setOperationAction(ISD::MUL, VT, Custom);
724         setOperationAction(ISD::SUB, VT, Custom);
725         setOperationAction(ISD::AND, VT, Custom);
726         setOperationAction(ISD::OR, VT, Custom);
727         setOperationAction(ISD::XOR, VT, Custom);
728         setOperationAction(ISD::SDIV, VT, Custom);
729         setOperationAction(ISD::SREM, VT, Custom);
730         setOperationAction(ISD::UDIV, VT, Custom);
731         setOperationAction(ISD::UREM, VT, Custom);
732         setOperationAction(ISD::SHL, VT, Custom);
733         setOperationAction(ISD::SRA, VT, Custom);
734         setOperationAction(ISD::SRL, VT, Custom);
735 
736         setOperationAction(ISD::SMIN, VT, Custom);
737         setOperationAction(ISD::SMAX, VT, Custom);
738         setOperationAction(ISD::UMIN, VT, Custom);
739         setOperationAction(ISD::UMAX, VT, Custom);
740         setOperationAction(ISD::ABS,  VT, Custom);
741 
742         setOperationAction(ISD::MULHS, VT, Custom);
743         setOperationAction(ISD::MULHU, VT, Custom);
744 
745         setOperationAction(ISD::VSELECT, VT, Custom);
746         setOperationAction(ISD::SELECT_CC, VT, Expand);
747 
748         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
749         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
750         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
751 
752         // Custom-lower reduction operations to set up the corresponding custom
753         // nodes' operands.
754         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
755         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
756         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
757         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
758         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
759 
760         for (unsigned VPOpc : IntegerVPOps)
761           setOperationAction(VPOpc, VT, Custom);
762       }
763 
764       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
765         if (!useRVVForFixedLengthVectorVT(VT))
766           continue;
767 
768         // By default everything must be expanded.
769         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
770           setOperationAction(Op, VT, Expand);
771         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
772           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
773           setTruncStoreAction(VT, OtherVT, Expand);
774         }
775 
776         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
777         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
778         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
779 
780         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
781         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
782         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
783         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
784 
785         setOperationAction(ISD::LOAD, VT, Custom);
786         setOperationAction(ISD::STORE, VT, Custom);
787         setOperationAction(ISD::MLOAD, VT, Custom);
788         setOperationAction(ISD::MSTORE, VT, Custom);
789         setOperationAction(ISD::MGATHER, VT, Custom);
790         setOperationAction(ISD::MSCATTER, VT, Custom);
791         setOperationAction(ISD::FADD, VT, Custom);
792         setOperationAction(ISD::FSUB, VT, Custom);
793         setOperationAction(ISD::FMUL, VT, Custom);
794         setOperationAction(ISD::FDIV, VT, Custom);
795         setOperationAction(ISD::FNEG, VT, Custom);
796         setOperationAction(ISD::FABS, VT, Custom);
797         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
798         setOperationAction(ISD::FSQRT, VT, Custom);
799         setOperationAction(ISD::FMA, VT, Custom);
800         setOperationAction(ISD::FMINNUM, VT, Custom);
801         setOperationAction(ISD::FMAXNUM, VT, Custom);
802 
803         setOperationAction(ISD::FP_ROUND, VT, Custom);
804         setOperationAction(ISD::FP_EXTEND, VT, Custom);
805 
806         for (auto CC : VFPCCToExpand)
807           setCondCodeAction(CC, VT, Expand);
808 
809         setOperationAction(ISD::VSELECT, VT, Custom);
810         setOperationAction(ISD::SELECT, VT, Custom);
811         setOperationAction(ISD::SELECT_CC, VT, Expand);
812 
813         setOperationAction(ISD::BITCAST, VT, Custom);
814 
815         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
816         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
817         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
818         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
819 
820         for (unsigned VPOpc : FloatingPointVPOps)
821           setOperationAction(VPOpc, VT, Custom);
822       }
823 
824       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
825       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
826       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
827       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
828       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
829       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
830       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
831       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
832     }
833   }
834 
835   // Function alignments.
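  // With the compressed (C) extension, instructions may be 16-bit aligned, so
  // functions only need 2-byte alignment; otherwise require 4 bytes.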
836   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
837   setMinFunctionAlignment(FunctionAlignment);
838   setPrefFunctionAlignment(FunctionAlignment);
839 
840   setMinimumJumpTableEntries(5);
841 
842   // Jumps are expensive, compared to logic
843   setJumpIsExpensive();
844 
845   // We can use any register for comparisons
846   setHasMultipleConditionRegisters();
847 
848   setTargetDAGCombine(ISD::AND);
849   setTargetDAGCombine(ISD::OR);
850   setTargetDAGCombine(ISD::XOR);
851   setTargetDAGCombine(ISD::ANY_EXTEND);
852   if (Subtarget.hasStdExtV()) {
853     setTargetDAGCombine(ISD::FCOPYSIGN);
854     setTargetDAGCombine(ISD::MGATHER);
855     setTargetDAGCombine(ISD::MSCATTER);
856     setTargetDAGCombine(ISD::SRA);
857     setTargetDAGCombine(ISD::SRL);
858     setTargetDAGCombine(ISD::SHL);
859   }
860 }
861 
862 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
863                                             LLVMContext &Context,
864                                             EVT VT) const {
865   if (!VT.isVector())
866     return getPointerTy(DL);
867   if (Subtarget.hasStdExtV() &&
868       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
869     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
870   return VT.changeVectorElementTypeToInteger();
871 }
872 
873 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
874   return Subtarget.getXLenVT();
875 }
876 
877 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
878                                              const CallInst &I,
879                                              MachineFunction &MF,
880                                              unsigned Intrinsic) const {
881   switch (Intrinsic) {
882   default:
883     return false;
884   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
885   case Intrinsic::riscv_masked_atomicrmw_add_i32:
886   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
887   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
888   case Intrinsic::riscv_masked_atomicrmw_max_i32:
889   case Intrinsic::riscv_masked_atomicrmw_min_i32:
890   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
891   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
892   case Intrinsic::riscv_masked_cmpxchg_i32: {
893     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
894     Info.opc = ISD::INTRINSIC_W_CHAIN;
895     Info.memVT = MVT::getVT(PtrTy->getElementType());
896     Info.ptrVal = I.getArgOperand(0);
897     Info.offset = 0;
898     Info.align = Align(4);
899     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
900                  MachineMemOperand::MOVolatile;
901     return true;
902   }
903   }
904 }
905 
906 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
907                                                 const AddrMode &AM, Type *Ty,
908                                                 unsigned AS,
909                                                 Instruction *I) const {
910   // No global is ever allowed as a base.
911   if (AM.BaseGV)
912     return false;
913 
914   // Require a 12-bit signed offset.
915   if (!isInt<12>(AM.BaseOffs))
916     return false;
917 
918   switch (AM.Scale) {
919   case 0: // "r+i" or just "i", depending on HasBaseReg.
920     break;
921   case 1:
922     if (!AM.HasBaseReg) // allow "r+i".
923       break;
924     return false; // disallow "r+r" or "r+r+i".
925   default:
926     return false;
927   }
928 
929   return true;
930 }
931 
932 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
933   return isInt<12>(Imm);
934 }
935 
936 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
937   return isInt<12>(Imm);
938 }
939 
940 // On RV32, 64-bit integers are split into their high and low parts and held
941 // in two different registers, so the trunc is free since the low register can
942 // just be used.
943 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
944   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
945     return false;
946   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
947   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
948   return (SrcBits == 64 && DestBits == 32);
949 }
950 
951 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
952   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
953       !SrcVT.isInteger() || !DstVT.isInteger())
954     return false;
955   unsigned SrcBits = SrcVT.getSizeInBits();
956   unsigned DestBits = DstVT.getSizeInBits();
957   return (SrcBits == 64 && DestBits == 32);
958 }
959 
960 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
961   // Zexts are free if they can be combined with a load.
962   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
963     EVT MemVT = LD->getMemoryVT();
964     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
965          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
966         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
967          LD->getExtensionType() == ISD::ZEXTLOAD))
968       return true;
969   }
970 
971   return TargetLowering::isZExtFree(Val, VT2);
972 }
973 
974 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
975   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
976 }
977 
978 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
979   return Subtarget.hasStdExtZbb();
980 }
981 
982 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
983   return Subtarget.hasStdExtZbb();
984 }
985 
986 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
987                                        bool ForCodeSize) const {
988   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
989     return false;
990   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
991     return false;
992   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
993     return false;
994   if (Imm.isNegZero())
995     return false;
996   return Imm.isZero();
997 }
998 
999 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
1000   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
1001          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
1002          (VT == MVT::f64 && Subtarget.hasStdExtD());
1003 }
1004 
1005 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1006                                                       CallingConv::ID CC,
1007                                                       EVT VT) const {
1008   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
1009   // end up using a GPR but that will be decided based on ABI.
1010   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1011     return MVT::f32;
1012 
1013   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1014 }
1015 
1016 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1017                                                            CallingConv::ID CC,
1018                                                            EVT VT) const {
1019   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
1020   // end up using a GPR but that will be decided based on ABI.
1021   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1022     return 1;
1023 
1024   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1025 }
1026 
1027 // Changes the condition code and swaps operands if necessary, so the SetCC
1028 // operation matches one of the comparisons supported directly by branches
1029 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1030 // with 1/-1.
1031 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1032                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1033   // Convert X > -1 to X >= 0.
1034   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1035     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1036     CC = ISD::SETGE;
1037     return;
1038   }
1039   // Convert X < 1 to 0 >= X.
1040   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1041     RHS = LHS;
1042     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1043     CC = ISD::SETGE;
1044     return;
1045   }
1046 
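  // The base ISA only provides BEQ/BNE/BLT/BGE/BLTU/BGEU, so canonicalize the
  // remaining integer condition codes by swapping the operands.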
1047   switch (CC) {
1048   default:
1049     break;
1050   case ISD::SETGT:
1051   case ISD::SETLE:
1052   case ISD::SETUGT:
1053   case ISD::SETULE:
1054     CC = ISD::getSetCCSwappedOperands(CC);
1055     std::swap(LHS, RHS);
1056     break;
1057   }
1058 }
1059 
1060 // Return the RISC-V branch opcode that matches the given DAG integer
1061 // condition code. The CondCode must be one of those supported by the RISC-V
1062 // ISA (see translateSetCCForBranch).
1063 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
1064   switch (CC) {
1065   default:
1066     llvm_unreachable("Unsupported CondCode");
1067   case ISD::SETEQ:
1068     return RISCV::BEQ;
1069   case ISD::SETNE:
1070     return RISCV::BNE;
1071   case ISD::SETLT:
1072     return RISCV::BLT;
1073   case ISD::SETGE:
1074     return RISCV::BGE;
1075   case ISD::SETULT:
1076     return RISCV::BLTU;
1077   case ISD::SETUGE:
1078     return RISCV::BGEU;
1079   }
1080 }
1081 
1082 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1083   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1084   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
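  // Mask vectors occupy one bit per element; scaling by 8 classifies nxvNi1
  // with the same LMUL as the corresponding nxvNi8 type.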
1085   if (VT.getVectorElementType() == MVT::i1)
1086     KnownSize *= 8;
1087 
1088   switch (KnownSize) {
1089   default:
1090     llvm_unreachable("Invalid LMUL.");
1091   case 8:
1092     return RISCVII::VLMUL::LMUL_F8;
1093   case 16:
1094     return RISCVII::VLMUL::LMUL_F4;
1095   case 32:
1096     return RISCVII::VLMUL::LMUL_F2;
1097   case 64:
1098     return RISCVII::VLMUL::LMUL_1;
1099   case 128:
1100     return RISCVII::VLMUL::LMUL_2;
1101   case 256:
1102     return RISCVII::VLMUL::LMUL_4;
1103   case 512:
1104     return RISCVII::VLMUL::LMUL_8;
1105   }
1106 }
1107 
1108 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1109   switch (LMul) {
1110   default:
1111     llvm_unreachable("Invalid LMUL.");
1112   case RISCVII::VLMUL::LMUL_F8:
1113   case RISCVII::VLMUL::LMUL_F4:
1114   case RISCVII::VLMUL::LMUL_F2:
1115   case RISCVII::VLMUL::LMUL_1:
1116     return RISCV::VRRegClassID;
1117   case RISCVII::VLMUL::LMUL_2:
1118     return RISCV::VRM2RegClassID;
1119   case RISCVII::VLMUL::LMUL_4:
1120     return RISCV::VRM4RegClassID;
1121   case RISCVII::VLMUL::LMUL_8:
1122     return RISCV::VRM8RegClassID;
1123   }
1124 }
1125 
1126 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1127   RISCVII::VLMUL LMUL = getLMUL(VT);
1128   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1129       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1130       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1131       LMUL == RISCVII::VLMUL::LMUL_1) {
1132     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1133                   "Unexpected subreg numbering");
1134     return RISCV::sub_vrm1_0 + Index;
1135   }
1136   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1137     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1138                   "Unexpected subreg numbering");
1139     return RISCV::sub_vrm2_0 + Index;
1140   }
1141   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1142     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1143                   "Unexpected subreg numbering");
1144     return RISCV::sub_vrm4_0 + Index;
1145   }
1146   llvm_unreachable("Invalid vector type.");
1147 }
1148 
1149 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1150   if (VT.getVectorElementType() == MVT::i1)
1151     return RISCV::VRRegClassID;
1152   return getRegClassIDForLMUL(getLMUL(VT));
1153 }
1154 
1155 // Attempt to decompose a subvector insert/extract between VecVT and
1156 // SubVecVT via subregister indices. Returns the subregister index that
1157 // can perform the subvector insert/extract with the given element index, as
1158 // well as the index corresponding to any leftover subvectors that must be
1159 // further inserted/extracted within the register class for SubVecVT.
1160 std::pair<unsigned, unsigned>
1161 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1162     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1163     const RISCVRegisterInfo *TRI) {
1164   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1165                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1166                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1167                 "Register classes not ordered");
1168   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1169   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1170   // Try to compose a subregister index that takes us from the incoming
1171   // LMUL>1 register class down to the outgoing one. At each step we halve
1172   // the LMUL:
1173   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1174   // Note that this is not guaranteed to find a subregister index, such as
1175   // when we are extracting from one VR type to another.
1176   unsigned SubRegIdx = RISCV::NoSubRegister;
1177   for (const unsigned RCID :
1178        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1179     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1180       VecVT = VecVT.getHalfNumVectorElementsVT();
1181       bool IsHi =
1182           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1183       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1184                                             getSubregIndexByMVT(VecVT, IsHi));
1185       if (IsHi)
1186         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1187     }
1188   return {SubRegIdx, InsertExtractIdx};
1189 }
1190 
1191 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1192 // stores for those types.
1193 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1194   return !Subtarget.useRVVForFixedLengthVectors() ||
1195          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1196 }
1197 
1198 static bool useRVVForFixedLengthVectorVT(MVT VT,
1199                                          const RISCVSubtarget &Subtarget) {
1200   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1201   if (!Subtarget.useRVVForFixedLengthVectors())
1202     return false;
1203 
1204   // We only support a set of vector types with a consistent maximum fixed size
1205   // across all supported vector element types to avoid legalization issues.
1206   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1207   // fixed-length vector type we support is 1024 bytes.
1208   if (VT.getFixedSizeInBits() > 1024 * 8)
1209     return false;
1210 
1211   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1212 
1213   // Don't use RVV for vectors we cannot scalarize if required.
1214   switch (VT.getVectorElementType().SimpleTy) {
1215   // i1 is supported but has different rules.
1216   default:
1217     return false;
1218   case MVT::i1:
1219     // Masks can only use a single register.
1220     if (VT.getVectorNumElements() > MinVLen)
1221       return false;
1222     MinVLen /= 8;
1223     break;
1224   case MVT::i8:
1225   case MVT::i16:
1226   case MVT::i32:
1227   case MVT::i64:
1228     break;
1229   case MVT::f16:
1230     if (!Subtarget.hasStdExtZfh())
1231       return false;
1232     break;
1233   case MVT::f32:
1234     if (!Subtarget.hasStdExtF())
1235       return false;
1236     break;
1237   case MVT::f64:
1238     if (!Subtarget.hasStdExtD())
1239       return false;
1240     break;
1241   }
1242 
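  // For example, with a guaranteed minimum VLEN of 128, a 512-bit fixed-length
  // vector such as v16i32 needs a register group of LMUL=4.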
1243   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1244   // Don't use RVV for types that don't fit.
1245   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1246     return false;
1247 
1248   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1249   // the base fixed length RVV support in place.
1250   if (!VT.isPow2VectorType())
1251     return false;
1252 
1253   return true;
1254 }
1255 
1256 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1257   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1258 }
1259 
1260 // Return the scalable container type to use for fixed-length vector type VT.
1261 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1262                                             const RISCVSubtarget &Subtarget) {
1263   // This may be called before legal types are set up.
1264   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1265           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1266          "Expected legal fixed length vector!");
1267 
1268   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1269 
1270   MVT EltVT = VT.getVectorElementType();
1271   switch (EltVT.SimpleTy) {
1272   default:
1273     llvm_unreachable("unexpected element type for RVV container");
1274   case MVT::i1:
1275   case MVT::i8:
1276   case MVT::i16:
1277   case MVT::i32:
1278   case MVT::i64:
1279   case MVT::f16:
1280   case MVT::f32:
1281   case MVT::f64: {
1282     // We prefer to use LMUL=1 for VLEN sized types. Use fractional LMULs for
1283     // narrower types, but we can't have a fractional LMUL with a denominator
1284     // larger than 64/SEW.
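    // For example, with MinVLen=128 a fixed-length v8i32 (256 bits) maps to
    // the LMUL=2 container nxv4i32, while v2i32 maps to the fractional
    // nxv1i32.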
1285     unsigned NumElts =
1286         divideCeil(VT.getVectorNumElements(), MinVLen / RISCV::RVVBitsPerBlock);
1287     return MVT::getScalableVectorVT(EltVT, NumElts);
1288   }
1289   }
1290 }
1291 
1292 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1293                                             const RISCVSubtarget &Subtarget) {
1294   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1295                                           Subtarget);
1296 }
1297 
1298 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1299   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1300 }
1301 
1302 // Grow V to consume an entire RVV register.
1303 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1304                                        const RISCVSubtarget &Subtarget) {
1305   assert(VT.isScalableVector() &&
1306          "Expected to convert into a scalable vector!");
1307   assert(V.getValueType().isFixedLengthVector() &&
1308          "Expected a fixed length vector operand!");
1309   SDLoc DL(V);
1310   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
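  // Inserting the fixed-length value at index 0 of an undef scalable vector is
  // effectively a no-op "cast" into the wider container type.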
1311   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1312 }
1313 
1314 // Shrink V so it's just big enough to maintain a VT's worth of data.
1315 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1316                                          const RISCVSubtarget &Subtarget) {
1317   assert(VT.isFixedLengthVector() &&
1318          "Expected to convert into a fixed length vector!");
1319   assert(V.getValueType().isScalableVector() &&
1320          "Expected a scalable vector operand!");
1321   SDLoc DL(V);
1322   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1323   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1324 }
1325 
1326 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1327 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1328 // the vector type that it is contained in.
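// For fixed-length vectors the VL is the exact element count; for scalable
// vectors the X0 register is passed, which the backend treats as VLMAX.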
1329 static std::pair<SDValue, SDValue>
1330 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1331                 const RISCVSubtarget &Subtarget) {
1332   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1333   MVT XLenVT = Subtarget.getXLenVT();
1334   SDValue VL = VecVT.isFixedLengthVector()
1335                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1336                    : DAG.getRegister(RISCV::X0, XLenVT);
1337   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1338   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1339   return {Mask, VL};
1340 }
1341 
1342 // As above but assuming the given type is a scalable vector type.
1343 static std::pair<SDValue, SDValue>
1344 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1345                         const RISCVSubtarget &Subtarget) {
1346   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1347   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1348 }
1349 
1350 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1351 // of either is (currently) supported. This can get us into an infinite loop
1352 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1353 // as a ..., etc.
1354 // Until either (or both) of these can reliably lower any node, reporting that
1355 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1356 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1357 // which is not desirable.
1358 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1359     EVT VT, unsigned DefinedValues) const {
1360   return false;
1361 }
1362 
1363 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1364   // Only splats are currently supported.
1365   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1366     return true;
1367 
1368   return false;
1369 }
1370 
1371 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1372                                  const RISCVSubtarget &Subtarget) {
1373   MVT VT = Op.getSimpleValueType();
1374   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1375 
1376   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1377 
1378   SDLoc DL(Op);
1379   SDValue Mask, VL;
1380   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1381 
1382   unsigned Opc =
1383       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1384   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1385   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1386 }
1387 
1388 struct VIDSequence {
1389   int64_t Step;
1390   int64_t Addend;
1391 };
1392 
1393 // Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
1394 // to the (non-zero) step S and start value X. This can be then lowered as the
1395 // RVV sequence (VID * S) + X, for example.
1396 // Note that this method will also match potentially unappealing index
1397 // sequences, like <i32 0, i32 50939494>, however it is left to the caller to
1398 // determine whether this is worth generating code for.
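// For example, <i32 1, i32 3, i32 5, i32 7> yields Step=2 and Addend=1.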
1399 static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
1400   unsigned NumElts = Op.getNumOperands();
1401   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
1402   if (!Op.getValueType().isInteger())
1403     return None;
1404 
1405   Optional<int64_t> SeqStep, SeqAddend;
1406   Optional<std::pair<uint64_t, unsigned>> PrevElt;
1407   unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
1408   for (unsigned Idx = 0; Idx < NumElts; Idx++) {
1409     // Assume undef elements match the sequence; we just have to be careful
1410     // when interpolating across them.
1411     if (Op.getOperand(Idx).isUndef())
1412       continue;
1413     // The BUILD_VECTOR must be all constants.
1414     if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
1415       return None;
1416 
1417     uint64_t Val = Op.getConstantOperandVal(Idx) &
1418                    maskTrailingOnes<uint64_t>(EltSizeInBits);
1419 
1420     if (PrevElt) {
1421       // Calculate the step since the last non-undef element, and ensure
1422       // it's consistent across the entire sequence.
1423       int64_t Diff = SignExtend64(Val - PrevElt->first, EltSizeInBits);
1424       // The difference must cleanly divide the element span.
1425       if (Diff % (Idx - PrevElt->second) != 0)
1426         return None;
1427       int64_t Step = Diff / (Idx - PrevElt->second);
1428       // A zero step indicates we're either not an index sequence, or we
1429       // have a fractional step. This must be handled by more complex
1430       // pattern recognition (undefs complicate things here).
1431       if (Step == 0)
1432         return None;
1433       if (!SeqStep)
1434         SeqStep = Step;
1435       else if (Step != SeqStep)
1436         return None;
1437     }
1438 
1439     // Record and/or check any addend.
1440     if (SeqStep) {
1441       int64_t Addend =
1442           SignExtend64(Val - (Idx * (uint64_t)*SeqStep), EltSizeInBits);
1443       if (!SeqAddend)
1444         SeqAddend = Addend;
1445       else if (SeqAddend != Addend)
1446         return None;
1447     }
1448 
1449     // Record this non-undef element for later.
1450     PrevElt = std::make_pair(Val, Idx);
1451   }
1452   // We need to have logged both a step and an addend for this to count as
1453   // a legal index sequence.
1454   if (!SeqStep || !SeqAddend)
1455     return None;
1456 
1457   return VIDSequence{*SeqStep, *SeqAddend};
1458 }
1459 
1460 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1461                                  const RISCVSubtarget &Subtarget) {
1462   MVT VT = Op.getSimpleValueType();
1463   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1464 
1465   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1466 
1467   SDLoc DL(Op);
1468   SDValue Mask, VL;
1469   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1470 
1471   MVT XLenVT = Subtarget.getXLenVT();
1472   unsigned NumElts = Op.getNumOperands();
1473 
1474   if (VT.getVectorElementType() == MVT::i1) {
1475     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1476       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1477       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1478     }
1479 
1480     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1481       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1482       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1483     }
1484 
1485     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1486     // scalar integer chunks whose bit-width depends on the number of mask
1487     // bits and XLEN.
1488     // First, determine the most appropriate scalar integer type to use. This
1489     // is at most XLenVT, but may be shrunk to a smaller vector element type
1490     // according to the size of the final vector - use i8 chunks rather than
1491     // XLenVT if we're producing a v8i1. This results in more consistent
1492     // codegen across RV32 and RV64.
1493     unsigned NumViaIntegerBits =
1494         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
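    // For example, a v64i1 mask uses i32 chunks (v2i32) on RV32 but a single
    // i64 chunk (v1i64) on RV64.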
1495     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1496       // If we have to use more than one INSERT_VECTOR_ELT then this
1497       // optimization is likely to increase code size; avoid performing it in
1498       // such a case. We can use a load from a constant pool in this case.
1499       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1500         return SDValue();
1501       // Now we can create our integer vector type. Note that it may be larger
1502       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1503       MVT IntegerViaVecVT =
1504           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1505                            divideCeil(NumElts, NumViaIntegerBits));
1506 
1507       uint64_t Bits = 0;
1508       unsigned BitPos = 0, IntegerEltIdx = 0;
1509       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1510 
1511       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1512         // Once we accumulate enough bits to fill our scalar type, insert into
1513         // our vector and clear our accumulated data.
1514         if (I != 0 && I % NumViaIntegerBits == 0) {
1515           if (NumViaIntegerBits <= 32)
1516             Bits = SignExtend64(Bits, 32);
1517           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1518           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1519                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1520           Bits = 0;
1521           BitPos = 0;
1522           IntegerEltIdx++;
1523         }
1524         SDValue V = Op.getOperand(I);
1525         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1526         Bits |= ((uint64_t)BitValue << BitPos);
1527       }
1528 
1529       // Insert the (remaining) scalar value into position in our integer
1530       // vector type.
1531       if (NumViaIntegerBits <= 32)
1532         Bits = SignExtend64(Bits, 32);
1533       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1534       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1535                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1536 
1537       if (NumElts < NumViaIntegerBits) {
1538         // If we're producing a smaller vector than our minimum legal integer
1539         // type, bitcast to the equivalent (known-legal) mask type, and extract
1540         // our final mask.
1541         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1542         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1543         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1544                           DAG.getConstant(0, DL, XLenVT));
1545       } else {
1546         // Else we must have produced an integer type with the same size as the
1547         // mask type; bitcast for the final result.
1548         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1549         Vec = DAG.getBitcast(VT, Vec);
1550       }
1551 
1552       return Vec;
1553     }
1554 
1555     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
1556     // vector type, we have a legal equivalently-sized i8 type, so we can use
1557     // that.
1558     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
1559     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
1560 
1561     SDValue WideVec;
1562     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1563       // For a splat, perform a scalar truncate before creating the wider
1564       // vector.
1565       assert(Splat.getValueType() == XLenVT &&
1566              "Unexpected type for i1 splat value");
1567       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
1568                           DAG.getConstant(1, DL, XLenVT));
1569       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
1570     } else {
1571       SmallVector<SDValue, 8> Ops(Op->op_values());
1572       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
1573       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
1574       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
1575     }
1576 
1577     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
1578   }
1579 
1580   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1581     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1582                                         : RISCVISD::VMV_V_X_VL;
1583     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1584     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1585   }
1586 
1587   // Try and match index sequences, which we can lower to the vid instruction
1588   // with optional modifications. An all-undef vector is matched by
1589   // getSplatValue, above.
1590   if (auto SimpleVID = isSimpleVIDSequence(Op)) {
1591     int64_t Step = SimpleVID->Step;
1592     int64_t Addend = SimpleVID->Addend;
1593     // Only emit VIDs with suitably-small steps/addends. We use imm5 as a
1594     // threshold since it's the immediate value many RVV instructions accept.
1595     if (isInt<5>(Step) && isInt<5>(Addend)) {
1596       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1597       // Convert right out of the scalable type so we can use standard ISD
1598       // nodes for the rest of the computation. If we used scalable types with
1599       // these, we'd lose the fixed-length vector info and generate worse
1600       // vsetvli code.
1601       VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
1602       assert(Step != 0 && "Invalid step");
1603       bool Negate = false;
1604       if (Step != 1) {
1605         int64_t SplatStepVal = Step;
1606         unsigned Opcode = ISD::MUL;
1607         if (isPowerOf2_64(std::abs(Step))) {
1608           Negate = Step < 0;
1609           Opcode = ISD::SHL;
1610           SplatStepVal = Log2_64(std::abs(Step));
1611         }
1612         SDValue SplatStep = DAG.getSplatVector(
1613             VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
1614         VID = DAG.getNode(Opcode, DL, VT, VID, SplatStep);
1615       }
1616       if (Addend != 0 || Negate) {
1617         SDValue SplatAddend =
1618             DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
1619         VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend, VID);
1620       }
1621       return VID;
1622     }
1623   }
1624 
1625   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1626   // when re-interpreted as a vector with a larger element type. For example,
1627   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
1628   // could instead be splatted as
1629   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1630   // TODO: This optimization could also work on non-constant splats, but it
1631   // would require bit-manipulation instructions to construct the splat value.
1632   SmallVector<SDValue> Sequence;
1633   unsigned EltBitSize = VT.getScalarSizeInBits();
1634   const auto *BV = cast<BuildVectorSDNode>(Op);
1635   if (VT.isInteger() && EltBitSize < 64 &&
1636       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1637       BV->getRepeatedSequence(Sequence) &&
1638       (Sequence.size() * EltBitSize) <= 64) {
1639     unsigned SeqLen = Sequence.size();
1640     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1641     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1642     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1643             ViaIntVT == MVT::i64) &&
1644            "Unexpected sequence type");
1645 
1646     unsigned EltIdx = 0;
1647     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1648     uint64_t SplatValue = 0;
1649     // Construct the amalgamated value which can be splatted as this larger
1650     // vector type.
1651     for (const auto &SeqV : Sequence) {
1652       if (!SeqV.isUndef())
1653         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1654                        << (EltIdx * EltBitSize));
1655       EltIdx++;
1656     }
1657 
1658     // On RV64, sign-extend from 32 to 64 bits where possible in order to
1659     // achieve better constant materialization.
1660     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1661       SplatValue = SignExtend64(SplatValue, 32);
1662 
1663     // Since we can't introduce illegal i64 types at this stage, we can only
1664     // perform an i64 splat on RV32 if the value sign-extends from its low 32
1665     // bits. That way we can use RVV instructions to splat.
1666     assert((ViaIntVT.bitsLE(XLenVT) ||
1667             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1668            "Unexpected bitcast sequence");
1669     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1670       SDValue ViaVL =
1671           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1672       MVT ViaContainerVT =
1673           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
1674       SDValue Splat =
1675           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1676                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1677       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1678       return DAG.getBitcast(VT, Splat);
1679     }
1680   }
1681 
1682   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1683   // which constitute a large proportion of the elements. In such cases we can
1684   // splat a vector with the dominant element and make up the shortfall with
1685   // INSERT_VECTOR_ELTs.
1686   // Note that this also covers vectors of just 2 defined elements: the
1687   // upper-most element is the "dominant" one, allowing us to use a splat to
1688   // "insert" the upper element, and an insert of the lower element at position
1689   // 0, which improves codegen.
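  // For example, <i32 2, i32 2, i32 7, i32 2> is lowered as a splat of 2 plus
  // a single INSERT_VECTOR_ELT of 7 at index 2.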
1690   SDValue DominantValue;
1691   unsigned MostCommonCount = 0;
1692   DenseMap<SDValue, unsigned> ValueCounts;
1693   unsigned NumUndefElts =
1694       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1695 
1696   for (SDValue V : Op->op_values()) {
1697     if (V.isUndef())
1698       continue;
1699 
1700     ValueCounts.insert(std::make_pair(V, 0));
1701     unsigned &Count = ValueCounts[V];
1702 
1703     // Is this value dominant? In case of a tie, prefer the highest element as
1704     // it's cheaper to insert near the beginning of a vector than it is at the
1705     // end.
1706     if (++Count >= MostCommonCount) {
1707       DominantValue = V;
1708       MostCommonCount = Count;
1709     }
1710   }
1711 
1712   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
1713   unsigned NumDefElts = NumElts - NumUndefElts;
1714   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1715 
1716   // Don't perform this optimization when optimizing for size, since
1717   // materializing elements and inserting them tends to cause code bloat.
1718   if (!DAG.shouldOptForSize() &&
1719       ((MostCommonCount > DominantValueCountThreshold) ||
1720        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
1721     // Start by splatting the most common element.
1722     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1723 
1724     DenseSet<SDValue> Processed{DominantValue};
1725     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
1726     for (const auto &OpIdx : enumerate(Op->ops())) {
1727       const SDValue &V = OpIdx.value();
1728       if (V.isUndef() || !Processed.insert(V).second)
1729         continue;
1730       if (ValueCounts[V] == 1) {
1731         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
1732                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
1733       } else {
1734         // Blend in all instances of this value using a VSELECT, using a
1735         // mask where each bit signals whether that element is the one
1736         // we're after.
1737         SmallVector<SDValue> Ops;
1738         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
1739           return DAG.getConstant(V == V1, DL, XLenVT);
1740         });
1741         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
1742                           DAG.getBuildVector(SelMaskTy, DL, Ops),
1743                           DAG.getSplatBuildVector(VT, DL, V), Vec);
1744       }
1745     }
1746 
1747     return Vec;
1748   }
1749 
1750   return SDValue();
1751 }
1752 
1753 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
1754                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
1755   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
1756     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
1757     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
1758     // If Hi is just Lo's sign bit (i.e. Lo sign-extends to i64), lower this
1759     // as a custom node to try and match RVV vector/scalar instructions.
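    // For example, (Lo=5, Hi=0) and (Lo=-1, Hi=-1) can both be splatted from
    // Lo alone.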
1760     if ((LoC >> 31) == HiC)
1761       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
1762   }
1763 
1764   // Fall back to a stack store and stride x0 vector load.
1765   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
1766 }
1767 
1768 // Called by type legalization to handle splat of i64 on RV32.
1769 // FIXME: We can optimize this when the type has sign or zero bits in one
1770 // of the halves.
1771 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
1772                                    SDValue VL, SelectionDAG &DAG) {
1773   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
1774   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1775                            DAG.getConstant(0, DL, MVT::i32));
1776   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1777                            DAG.getConstant(1, DL, MVT::i32));
1778   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
1779 }
1780 
1781 // This function lowers a splat of a scalar operand Splat with the vector
1782 // length VL. It ensures the final sequence is type legal, which is useful when
1783 // lowering a splat after type legalization.
1784 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
1785                                 SelectionDAG &DAG,
1786                                 const RISCVSubtarget &Subtarget) {
1787   if (VT.isFloatingPoint())
1788     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
1789 
1790   MVT XLenVT = Subtarget.getXLenVT();
1791 
1792   // Simplest case is that the operand needs to be promoted to XLenVT.
1793   if (Scalar.getValueType().bitsLE(XLenVT)) {
1794     // If the operand is a constant, sign extend to increase our chances
1795     // of being able to use a .vi instruction. ANY_EXTEND would become a
1796     // zero extend and the simm5 check in isel would fail.
1797     // FIXME: Should we ignore the upper bits in isel instead?
1798     unsigned ExtOpc =
1799         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
1800     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
1801     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
1802   }
1803 
1804   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
1805          "Unexpected scalar for splat lowering!");
1806 
1807   // Otherwise use the more complicated splatting algorithm.
1808   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
1809 }
1810 
1811 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1812                                    const RISCVSubtarget &Subtarget) {
1813   SDValue V1 = Op.getOperand(0);
1814   SDValue V2 = Op.getOperand(1);
1815   SDLoc DL(Op);
1816   MVT XLenVT = Subtarget.getXLenVT();
1817   MVT VT = Op.getSimpleValueType();
1818   unsigned NumElts = VT.getVectorNumElements();
1819   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1820 
1821   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1822 
1823   SDValue TrueMask, VL;
1824   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1825 
1826   if (SVN->isSplat()) {
1827     const int Lane = SVN->getSplatIndex();
1828     if (Lane >= 0) {
1829       MVT SVT = VT.getVectorElementType();
1830 
1831       // Turn splatted vector load into a strided load with an X0 stride.
1832       SDValue V = V1;
1833       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
1834       // with undef.
1835       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
1836       int Offset = Lane;
1837       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
1838         int OpElements =
1839             V.getOperand(0).getSimpleValueType().getVectorNumElements();
1840         V = V.getOperand(Offset / OpElements);
1841         Offset %= OpElements;
1842       }
1843 
1844       // We need to ensure the load isn't atomic or volatile.
1845       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
1846         auto *Ld = cast<LoadSDNode>(V);
1847         Offset *= SVT.getStoreSize();
1848         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
1849                                                    TypeSize::Fixed(Offset), DL);
1850 
1851         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
1852         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
1853           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
1854           SDValue IntID =
1855               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1856           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
1857                            DAG.getRegister(RISCV::X0, XLenVT), VL};
1858           SDValue NewLoad = DAG.getMemIntrinsicNode(
1859               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
1860               DAG.getMachineFunction().getMachineMemOperand(
1861                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
1862           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
1863           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
1864         }
1865 
1866         // Otherwise use a scalar load and splat. This will give the best
1867         // opportunity to fold a splat into the operation. ISel can turn it into
1868         // the x0 strided load if we aren't able to fold away the select.
1869         if (SVT.isFloatingPoint())
1870           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
1871                           Ld->getPointerInfo().getWithOffset(Offset),
1872                           Ld->getOriginalAlign(),
1873                           Ld->getMemOperand()->getFlags());
1874         else
1875           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
1876                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
1877                              Ld->getOriginalAlign(),
1878                              Ld->getMemOperand()->getFlags());
1879         DAG.makeEquivalentMemoryOrdering(Ld, V);
1880 
1881         unsigned Opc =
1882             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1883         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
1884         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1885       }
1886 
1887       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1888       assert(Lane < (int)NumElts && "Unexpected lane!");
1889       SDValue Gather =
1890           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1891                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
1892       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1893     }
1894   }
1895 
1896   // Detect shuffles which can be re-expressed as vector selects; these are
1897   // shuffles in which each element in the destination is taken from an element
1898   // at the corresponding index in either source vector.
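  // For example, with two v4 sources, the mask <0, 5, 2, 7> takes element i
  // from either V1 or V2 and is therefore a select.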
1899   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
1900     int MaskIndex = MaskIdx.value();
1901     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
1902   });
1903 
1904   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
1905 
1906   SmallVector<SDValue> MaskVals;
1907   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
1908   // merged with a second vrgather.
1909   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
1910 
1911   // By default we preserve the original operand order, and use a mask to
1912   // select LHS as true and RHS as false. However, since RVV vector selects may
1913   // feature splats but only on the LHS, we may choose to invert our mask and
1914   // instead select between RHS and LHS.
1915   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
1916   bool InvertMask = IsSelect == SwapOps;
1917 
1918   // Now construct the mask that will be used by the vselect or blended
1919   // vrgather operation. For vrgathers, construct the appropriate indices into
1920   // each vector.
1921   for (int MaskIndex : SVN->getMask()) {
1922     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
1923     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
1924     if (!IsSelect) {
1925       bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
1926       GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
1927                                      ? DAG.getConstant(MaskIndex, DL, XLenVT)
1928                                      : DAG.getUNDEF(XLenVT));
1929       GatherIndicesRHS.push_back(
1930           IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
1931                             : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
1932     }
1933   }
1934 
1935   if (SwapOps) {
1936     std::swap(V1, V2);
1937     std::swap(GatherIndicesLHS, GatherIndicesRHS);
1938   }
1939 
1940   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
1941   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
1942   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
1943 
1944   if (IsSelect)
1945     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
1946 
1947   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
1948     // On such a large vector we're unable to use i8 as the index type.
1949     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
1950     // may involve vector splitting if we're already at LMUL=8, or our
1951     // user-supplied maximum fixed-length LMUL.
1952     return SDValue();
1953   }
1954 
1955   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
1956   MVT IndexVT = VT.changeTypeToInteger();
1957   // Since we can't introduce illegal index types at this stage, use i16 and
1958   // vrgatherei16 if the corresponding index type for plain vrgather is greater
1959   // than XLenVT.
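  // For example, a v4i64 shuffle on RV32 would otherwise need i64 indices,
  // which aren't legal there; we use v4i16 indices with vrgatherei16 instead.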
1960   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
1961     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
1962     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
1963   }
1964 
1965   MVT IndexContainerVT =
1966       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
1967 
1968   SDValue Gather;
1969   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
1970   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
1971   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
1972     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
1973   } else {
1974     SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
1975     LHSIndices =
1976         convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
1977 
1978     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1979     Gather =
1980         DAG.getNode(GatherOpc, DL, ContainerVT, V1, LHSIndices, TrueMask, VL);
1981   }
1982 
1983   // If a second vector operand is used by this shuffle, blend it in with an
1984   // additional vrgather.
1985   if (!V2.isUndef()) {
1986     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
1987     SelectMask =
1988         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
1989 
1990     SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
1991     RHSIndices =
1992         convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
1993 
1994     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
1995     V2 = DAG.getNode(GatherOpc, DL, ContainerVT, V2, RHSIndices, TrueMask, VL);
1996     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
1997                          Gather, VL);
1998   }
1999 
2000   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
2001 }
2002 
2003 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
2004                                      SDLoc DL, SelectionDAG &DAG,
2005                                      const RISCVSubtarget &Subtarget) {
2006   if (VT.isScalableVector())
2007     return DAG.getFPExtendOrRound(Op, DL, VT);
2008   assert(VT.isFixedLengthVector() &&
2009          "Unexpected value type for RVV FP extend/round lowering");
2010   SDValue Mask, VL;
2011   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2012   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
2013                         ? RISCVISD::FP_EXTEND_VL
2014                         : RISCVISD::FP_ROUND_VL;
2015   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
2016 }
2017 
2018 // While RVV has alignment restrictions, we should always be able to load as a
2019 // legal equivalently-sized byte-typed vector instead. This method is
2020 // responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
2021 // the load is already correctly-aligned, it returns SDValue().
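// For example, an underaligned nxv4i32 load is re-emitted as an nxv16i8 load
// whose result is bitcast back to nxv4i32.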
2022 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
2023                                                     SelectionDAG &DAG) const {
2024   auto *Load = cast<LoadSDNode>(Op);
2025   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
2026 
2027   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2028                                      Load->getMemoryVT(),
2029                                      *Load->getMemOperand()))
2030     return SDValue();
2031 
2032   SDLoc DL(Op);
2033   MVT VT = Op.getSimpleValueType();
2034   unsigned EltSizeBits = VT.getScalarSizeInBits();
2035   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2036          "Unexpected unaligned RVV load type");
2037   MVT NewVT =
2038       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2039   assert(NewVT.isValid() &&
2040          "Expecting equally-sized RVV vector types to be legal");
2041   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
2042                           Load->getPointerInfo(), Load->getOriginalAlign(),
2043                           Load->getMemOperand()->getFlags());
2044   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
2045 }
2046 
2047 // While RVV has alignment restrictions, we should always be able to store as a
2048 // legal equivalently-sized byte-typed vector instead. This method is
2049 // responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
2050 // returns SDValue() if the store is already correctly aligned.
2051 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
2052                                                      SelectionDAG &DAG) const {
2053   auto *Store = cast<StoreSDNode>(Op);
2054   assert(Store && Store->getValue().getValueType().isVector() &&
2055          "Expected vector store");
2056 
2057   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
2058                                      Store->getMemoryVT(),
2059                                      *Store->getMemOperand()))
2060     return SDValue();
2061 
2062   SDLoc DL(Op);
2063   SDValue StoredVal = Store->getValue();
2064   MVT VT = StoredVal.getSimpleValueType();
2065   unsigned EltSizeBits = VT.getScalarSizeInBits();
2066   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
2067          "Unexpected unaligned RVV store type");
2068   MVT NewVT =
2069       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
2070   assert(NewVT.isValid() &&
2071          "Expecting equally-sized RVV vector types to be legal");
2072   StoredVal = DAG.getBitcast(NewVT, StoredVal);
2073   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
2074                       Store->getPointerInfo(), Store->getOriginalAlign(),
2075                       Store->getMemOperand()->getFlags());
2076 }
2077 
2078 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
2079                                             SelectionDAG &DAG) const {
2080   switch (Op.getOpcode()) {
2081   default:
2082     report_fatal_error("unimplemented operand");
2083   case ISD::GlobalAddress:
2084     return lowerGlobalAddress(Op, DAG);
2085   case ISD::BlockAddress:
2086     return lowerBlockAddress(Op, DAG);
2087   case ISD::ConstantPool:
2088     return lowerConstantPool(Op, DAG);
2089   case ISD::JumpTable:
2090     return lowerJumpTable(Op, DAG);
2091   case ISD::GlobalTLSAddress:
2092     return lowerGlobalTLSAddress(Op, DAG);
2093   case ISD::SELECT:
2094     return lowerSELECT(Op, DAG);
2095   case ISD::BRCOND:
2096     return lowerBRCOND(Op, DAG);
2097   case ISD::VASTART:
2098     return lowerVASTART(Op, DAG);
2099   case ISD::FRAMEADDR:
2100     return lowerFRAMEADDR(Op, DAG);
2101   case ISD::RETURNADDR:
2102     return lowerRETURNADDR(Op, DAG);
2103   case ISD::SHL_PARTS:
2104     return lowerShiftLeftParts(Op, DAG);
2105   case ISD::SRA_PARTS:
2106     return lowerShiftRightParts(Op, DAG, true);
2107   case ISD::SRL_PARTS:
2108     return lowerShiftRightParts(Op, DAG, false);
2109   case ISD::BITCAST: {
2110     SDLoc DL(Op);
2111     EVT VT = Op.getValueType();
2112     SDValue Op0 = Op.getOperand(0);
2113     EVT Op0VT = Op0.getValueType();
2114     MVT XLenVT = Subtarget.getXLenVT();
2115     if (VT.isFixedLengthVector()) {
2116       // We can handle fixed length vector bitcasts with a simple replacement
2117       // in isel.
2118       if (Op0VT.isFixedLengthVector())
2119         return Op;
2120       // When bitcasting from scalar to fixed-length vector, insert the scalar
2121       // into a one-element vector of the result type, and perform a vector
2122       // bitcast.
2123       if (!Op0VT.isVector()) {
2124         auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2125         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2126                                               DAG.getUNDEF(BVT), Op0,
2127                                               DAG.getConstant(0, DL, XLenVT)));
2128       }
2129       return SDValue();
2130     }
2131     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2132     // thus: bitcast the vector to a one-element vector type whose element type
2133     // is the same as the result type, and extract the first element.
2134     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2135       LLVMContext &Context = *DAG.getContext();
2136       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
2137       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2138                          DAG.getConstant(0, DL, XLenVT));
2139     }
2140     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2141       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2142       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2143       return FPConv;
2144     }
2145     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2146         Subtarget.hasStdExtF()) {
2147       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2148       SDValue FPConv =
2149           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2150       return FPConv;
2151     }
2152     return SDValue();
2153   }
2154   case ISD::INTRINSIC_WO_CHAIN:
2155     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2156   case ISD::INTRINSIC_W_CHAIN:
2157     return LowerINTRINSIC_W_CHAIN(Op, DAG);
2158   case ISD::BSWAP:
2159   case ISD::BITREVERSE: {
2160     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
2161     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
2162     MVT VT = Op.getSimpleValueType();
2163     SDLoc DL(Op);
2164     // Start with the maximum immediate value which is the bitwidth - 1.
2165     unsigned Imm = VT.getSizeInBits() - 1;
2166     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
2167     if (Op.getOpcode() == ISD::BSWAP)
2168       Imm &= ~0x7U;
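    // For example, for an i64 on RV64, BITREVERSE becomes GREV with immediate
    // 63 and BSWAP becomes GREV with immediate 56.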
2169     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
2170                        DAG.getConstant(Imm, DL, VT));
2171   }
2172   case ISD::FSHL:
2173   case ISD::FSHR: {
2174     MVT VT = Op.getSimpleValueType();
2175     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
2176     SDLoc DL(Op);
2177     if (Op.getOperand(2).getOpcode() == ISD::Constant)
2178       return Op;
2179     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
2180     // use log2(XLen) bits. Mask the shift amount accordingly.
2181     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2182     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2183                                 DAG.getConstant(ShAmtWidth, DL, VT));
2184     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
2185     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
2186   }
2187   case ISD::TRUNCATE: {
2188     SDLoc DL(Op);
2189     MVT VT = Op.getSimpleValueType();
2190     // Only custom-lower vector truncates
2191     if (!VT.isVector())
2192       return Op;
2193 
2194     // Truncates to mask types are handled differently
2195     if (VT.getVectorElementType() == MVT::i1)
2196       return lowerVectorMaskTrunc(Op, DAG);
2197 
2198     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2199     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2200     // truncate by one power of two at a time.
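    // For example, a v4i32->v4i8 truncate is emitted as two
    // RISCVISD::TRUNCATE_VECTOR_VL steps: i32->i16, then i16->i8.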
2201     MVT DstEltVT = VT.getVectorElementType();
2202 
2203     SDValue Src = Op.getOperand(0);
2204     MVT SrcVT = Src.getSimpleValueType();
2205     MVT SrcEltVT = SrcVT.getVectorElementType();
2206 
2207     assert(DstEltVT.bitsLT(SrcEltVT) &&
2208            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2209            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2210            "Unexpected vector truncate lowering");
2211 
2212     MVT ContainerVT = SrcVT;
2213     if (SrcVT.isFixedLengthVector()) {
2214       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2215       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2216     }
2217 
2218     SDValue Result = Src;
2219     SDValue Mask, VL;
2220     std::tie(Mask, VL) =
2221         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2222     LLVMContext &Context = *DAG.getContext();
2223     const ElementCount Count = ContainerVT.getVectorElementCount();
2224     do {
2225       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2226       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2227       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2228                            Mask, VL);
2229     } while (SrcEltVT != DstEltVT);
2230 
2231     if (SrcVT.isFixedLengthVector())
2232       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2233 
2234     return Result;
2235   }
2236   case ISD::ANY_EXTEND:
2237   case ISD::ZERO_EXTEND:
2238     if (Op.getOperand(0).getValueType().isVector() &&
2239         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2240       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2241     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2242   case ISD::SIGN_EXTEND:
2243     if (Op.getOperand(0).getValueType().isVector() &&
2244         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2245       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2246     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2247   case ISD::SPLAT_VECTOR_PARTS:
2248     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2249   case ISD::INSERT_VECTOR_ELT:
2250     return lowerINSERT_VECTOR_ELT(Op, DAG);
2251   case ISD::EXTRACT_VECTOR_ELT:
2252     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2253   case ISD::VSCALE: {
2254     MVT VT = Op.getSimpleValueType();
2255     SDLoc DL(Op);
2256     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
2257     // We define our scalable vector types for lmul=1 to use a 64 bit known
2258     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
2259     // vscale as VLENB / 8.
2260     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
2261     if (isa<ConstantSDNode>(Op.getOperand(0))) {
2262       // We assume VLENB is a multiple of 8. We manually choose the best shift
2263       // here because SimplifyDemandedBits isn't always able to simplify it.
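      // For example, vscale * 4 becomes VLENB >> 1 and vscale * 16 becomes
      // VLENB << 1, since VLENB == vscale * 8.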
2264       uint64_t Val = Op.getConstantOperandVal(0);
2265       if (isPowerOf2_64(Val)) {
2266         uint64_t Log2 = Log2_64(Val);
2267         if (Log2 < 3)
2268           return DAG.getNode(ISD::SRL, DL, VT, VLENB,
2269                              DAG.getConstant(3 - Log2, DL, VT));
2270         if (Log2 > 3)
2271           return DAG.getNode(ISD::SHL, DL, VT, VLENB,
2272                              DAG.getConstant(Log2 - 3, DL, VT));
2273         return VLENB;
2274       }
2275       // If the multiplier is a multiple of 8, scale it down to avoid needing
2276       // to shift the VLENB value.
2277       if ((Val % 8) == 0)
2278         return DAG.getNode(ISD::MUL, DL, VT, VLENB,
2279                            DAG.getConstant(Val / 8, DL, VT));
2280     }
2281 
2282     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2283                                  DAG.getConstant(3, DL, VT));
2284     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2285   }
2286   case ISD::FP_EXTEND: {
2287     // RVV can only do fp_extend to types double the size of the source. We
2288     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
2289     // via f32.
2290     SDLoc DL(Op);
2291     MVT VT = Op.getSimpleValueType();
2292     SDValue Src = Op.getOperand(0);
2293     MVT SrcVT = Src.getSimpleValueType();
2294 
2295     // Prepare any fixed-length vector operands.
2296     MVT ContainerVT = VT;
2297     if (SrcVT.isFixedLengthVector()) {
2298       ContainerVT = getContainerForFixedLengthVector(VT);
2299       MVT SrcContainerVT =
2300           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2301       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2302     }
2303 
2304     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2305         SrcVT.getVectorElementType() != MVT::f16) {
2306       // For scalable vectors, we only need to close the gap between
2307       // vXf16->vXf64.
2308       if (!VT.isFixedLengthVector())
2309         return Op;
2310       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2311       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2312       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2313     }
2314 
2315     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2316     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2317     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2318         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2319 
2320     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2321                                            DL, DAG, Subtarget);
2322     if (VT.isFixedLengthVector())
2323       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2324     return Extend;
2325   }
2326   case ISD::FP_ROUND: {
2327     // RVV can only do fp_round to types half the size of the source. We
2328     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2329     // conversion instruction.
2330     SDLoc DL(Op);
2331     MVT VT = Op.getSimpleValueType();
2332     SDValue Src = Op.getOperand(0);
2333     MVT SrcVT = Src.getSimpleValueType();
2334 
2335     // Prepare any fixed-length vector operands.
2336     MVT ContainerVT = VT;
2337     if (VT.isFixedLengthVector()) {
2338       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2339       ContainerVT =
2340           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2341       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2342     }
2343 
2344     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2345         SrcVT.getVectorElementType() != MVT::f64) {
2346       // For scalable vectors, we only need to close the gap between
2347       // vXf64<->vXf16.
2348       if (!VT.isFixedLengthVector())
2349         return Op;
2350       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2351       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2352       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2353     }
2354 
2355     SDValue Mask, VL;
2356     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2357 
2358     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2359     SDValue IntermediateRound =
2360         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2361     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2362                                           DL, DAG, Subtarget);
2363 
2364     if (VT.isFixedLengthVector())
2365       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2366     return Round;
2367   }
2368   case ISD::FP_TO_SINT:
2369   case ISD::FP_TO_UINT:
2370   case ISD::SINT_TO_FP:
2371   case ISD::UINT_TO_FP: {
2372     // RVV can only do fp<->int conversions to types half/double the size of
2373     // the source. We custom-lower any conversions that do two hops into
2374     // sequences.
2375     MVT VT = Op.getSimpleValueType();
2376     if (!VT.isVector())
2377       return Op;
2378     SDLoc DL(Op);
2379     SDValue Src = Op.getOperand(0);
2380     MVT EltVT = VT.getVectorElementType();
2381     MVT SrcVT = Src.getSimpleValueType();
2382     MVT SrcEltVT = SrcVT.getVectorElementType();
2383     unsigned EltSize = EltVT.getSizeInBits();
2384     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2385     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2386            "Unexpected vector element types");
2387 
2388     bool IsInt2FP = SrcEltVT.isInteger();
2389     // Widening conversions
2390     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2391       if (IsInt2FP) {
2392         // Do a regular integer sign/zero extension then convert to float.
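        // For example, i8->f32 (a 4x widening) first extends i8 to i32, then
        // converts i32 to f32.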
2393         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2394                                       VT.getVectorElementCount());
2395         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2396                                  ? ISD::ZERO_EXTEND
2397                                  : ISD::SIGN_EXTEND;
2398         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2399         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2400       }
2401       // FP2Int
2402       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2403       // Do one doubling fp_extend then complete the operation by converting
2404       // to int.
2405       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2406       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2407       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2408     }
2409 
2410     // Narrowing conversions
2411     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2412       if (IsInt2FP) {
2413         // One narrowing int_to_fp, then an fp_round.
2414         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2415         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2416         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2417         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2418       }
2419       // FP2Int
2420       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2421       // representable by the integer, the result is poison.
2422       MVT IVecVT =
2423           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2424                            VT.getVectorElementCount());
2425       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2426       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2427     }
2428 
2429     // Scalable vectors can exit here; patterns will handle equally-sized
2430     // conversions as well as ones that halve or double the element size.
2431     if (!VT.isFixedLengthVector())
2432       return Op;
2433 
2434     // For fixed-length vectors we lower to a custom "VL" node.
2435     unsigned RVVOpc = 0;
2436     switch (Op.getOpcode()) {
2437     default:
2438       llvm_unreachable("Impossible opcode");
2439     case ISD::FP_TO_SINT:
2440       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2441       break;
2442     case ISD::FP_TO_UINT:
2443       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2444       break;
2445     case ISD::SINT_TO_FP:
2446       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2447       break;
2448     case ISD::UINT_TO_FP:
2449       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2450       break;
2451     }
2452 
2453     MVT ContainerVT, SrcContainerVT;
2454     // Derive the reference container type from the larger vector type.
2455     if (SrcEltSize > EltSize) {
2456       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2457       ContainerVT =
2458           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2459     } else {
2460       ContainerVT = getContainerForFixedLengthVector(VT);
2461       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2462     }
2463 
2464     SDValue Mask, VL;
2465     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2466 
2467     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2468     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2469     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2470   }
2471   case ISD::VECREDUCE_ADD:
2472   case ISD::VECREDUCE_UMAX:
2473   case ISD::VECREDUCE_SMAX:
2474   case ISD::VECREDUCE_UMIN:
2475   case ISD::VECREDUCE_SMIN:
2476     return lowerVECREDUCE(Op, DAG);
2477   case ISD::VECREDUCE_AND:
2478   case ISD::VECREDUCE_OR:
2479   case ISD::VECREDUCE_XOR:
2480     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2481       return lowerVectorMaskVECREDUCE(Op, DAG);
2482     return lowerVECREDUCE(Op, DAG);
2483   case ISD::VECREDUCE_FADD:
2484   case ISD::VECREDUCE_SEQ_FADD:
2485   case ISD::VECREDUCE_FMIN:
2486   case ISD::VECREDUCE_FMAX:
2487     return lowerFPVECREDUCE(Op, DAG);
2488   case ISD::INSERT_SUBVECTOR:
2489     return lowerINSERT_SUBVECTOR(Op, DAG);
2490   case ISD::EXTRACT_SUBVECTOR:
2491     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2492   case ISD::STEP_VECTOR:
2493     return lowerSTEP_VECTOR(Op, DAG);
2494   case ISD::VECTOR_REVERSE:
2495     return lowerVECTOR_REVERSE(Op, DAG);
2496   case ISD::BUILD_VECTOR:
2497     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2498   case ISD::SPLAT_VECTOR:
2499     if (Op.getValueType().getVectorElementType() == MVT::i1)
2500       return lowerVectorMaskSplat(Op, DAG);
2501     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
2502   case ISD::VECTOR_SHUFFLE:
2503     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2504   case ISD::CONCAT_VECTORS: {
2505     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2506     // better than going through the stack, as the default expansion does.
2507     SDLoc DL(Op);
2508     MVT VT = Op.getSimpleValueType();
2509     unsigned NumOpElts =
2510         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2511     SDValue Vec = DAG.getUNDEF(VT);
2512     for (const auto &OpIdx : enumerate(Op->ops()))
2513       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2514                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2515     return Vec;
2516   }
2517   case ISD::LOAD:
2518     if (auto V = expandUnalignedRVVLoad(Op, DAG))
2519       return V;
2520     if (Op.getValueType().isFixedLengthVector())
2521       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2522     return Op;
2523   case ISD::STORE:
2524     if (auto V = expandUnalignedRVVStore(Op, DAG))
2525       return V;
2526     if (Op.getOperand(1).getValueType().isFixedLengthVector())
2527       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2528     return Op;
2529   case ISD::MLOAD:
2530     return lowerMLOAD(Op, DAG);
2531   case ISD::MSTORE:
2532     return lowerMSTORE(Op, DAG);
2533   case ISD::SETCC:
2534     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
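  // Arithmetic on fixed-length vectors is lowered to the equivalent operation
  // on a scalable container type (*_VL nodes).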
2535   case ISD::ADD:
2536     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2537   case ISD::SUB:
2538     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2539   case ISD::MUL:
2540     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2541   case ISD::MULHS:
2542     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2543   case ISD::MULHU:
2544     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2545   case ISD::AND:
2546     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2547                                               RISCVISD::AND_VL);
2548   case ISD::OR:
2549     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2550                                               RISCVISD::OR_VL);
2551   case ISD::XOR:
2552     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2553                                               RISCVISD::XOR_VL);
2554   case ISD::SDIV:
2555     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2556   case ISD::SREM:
2557     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2558   case ISD::UDIV:
2559     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2560   case ISD::UREM:
2561     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2562   case ISD::SHL:
2563   case ISD::SRA:
2564   case ISD::SRL:
2565     if (Op.getSimpleValueType().isFixedLengthVector())
2566       return lowerFixedLengthVectorShiftToRVV(Op, DAG);
2567     // This can be called for an i32 shift amount that needs to be promoted.
2568     assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
2569            "Unexpected custom legalisation");
2570     return SDValue();
2571   case ISD::FADD:
2572     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2573   case ISD::FSUB:
2574     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2575   case ISD::FMUL:
2576     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2577   case ISD::FDIV:
2578     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2579   case ISD::FNEG:
2580     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2581   case ISD::FABS:
2582     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2583   case ISD::FSQRT:
2584     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2585   case ISD::FMA:
2586     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2587   case ISD::SMIN:
2588     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2589   case ISD::SMAX:
2590     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2591   case ISD::UMIN:
2592     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2593   case ISD::UMAX:
2594     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2595   case ISD::FMINNUM:
2596     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
2597   case ISD::FMAXNUM:
2598     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
2599   case ISD::ABS:
2600     return lowerABS(Op, DAG);
2601   case ISD::VSELECT:
2602     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2603   case ISD::FCOPYSIGN:
2604     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2605   case ISD::MGATHER:
2606     return lowerMGATHER(Op, DAG);
2607   case ISD::MSCATTER:
2608     return lowerMSCATTER(Op, DAG);
2609   case ISD::FLT_ROUNDS_:
2610     return lowerGET_ROUNDING(Op, DAG);
2611   case ISD::SET_ROUNDING:
2612     return lowerSET_ROUNDING(Op, DAG);
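  // VP (vector-predicated) operations already carry an explicit mask and EVL
  // operand, so they map directly onto the corresponding *_VL nodes.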
2613   case ISD::VP_ADD:
2614     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
2615   case ISD::VP_SUB:
2616     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
2617   case ISD::VP_MUL:
2618     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
2619   case ISD::VP_SDIV:
2620     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
2621   case ISD::VP_UDIV:
2622     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
2623   case ISD::VP_SREM:
2624     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
2625   case ISD::VP_UREM:
2626     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
2627   case ISD::VP_AND:
2628     return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
2629   case ISD::VP_OR:
2630     return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
2631   case ISD::VP_XOR:
2632     return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
2633   case ISD::VP_ASHR:
2634     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
2635   case ISD::VP_LSHR:
2636     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
2637   case ISD::VP_SHL:
2638     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
2639   case ISD::VP_FADD:
2640     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
2641   case ISD::VP_FSUB:
2642     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
2643   case ISD::VP_FMUL:
2644     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
2645   case ISD::VP_FDIV:
2646     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
2647   }
2648 }
2649 
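// Helpers for getAddr: build the target-specific node (global address, block
// address, constant pool or jump table) carrying the requested relocation
// flags.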
2650 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2651                              SelectionDAG &DAG, unsigned Flags) {
2652   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2653 }
2654 
2655 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2656                              SelectionDAG &DAG, unsigned Flags) {
2657   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2658                                    Flags);
2659 }
2660 
2661 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2662                              SelectionDAG &DAG, unsigned Flags) {
2663   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2664                                    N->getOffset(), Flags);
2665 }
2666 
2667 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2668                              SelectionDAG &DAG, unsigned Flags) {
2669   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2670 }
2671 
2672 template <class NodeTy>
2673 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2674                                      bool IsLocal) const {
2675   SDLoc DL(N);
2676   EVT Ty = getPointerTy(DAG.getDataLayout());
2677 
2678   if (isPositionIndependent()) {
2679     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2680     if (IsLocal)
2681       // Use PC-relative addressing to access the symbol. This generates the
2682       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
2683       // %pcrel_lo(auipc)).
2684       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2685 
2686     // Use PC-relative addressing to access the GOT for this symbol, then load
2687     // the address from the GOT. This generates the pattern (PseudoLA sym),
2688     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
2689     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
2690   }
2691 
2692   switch (getTargetMachine().getCodeModel()) {
2693   default:
2694     report_fatal_error("Unsupported code model for lowering");
2695   case CodeModel::Small: {
2696     // Generate a sequence for accessing addresses within the first 2 GiB of
2697     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
2698     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
2699     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
2700     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2701     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
2702   }
2703   case CodeModel::Medium: {
2704     // Generate a sequence for accessing addresses within any 2 GiB range of
2705     // the address space. This generates the pattern (PseudoLLA sym), which
2706     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
2707     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2708     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2709   }
2710   }
2711 }
2712 
2713 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
2714                                                 SelectionDAG &DAG) const {
2715   SDLoc DL(Op);
2716   EVT Ty = Op.getValueType();
2717   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2718   int64_t Offset = N->getOffset();
2719   MVT XLenVT = Subtarget.getXLenVT();
2720 
2721   const GlobalValue *GV = N->getGlobal();
2722   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
2723   SDValue Addr = getAddr(N, DAG, IsLocal);
2724 
2725   // In order to maximise the opportunity for common subexpression elimination,
2726   // emit a separate ADD node for the global address offset instead of folding
2727   // it in the global address node. Later peephole optimisations may choose to
2728   // fold it back in when profitable.
2729   if (Offset != 0)
2730     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2731                        DAG.getConstant(Offset, DL, XLenVT));
2732   return Addr;
2733 }
2734 
2735 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
2736                                                SelectionDAG &DAG) const {
2737   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2738 
2739   return getAddr(N, DAG);
2740 }
2741 
2742 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
2743                                                SelectionDAG &DAG) const {
2744   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2745 
2746   return getAddr(N, DAG);
2747 }
2748 
2749 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
2750                                             SelectionDAG &DAG) const {
2751   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2752 
2753   return getAddr(N, DAG);
2754 }
2755 
2756 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
2757                                               SelectionDAG &DAG,
2758                                               bool UseGOT) const {
2759   SDLoc DL(N);
2760   EVT Ty = getPointerTy(DAG.getDataLayout());
2761   const GlobalValue *GV = N->getGlobal();
2762   MVT XLenVT = Subtarget.getXLenVT();
2763 
2764   if (UseGOT) {
2765     // Use PC-relative addressing to access the GOT for this TLS symbol, then
2766     // load the address from the GOT and add the thread pointer. This generates
2767     // the pattern (PseudoLA_TLS_IE sym), which expands to
2768     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
2769     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2770     SDValue Load =
2771         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
2772 
2773     // Add the thread pointer.
2774     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2775     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
2776   }
2777 
2778   // Generate a sequence for accessing the address relative to the thread
2779   // pointer, with the appropriate adjustment for the thread pointer offset.
2780   // This generates the pattern
2781   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
2782   SDValue AddrHi =
2783       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
2784   SDValue AddrAdd =
2785       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
2786   SDValue AddrLo =
2787       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
2788 
2789   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2790   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2791   SDValue MNAdd = SDValue(
2792       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
2793       0);
2794   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
2795 }
2796 
2797 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
2798                                                SelectionDAG &DAG) const {
2799   SDLoc DL(N);
2800   EVT Ty = getPointerTy(DAG.getDataLayout());
2801   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
2802   const GlobalValue *GV = N->getGlobal();
2803 
2804   // Use a PC-relative addressing mode to access the global dynamic GOT address.
2805   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
2806   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
2807   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2808   SDValue Load =
2809       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
2810 
2811   // Prepare argument list to generate call.
2812   ArgListTy Args;
2813   ArgListEntry Entry;
2814   Entry.Node = Load;
2815   Entry.Ty = CallTy;
2816   Args.push_back(Entry);
2817 
2818   // Setup call to __tls_get_addr.
2819   TargetLowering::CallLoweringInfo CLI(DAG);
2820   CLI.setDebugLoc(DL)
2821       .setChain(DAG.getEntryNode())
2822       .setLibCallee(CallingConv::C, CallTy,
2823                     DAG.getExternalSymbol("__tls_get_addr", Ty),
2824                     std::move(Args));
2825 
2826   return LowerCallTo(CLI).first;
2827 }
2828 
2829 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2830                                                    SelectionDAG &DAG) const {
2831   SDLoc DL(Op);
2832   EVT Ty = Op.getValueType();
2833   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2834   int64_t Offset = N->getOffset();
2835   MVT XLenVT = Subtarget.getXLenVT();
2836 
2837   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2838 
2839   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2840       CallingConv::GHC)
2841     report_fatal_error("In GHC calling convention TLS is not supported");
2842 
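  // Select the access sequence based on the TLS model: local-exec and
  // initial-exec use tp-relative or GOT-indirect sequences, while both dynamic
  // models are lowered to a call to __tls_get_addr.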
2843   SDValue Addr;
2844   switch (Model) {
2845   case TLSModel::LocalExec:
2846     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2847     break;
2848   case TLSModel::InitialExec:
2849     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2850     break;
2851   case TLSModel::LocalDynamic:
2852   case TLSModel::GeneralDynamic:
2853     Addr = getDynamicTLSAddr(N, DAG);
2854     break;
2855   }
2856 
2857   // In order to maximise the opportunity for common subexpression elimination,
2858   // emit a separate ADD node for the global address offset instead of folding
2859   // it in the global address node. Later peephole optimisations may choose to
2860   // fold it back in when profitable.
2861   if (Offset != 0)
2862     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2863                        DAG.getConstant(Offset, DL, XLenVT));
2864   return Addr;
2865 }
2866 
2867 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2868   SDValue CondV = Op.getOperand(0);
2869   SDValue TrueV = Op.getOperand(1);
2870   SDValue FalseV = Op.getOperand(2);
2871   SDLoc DL(Op);
2872   MVT VT = Op.getSimpleValueType();
2873   MVT XLenVT = Subtarget.getXLenVT();
2874 
2875   // Lower vector SELECTs to VSELECTs by splatting the condition.
2876   if (VT.isVector()) {
2877     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
2878     SDValue CondSplat = VT.isScalableVector()
2879                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
2880                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
2881     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
2882   }
2883 
2884   // If the result type is XLenVT and CondV is the output of a SETCC node
2885   // which also operated on XLenVT inputs, then merge the SETCC node into the
2886   // lowered RISCVISD::SELECT_CC to take advantage of the integer
2887   // compare+branch instructions. i.e.:
2888   // (select (setcc lhs, rhs, cc), truev, falsev)
2889   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
2890   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
2891       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
2892     SDValue LHS = CondV.getOperand(0);
2893     SDValue RHS = CondV.getOperand(1);
2894     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
2895     ISD::CondCode CCVal = CC->get();
2896 
2897     // Special case for a select of 2 constants that have a difference of 1.
2898     // Normally this is done by DAGCombine, but if the select is introduced by
2899     // type legalization or op legalization, we miss it. Restricting to SETLT
2900     // case for now because that is what signed saturating add/sub need.
2901     // FIXME: We don't need the condition to be SETLT or even a SETCC,
2902     // but we would probably want to swap the true/false values if the condition
2903     // is SETGE/SETLE to avoid an XORI.
2904     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
2905         CCVal == ISD::SETLT) {
2906       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
2907       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
2908       if (TrueVal - 1 == FalseVal)
2909         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
2910       if (TrueVal + 1 == FalseVal)
2911         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
2912     }
2913 
2914     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2915 
2916     SDValue TargetCC = DAG.getTargetConstant(CCVal, DL, XLenVT);
2917     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
2918     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2919   }
2920 
2921   // Otherwise:
2922   // (select condv, truev, falsev)
2923   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
2924   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
2925   SDValue SetNE = DAG.getTargetConstant(ISD::SETNE, DL, XLenVT);
2926 
2927   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
2928 
2929   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2930 }
2931 
2932 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2933   SDValue CondV = Op.getOperand(1);
2934   SDLoc DL(Op);
2935   MVT XLenVT = Subtarget.getXLenVT();
2936 
2937   if (CondV.getOpcode() == ISD::SETCC &&
2938       CondV.getOperand(0).getValueType() == XLenVT) {
2939     SDValue LHS = CondV.getOperand(0);
2940     SDValue RHS = CondV.getOperand(1);
2941     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
2942 
2943     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2944 
2945     SDValue TargetCC = DAG.getCondCode(CCVal);
2946     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2947                        LHS, RHS, TargetCC, Op.getOperand(2));
2948   }
2949 
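  // Otherwise:
  // (brcond condv, dest) -> (riscvisd::br_cc condv, zero, setne, dest)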
2950   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2951                      CondV, DAG.getConstant(0, DL, XLenVT),
2952                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
2953 }
2954 
2955 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2956   MachineFunction &MF = DAG.getMachineFunction();
2957   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
2958 
2959   SDLoc DL(Op);
2960   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2961                                  getPointerTy(MF.getDataLayout()));
2962 
2963   // vastart just stores the address of the VarArgsFrameIndex slot into the
2964   // memory location argument.
2965   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2966   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2967                       MachinePointerInfo(SV));
2968 }
2969 
2970 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
2971                                             SelectionDAG &DAG) const {
2972   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2973   MachineFunction &MF = DAG.getMachineFunction();
2974   MachineFrameInfo &MFI = MF.getFrameInfo();
2975   MFI.setFrameAddressIsTaken(true);
2976   Register FrameReg = RI.getFrameRegister(MF);
2977   int XLenInBytes = Subtarget.getXLen() / 8;
2978 
2979   EVT VT = Op.getValueType();
2980   SDLoc DL(Op);
2981   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
2982   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
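  // For deeper frames, walk the chain of saved frame pointers; the previous
  // frame pointer is expected at offset -2*XLEN bytes from the current frame
  // address.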
2983   while (Depth--) {
2984     int Offset = -(XLenInBytes * 2);
2985     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
2986                               DAG.getIntPtrConstant(Offset, DL));
2987     FrameAddr =
2988         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2989   }
2990   return FrameAddr;
2991 }
2992 
2993 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
2994                                              SelectionDAG &DAG) const {
2995   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2996   MachineFunction &MF = DAG.getMachineFunction();
2997   MachineFrameInfo &MFI = MF.getFrameInfo();
2998   MFI.setReturnAddressIsTaken(true);
2999   MVT XLenVT = Subtarget.getXLenVT();
3000   int XLenInBytes = Subtarget.getXLen() / 8;
3001 
3002   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3003     return SDValue();
3004 
3005   EVT VT = Op.getValueType();
3006   SDLoc DL(Op);
3007   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3008   if (Depth) {
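    // Load the return address of the requested frame from just below that
    // frame's address (offset -XLEN bytes), reusing lowerFRAMEADDR to walk up.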
3009     int Off = -XLenInBytes;
3010     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3011     SDValue Offset = DAG.getConstant(Off, DL, VT);
3012     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3013                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3014                        MachinePointerInfo());
3015   }
3016 
3017   // Return the value of the return address register, marking it an implicit
3018   // live-in.
3019   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3020   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3021 }
3022 
3023 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3024                                                  SelectionDAG &DAG) const {
3025   SDLoc DL(Op);
3026   SDValue Lo = Op.getOperand(0);
3027   SDValue Hi = Op.getOperand(1);
3028   SDValue Shamt = Op.getOperand(2);
3029   EVT VT = Lo.getValueType();
3030 
3031   // if Shamt-XLEN < 0: // Shamt < XLEN
3032   //   Lo = Lo << Shamt
3033   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
3034   // else:
3035   //   Lo = 0
3036   //   Hi = Lo << (Shamt-XLEN)
3037 
3038   SDValue Zero = DAG.getConstant(0, DL, VT);
3039   SDValue One = DAG.getConstant(1, DL, VT);
3040   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3041   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3042   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3043   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3044 
3045   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3046   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3047   SDValue ShiftRightLo =
3048       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3049   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3050   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3051   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3052 
3053   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3054 
3055   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3056   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3057 
3058   SDValue Parts[2] = {Lo, Hi};
3059   return DAG.getMergeValues(Parts, DL);
3060 }
3061 
3062 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3063                                                   bool IsSRA) const {
3064   SDLoc DL(Op);
3065   SDValue Lo = Op.getOperand(0);
3066   SDValue Hi = Op.getOperand(1);
3067   SDValue Shamt = Op.getOperand(2);
3068   EVT VT = Lo.getValueType();
3069 
3070   // SRA expansion:
3071   //   if Shamt-XLEN < 0: // Shamt < XLEN
3072   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3073   //     Hi = Hi >>s Shamt
3074   //   else:
3075   //     Lo = Hi >>s (Shamt-XLEN);
3076   //     Hi = Hi >>s (XLEN-1)
3077   //
3078   // SRL expansion:
3079   //   if Shamt-XLEN < 0: // Shamt < XLEN
3080   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3081   //     Hi = Hi >>u Shamt
3082   //   else:
3083   //     Lo = Hi >>u (Shamt-XLEN);
3084   //     Hi = 0;
3085 
3086   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3087 
3088   SDValue Zero = DAG.getConstant(0, DL, VT);
3089   SDValue One = DAG.getConstant(1, DL, VT);
3090   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3091   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3092   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3093   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3094 
3095   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3096   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3097   SDValue ShiftLeftHi =
3098       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
3099   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3100   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3101   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
3102   SDValue HiFalse =
3103       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
3104 
3105   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3106 
3107   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3108   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3109 
3110   SDValue Parts[2] = {Lo, Hi};
3111   return DAG.getMergeValues(Parts, DL);
3112 }
3113 
3114 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
3115 // legal equivalently-sized i8 type, so we can use that as a go-between.
3116 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
3117                                                   SelectionDAG &DAG) const {
3118   SDLoc DL(Op);
3119   MVT VT = Op.getSimpleValueType();
3120   SDValue SplatVal = Op.getOperand(0);
3121   // All-zeros or all-ones splats are handled specially.
3122   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
3123     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3124     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
3125   }
3126   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
3127     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3128     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
3129   }
3130   MVT XLenVT = Subtarget.getXLenVT();
3131   assert(SplatVal.getValueType() == XLenVT &&
3132          "Unexpected type for i1 splat value");
3133   MVT InterVT = VT.changeVectorElementType(MVT::i8);
3134   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
3135                          DAG.getConstant(1, DL, XLenVT));
3136   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
3137   SDValue Zero = DAG.getConstant(0, DL, InterVT);
3138   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
3139 }
3140 
3141 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
3142 // illegal (currently only vXi64 RV32).
3143 // FIXME: We could also catch non-constant sign-extended i32 values and lower
3144 // them to SPLAT_VECTOR_I64
3145 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
3146                                                      SelectionDAG &DAG) const {
3147   SDLoc DL(Op);
3148   MVT VecVT = Op.getSimpleValueType();
3149   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
3150          "Unexpected SPLAT_VECTOR_PARTS lowering");
3151 
3152   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
3153   SDValue Lo = Op.getOperand(0);
3154   SDValue Hi = Op.getOperand(1);
3155 
3156   if (VecVT.isFixedLengthVector()) {
3157     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3158     SDLoc DL(Op);
3159     SDValue Mask, VL;
3160     std::tie(Mask, VL) =
3161         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3162 
3163     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
3164     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
3165   }
3166 
3167   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
3168     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
3169     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
3170     // If the Hi half is just the sign-extension of Lo, lower this as a custom
3171     // node in order to try and match RVV vector/scalar instructions.
3172     if ((LoC >> 31) == HiC)
3173       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3174   }
3175 
3176   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
3177   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
3178       isa<ConstantSDNode>(Hi.getOperand(1)) &&
3179       Hi.getConstantOperandVal(1) == 31)
3180     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3181 
3182   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
3183   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
3184                      DAG.getRegister(RISCV::X0, MVT::i64));
3185 }
3186 
3187 // Custom-lower extensions from mask vectors by using a vselect either with 1
3188 // for zero/any-extension or -1 for sign-extension:
3189 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
3190 // Note that any-extension is lowered identically to zero-extension.
3191 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
3192                                                 int64_t ExtTrueVal) const {
3193   SDLoc DL(Op);
3194   MVT VecVT = Op.getSimpleValueType();
3195   SDValue Src = Op.getOperand(0);
3196   // Only custom-lower extensions from mask types
3197   assert(Src.getValueType().isVector() &&
3198          Src.getValueType().getVectorElementType() == MVT::i1);
3199 
3200   MVT XLenVT = Subtarget.getXLenVT();
3201   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
3202   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
3203 
3204   if (VecVT.isScalableVector()) {
3205     // Be careful not to introduce illegal scalar types at this stage, and be
3206     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
3207     // illegal and must be expanded. Since we know that the constants are
3208     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
3209     bool IsRV32E64 =
3210         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
3211 
3212     if (!IsRV32E64) {
3213       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
3214       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
3215     } else {
3216       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
3217       SplatTrueVal =
3218           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
3219     }
3220 
3221     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
3222   }
3223 
3224   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3225   MVT I1ContainerVT =
3226       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3227 
3228   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
3229 
3230   SDValue Mask, VL;
3231   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3232 
3233   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3234   SplatTrueVal =
3235       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3236   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3237                                SplatTrueVal, SplatZero, VL);
3238 
3239   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3240 }
3241 
3242 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3243     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3244   MVT ExtVT = Op.getSimpleValueType();
3245   // Only custom-lower extensions from fixed-length vector types.
3246   if (!ExtVT.isFixedLengthVector())
3247     return Op;
3248   MVT VT = Op.getOperand(0).getSimpleValueType();
3249   // Grab the canonical container type for the extended type. Infer the smaller
3250   // type from that to ensure the same number of vector elements, as we know
3251   // the LMUL will be sufficient to hold the smaller type.
3252   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
3253   // Get the extended container type manually to ensure the same number of
3254   // vector elements between source and dest.
3255   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3256                                      ContainerExtVT.getVectorElementCount());
3257 
3258   SDValue Op1 =
3259       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3260 
3261   SDLoc DL(Op);
3262   SDValue Mask, VL;
3263   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3264 
3265   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3266 
3267   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3268 }
3269 
3270 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3271 // setcc operation:
3272 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3273 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3274                                                   SelectionDAG &DAG) const {
3275   SDLoc DL(Op);
3276   EVT MaskVT = Op.getValueType();
3277   // Only expect to custom-lower truncations to mask types
3278   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3279          "Unexpected type for vector mask lowering");
3280   SDValue Src = Op.getOperand(0);
3281   MVT VecVT = Src.getSimpleValueType();
3282 
3283   // If this is a fixed vector, we need to convert it to a scalable vector.
3284   MVT ContainerVT = VecVT;
3285   if (VecVT.isFixedLengthVector()) {
3286     ContainerVT = getContainerForFixedLengthVector(VecVT);
3287     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3288   }
3289 
3290   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
3291   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
3292 
3293   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
3294   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
3295 
3296   if (VecVT.isScalableVector()) {
3297     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
3298     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
3299   }
3300 
3301   SDValue Mask, VL;
3302   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3303 
3304   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3305   SDValue Trunc =
3306       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3307   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3308                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3309   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3310 }
3311 
3312 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3313 // first position of a vector, and that vector is slid up to the insert index.
3314 // By limiting the active vector length to index+1 and merging with the
3315 // original vector (with an undisturbed tail policy for elements >= VL), we
3316 // achieve the desired result of leaving all elements untouched except the one
3317 // at VL-1, which is replaced with the desired value.
3318 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3319                                                     SelectionDAG &DAG) const {
3320   SDLoc DL(Op);
3321   MVT VecVT = Op.getSimpleValueType();
3322   SDValue Vec = Op.getOperand(0);
3323   SDValue Val = Op.getOperand(1);
3324   SDValue Idx = Op.getOperand(2);
3325 
3326   if (VecVT.getVectorElementType() == MVT::i1) {
3327     // FIXME: For now we just promote to an i8 vector and insert into that,
3328     // but this is probably not optimal.
3329     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3330     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3331     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
3332     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
3333   }
3334 
3335   MVT ContainerVT = VecVT;
3336   // If the operand is a fixed-length vector, convert to a scalable one.
3337   if (VecVT.isFixedLengthVector()) {
3338     ContainerVT = getContainerForFixedLengthVector(VecVT);
3339     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3340   }
3341 
3342   MVT XLenVT = Subtarget.getXLenVT();
3343 
3344   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3345   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
3346   // Even i64-element vectors on RV32 can be lowered without scalar
3347   // legalization if the most-significant 32 bits of the value are not affected
3348   // by the sign-extension of the lower 32 bits.
3349   // TODO: We could also catch sign extensions of a 32-bit value.
3350   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3351     const auto *CVal = cast<ConstantSDNode>(Val);
3352     if (isInt<32>(CVal->getSExtValue())) {
3353       IsLegalInsert = true;
3354       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3355     }
3356   }
3357 
3358   SDValue Mask, VL;
3359   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3360 
3361   SDValue ValInVec;
3362 
3363   if (IsLegalInsert) {
3364     unsigned Opc =
3365         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3366     if (isNullConstant(Idx)) {
3367       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3368       if (!VecVT.isFixedLengthVector())
3369         return Vec;
3370       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3371     }
3372     ValInVec =
3373         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3374   } else {
3375     // On RV32, i64-element vectors must be specially handled to place the
3376     // value at element 0, by using two vslide1up instructions in sequence on
3377     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3378     // this.
3379     SDValue One = DAG.getConstant(1, DL, XLenVT);
3380     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3381     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3382     MVT I32ContainerVT =
3383         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3384     SDValue I32Mask =
3385         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3386     // Limit the active VL to two.
3387     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
3388     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
3389     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3390     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3391                            InsertI64VL);
3392     // First slide in the hi value, then the lo in underneath it.
3393     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3394                            ValHi, I32Mask, InsertI64VL);
3395     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3396                            ValLo, I32Mask, InsertI64VL);
3397     // Bitcast back to the right container type.
3398     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3399   }
3400 
3401   // Now that the value is in a vector, slide it into position.
3402   SDValue InsertVL =
3403       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3404   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3405                                 ValInVec, Idx, Mask, InsertVL);
3406   if (!VecVT.isFixedLengthVector())
3407     return Slideup;
3408   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3409 }
3410 
3411 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3412 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3413 // types this is done using VMV_X_S to allow us to glean information about the
3414 // sign bits of the result.
3415 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3416                                                      SelectionDAG &DAG) const {
3417   SDLoc DL(Op);
3418   SDValue Idx = Op.getOperand(1);
3419   SDValue Vec = Op.getOperand(0);
3420   EVT EltVT = Op.getValueType();
3421   MVT VecVT = Vec.getSimpleValueType();
3422   MVT XLenVT = Subtarget.getXLenVT();
3423 
3424   if (VecVT.getVectorElementType() == MVT::i1) {
3425     // FIXME: For now we just promote to an i8 vector and extract from that,
3426     // but this is probably not optimal.
3427     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3428     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3429     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3430   }
3431 
3432   // If this is a fixed vector, we need to convert it to a scalable vector.
3433   MVT ContainerVT = VecVT;
3434   if (VecVT.isFixedLengthVector()) {
3435     ContainerVT = getContainerForFixedLengthVector(VecVT);
3436     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3437   }
3438 
3439   // If the index is 0, the vector is already in the right position.
3440   if (!isNullConstant(Idx)) {
3441     // Use a VL of 1 to avoid processing more elements than we need.
3442     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3443     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3444     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3445     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3446                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3447   }
3448 
3449   if (!EltVT.isInteger()) {
3450     // Floating-point extracts are handled in TableGen.
3451     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3452                        DAG.getConstant(0, DL, XLenVT));
3453   }
3454 
3455   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3456   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3457 }
3458 
3459 // Some RVV intrinsics may claim that they want an integer operand to be
3460 // promoted or expanded.
3461 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3462                                           const RISCVSubtarget &Subtarget) {
3463   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3464           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3465          "Unexpected opcode");
3466 
3467   if (!Subtarget.hasStdExtV())
3468     return SDValue();
3469 
3470   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3471   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3472   SDLoc DL(Op);
3473 
3474   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3475       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3476   if (!II || !II->SplatOperand)
3477     return SDValue();
3478 
3479   unsigned SplatOp = II->SplatOperand + HasChain;
3480   assert(SplatOp < Op.getNumOperands());
3481 
3482   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3483   SDValue &ScalarOp = Operands[SplatOp];
3484   MVT OpVT = ScalarOp.getSimpleValueType();
3485   MVT XLenVT = Subtarget.getXLenVT();
3486 
3487   // If this isn't a scalar, or its type is XLenVT we're done.
3488   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3489     return SDValue();
3490 
3491   // Simplest case is that the operand needs to be promoted to XLenVT.
3492   if (OpVT.bitsLT(XLenVT)) {
3493     // If the operand is a constant, sign extend to increase our chances
3494     // of being able to use a .vi instruction. ANY_EXTEND would become a
3495     // zero extend and the simm5 check in isel would fail.
3496     // FIXME: Should we ignore the upper bits in isel instead?
3497     unsigned ExtOpc =
3498         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3499     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3500     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3501   }
3502 
3503   // Use the previous operand to get the vXi64 VT. The result might be a mask
3504   // VT for compares. Using the previous operand assumes that the previous
3505   // operand will never have a smaller element size than a scalar operand and
3506   // that a widening operation never uses SEW=64.
3507   // NOTE: If this fails the below assert, we can probably just find the
3508   // element count from any operand or result and use it to construct the VT.
3509   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3510   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3511 
3512   // The more complex case is when the scalar is larger than XLenVT.
3513   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3514          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3515 
3516   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3517   // on the instruction to sign-extend since SEW>XLEN.
3518   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3519     if (isInt<32>(CVal->getSExtValue())) {
3520       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3521       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3522     }
3523   }
3524 
3525   // We need to convert the scalar to a splat vector.
3526   // FIXME: Can we implicitly truncate the scalar if it is known to
3527   // be sign extended?
3528   // VL should be the last operand.
3529   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3530   assert(VL.getValueType() == XLenVT);
3531   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3532   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3533 }
3534 
3535 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3536                                                      SelectionDAG &DAG) const {
3537   unsigned IntNo = Op.getConstantOperandVal(0);
3538   SDLoc DL(Op);
3539   MVT XLenVT = Subtarget.getXLenVT();
3540 
3541   switch (IntNo) {
3542   default:
3543     break; // Don't custom lower most intrinsics.
3544   case Intrinsic::thread_pointer: {
3545     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3546     return DAG.getRegister(RISCV::X4, PtrVT);
3547   }
3548   case Intrinsic::riscv_orc_b:
3549     // Lower to the GORCI encoding for orc.b.
3550     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3551                        DAG.getConstant(7, DL, XLenVT));
3552   case Intrinsic::riscv_grev:
3553   case Intrinsic::riscv_gorc: {
3554     unsigned Opc =
3555         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3556     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3557   }
3558   case Intrinsic::riscv_shfl:
3559   case Intrinsic::riscv_unshfl: {
3560     unsigned Opc =
3561         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3562     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3563   }
3564   case Intrinsic::riscv_bcompress:
3565   case Intrinsic::riscv_bdecompress: {
3566     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3567                                                        : RISCVISD::BDECOMPRESS;
3568     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3569   }
3570   case Intrinsic::riscv_vmv_x_s:
3571     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3572     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3573                        Op.getOperand(1));
3574   case Intrinsic::riscv_vmv_v_x:
3575     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3576                             Op.getSimpleValueType(), DL, DAG, Subtarget);
3577   case Intrinsic::riscv_vfmv_v_f:
3578     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3579                        Op.getOperand(1), Op.getOperand(2));
3580   case Intrinsic::riscv_vmv_s_x: {
3581     SDValue Scalar = Op.getOperand(2);
3582 
3583     if (Scalar.getValueType().bitsLE(XLenVT)) {
3584       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3585       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3586                          Op.getOperand(1), Scalar, Op.getOperand(3));
3587     }
3588 
3589     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3590 
3591     // This is an i64 value that lives in two scalar registers. We have to
3592     // insert this in a convoluted way. First we build a vXi64 splat containing
3593     // the two values that we assemble using some bit math. Next we'll use
3594     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
3595     // to merge element 0 from our splat into the source vector.
3596     // FIXME: This is probably not the best way to do this, but it is
3597     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3598     // point.
3599     //   sw lo, (a0)
3600     //   sw hi, 4(a0)
3601     //   vlse vX, (a0)
3602     //
3603     //   vid.v      vVid
3604     //   vmseq.vx   mMask, vVid, 0
3605     //   vmerge.vvm vDest, vSrc, vVal, mMask
3606     MVT VT = Op.getSimpleValueType();
3607     SDValue Vec = Op.getOperand(1);
3608     SDValue VL = Op.getOperand(3);
3609 
3610     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
3611     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3612                                       DAG.getConstant(0, DL, MVT::i32), VL);
3613 
3614     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3615     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3616     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3617     SDValue SelectCond =
3618         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3619                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
3620     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3621                        Vec, VL);
3622   }
3623   case Intrinsic::riscv_vslide1up:
3624   case Intrinsic::riscv_vslide1down:
3625   case Intrinsic::riscv_vslide1up_mask:
3626   case Intrinsic::riscv_vslide1down_mask: {
3627     // We need to special case these when the scalar is larger than XLen.
3628     unsigned NumOps = Op.getNumOperands();
3629     bool IsMasked = NumOps == 6;
3630     unsigned OpOffset = IsMasked ? 1 : 0;
3631     SDValue Scalar = Op.getOperand(2 + OpOffset);
3632     if (Scalar.getValueType().bitsLE(XLenVT))
3633       break;
3634 
3635     // Splatting a sign extended constant is fine.
3636     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3637       if (isInt<32>(CVal->getSExtValue()))
3638         break;
3639 
3640     MVT VT = Op.getSimpleValueType();
3641     assert(VT.getVectorElementType() == MVT::i64 &&
3642            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3643 
3644     // Convert the vector source to the equivalent nxvXi32 vector.
3645     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3646     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3647 
3648     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3649                                    DAG.getConstant(0, DL, XLenVT));
3650     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3651                                    DAG.getConstant(1, DL, XLenVT));
3652 
3653     // Double the VL since we halved SEW.
3654     SDValue VL = Op.getOperand(NumOps - 1);
3655     SDValue I32VL =
3656         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3657 
3658     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
3659     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
3660 
3661     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3662     // instructions.
3663     if (IntNo == Intrinsic::riscv_vslide1up ||
3664         IntNo == Intrinsic::riscv_vslide1up_mask) {
3665       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3666                         I32Mask, I32VL);
3667       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3668                         I32Mask, I32VL);
3669     } else {
3670       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3671                         I32Mask, I32VL);
3672       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3673                         I32Mask, I32VL);
3674     }
3675 
3676     // Convert back to nxvXi64.
3677     Vec = DAG.getBitcast(VT, Vec);
3678 
3679     if (!IsMasked)
3680       return Vec;
3681 
3682     // Apply mask after the operation.
3683     SDValue Mask = Op.getOperand(NumOps - 2);
3684     SDValue MaskedOff = Op.getOperand(1);
3685     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
3686   }
3687   }
3688 
3689   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3690 }
3691 
3692 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
3693                                                     SelectionDAG &DAG) const {
3694   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3695 }
3696 
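// Return the scalable vector type with the same element type as VT that
// occupies exactly one vector register at LMUL=1 (RVVBitsPerBlock bits).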
3697 static MVT getLMUL1VT(MVT VT) {
3698   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
3699          "Unexpected vector MVT");
3700   return MVT::getScalableVectorVT(
3701       VT.getVectorElementType(),
3702       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
3703 }
3704 
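// Map an ISD::VECREDUCE_* opcode to the corresponding RISCVISD::*_VL node.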
3705 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
3706   switch (ISDOpcode) {
3707   default:
3708     llvm_unreachable("Unhandled reduction");
3709   case ISD::VECREDUCE_ADD:
3710     return RISCVISD::VECREDUCE_ADD_VL;
3711   case ISD::VECREDUCE_UMAX:
3712     return RISCVISD::VECREDUCE_UMAX_VL;
3713   case ISD::VECREDUCE_SMAX:
3714     return RISCVISD::VECREDUCE_SMAX_VL;
3715   case ISD::VECREDUCE_UMIN:
3716     return RISCVISD::VECREDUCE_UMIN_VL;
3717   case ISD::VECREDUCE_SMIN:
3718     return RISCVISD::VECREDUCE_SMIN_VL;
3719   case ISD::VECREDUCE_AND:
3720     return RISCVISD::VECREDUCE_AND_VL;
3721   case ISD::VECREDUCE_OR:
3722     return RISCVISD::VECREDUCE_OR_VL;
3723   case ISD::VECREDUCE_XOR:
3724     return RISCVISD::VECREDUCE_XOR_VL;
3725   }
3726 }
3727 
3728 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op,
3729                                                       SelectionDAG &DAG) const {
3730   SDLoc DL(Op);
3731   SDValue Vec = Op.getOperand(0);
3732   MVT VecVT = Vec.getSimpleValueType();
3733   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
3734           Op.getOpcode() == ISD::VECREDUCE_OR ||
3735           Op.getOpcode() == ISD::VECREDUCE_XOR) &&
3736          "Unexpected reduction lowering");
3737 
3738   MVT XLenVT = Subtarget.getXLenVT();
3739   assert(Op.getValueType() == XLenVT &&
3740          "Expected reduction output to be legalized to XLenVT");
3741 
3742   MVT ContainerVT = VecVT;
3743   if (VecVT.isFixedLengthVector()) {
3744     ContainerVT = getContainerForFixedLengthVector(VecVT);
3745     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3746   }
3747 
3748   SDValue Mask, VL;
3749   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3750   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3751 
3752   switch (Op.getOpcode()) {
3753   default:
3754     llvm_unreachable("Unhandled reduction");
3755   case ISD::VECREDUCE_AND:
3756     // vpopc ~x == 0
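    // Mask here is an all-ones mask, so the VMXOR computes ~Vec.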
3757     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL);
3758     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3759     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ);
3760   case ISD::VECREDUCE_OR:
3761     // vpopc x != 0
3762     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3763     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3764   case ISD::VECREDUCE_XOR: {
3765     // ((vpopc x) & 1) != 0
3766     SDValue One = DAG.getConstant(1, DL, XLenVT);
3767     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3768     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
3769     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3770   }
3771   }
3772 }
3773 
3774 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
3775                                             SelectionDAG &DAG) const {
3776   SDLoc DL(Op);
3777   SDValue Vec = Op.getOperand(0);
3778   EVT VecEVT = Vec.getValueType();
3779 
3780   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
3781 
  // Due to the ordering of type legalization, we may have a vector type that
  // needs to be split. Do that manually so we can get down to a legal type.
3784   while (getTypeAction(*DAG.getContext(), VecEVT) ==
3785          TargetLowering::TypeSplitVector) {
3786     SDValue Lo, Hi;
3787     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
3788     VecEVT = Lo.getValueType();
3789     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
3790   }
3791 
3792   // TODO: The type may need to be widened rather than split. Or widened before
3793   // it can be split.
3794   if (!isTypeLegal(VecEVT))
3795     return SDValue();
3796 
3797   MVT VecVT = VecEVT.getSimpleVT();
3798   MVT VecEltVT = VecVT.getVectorElementType();
3799   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
3800 
3801   MVT ContainerVT = VecVT;
3802   if (VecVT.isFixedLengthVector()) {
3803     ContainerVT = getContainerForFixedLengthVector(VecVT);
3804     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3805   }
3806 
3807   MVT M1VT = getLMUL1VT(ContainerVT);
3808 
3809   SDValue Mask, VL;
3810   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3811 
3812   // FIXME: This is a VLMAX splat which might be too large and can prevent
3813   // vsetvli removal.
3814   SDValue NeutralElem =
3815       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
3816   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
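  // The reduction leaves its scalar result in element 0 of an LMUL=1 register;
  // extract it and sign-extend or truncate to the expected result type.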
3817   SDValue Reduction =
3818       DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
3819   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3820                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3821   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
3822 }
3823 
3824 // Given a reduction op, this function returns the matching reduction opcode,
3825 // the vector SDValue and the scalar SDValue required to lower this to a
3826 // RISCVISD node.
3827 static std::tuple<unsigned, SDValue, SDValue>
3828 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
3829   SDLoc DL(Op);
3830   auto Flags = Op->getFlags();
3831   unsigned Opcode = Op.getOpcode();
3832   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
3833   switch (Opcode) {
3834   default:
3835     llvm_unreachable("Unhandled reduction");
3836   case ISD::VECREDUCE_FADD:
3837     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
3838                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3839   case ISD::VECREDUCE_SEQ_FADD:
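    // The ordered reduction takes its start value as operand 0 and the vector
    // as operand 1.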
3840     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
3841                            Op.getOperand(0));
3842   case ISD::VECREDUCE_FMIN:
3843     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
3844                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3845   case ISD::VECREDUCE_FMAX:
3846     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
3847                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3848   }
3849 }
3850 
3851 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
3852                                               SelectionDAG &DAG) const {
3853   SDLoc DL(Op);
3854   MVT VecEltVT = Op.getSimpleValueType();
3855 
3856   unsigned RVVOpcode;
3857   SDValue VectorVal, ScalarVal;
3858   std::tie(RVVOpcode, VectorVal, ScalarVal) =
3859       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
3860   MVT VecVT = VectorVal.getSimpleValueType();
3861 
3862   MVT ContainerVT = VecVT;
3863   if (VecVT.isFixedLengthVector()) {
3864     ContainerVT = getContainerForFixedLengthVector(VecVT);
3865     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
3866   }
3867 
3868   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
3869 
3870   SDValue Mask, VL;
3871   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3872 
3873   // FIXME: This is a VLMAX splat which might be too large and can prevent
3874   // vsetvli removal.
3875   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
3876   SDValue Reduction =
3877       DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
3878   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3879                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3880 }
3881 
3882 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
3883                                                    SelectionDAG &DAG) const {
3884   SDValue Vec = Op.getOperand(0);
3885   SDValue SubVec = Op.getOperand(1);
3886   MVT VecVT = Vec.getSimpleValueType();
3887   MVT SubVecVT = SubVec.getSimpleValueType();
3888 
3889   SDLoc DL(Op);
3890   MVT XLenVT = Subtarget.getXLenVT();
3891   unsigned OrigIdx = Op.getConstantOperandVal(2);
3892   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3893 
3894   // We don't have the ability to slide mask vectors up indexed by their i1
3895   // elements; the smallest we can do is i8. Often we are able to bitcast to
3896   // equivalent i8 vectors. Note that when inserting a fixed-length vector
3897   // into a scalable one, we might not necessarily have enough scalable
3898   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
3899   if (SubVecVT.getVectorElementType() == MVT::i1 &&
3900       (OrigIdx != 0 || !Vec.isUndef())) {
3901     if (VecVT.getVectorMinNumElements() >= 8 &&
3902         SubVecVT.getVectorMinNumElements() >= 8) {
3903       assert(OrigIdx % 8 == 0 && "Invalid index");
3904       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3905              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3906              "Unexpected mask vector lowering");
3907       OrigIdx /= 8;
3908       SubVecVT =
3909           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3910                            SubVecVT.isScalableVector());
3911       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3912                                VecVT.isScalableVector());
3913       Vec = DAG.getBitcast(VecVT, Vec);
3914       SubVec = DAG.getBitcast(SubVecVT, SubVec);
3915     } else {
3916       // We can't slide this mask vector up indexed by its i1 elements.
3917       // This poses a problem when we wish to insert a scalable vector which
3918       // can't be re-expressed as a larger type. Just choose the slow path and
3919       // extend to a larger type, then truncate back down.
3920       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3921       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3922       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3923       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
3924       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
3925                         Op.getOperand(2));
3926       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
3927       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
3928     }
3929   }
3930 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector as we only know the minimum
  // register size. Therefore we must slide the vector group up the full
  // amount.
3936   if (SubVecVT.isFixedLengthVector()) {
3937     if (OrigIdx == 0 && Vec.isUndef())
3938       return Op;
3939     MVT ContainerVT = VecVT;
3940     if (VecVT.isFixedLengthVector()) {
3941       ContainerVT = getContainerForFixedLengthVector(VecVT);
3942       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3943     }
3944     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
3945                          DAG.getUNDEF(ContainerVT), SubVec,
3946                          DAG.getConstant(0, DL, XLenVT));
3947     SDValue Mask =
3948         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3949     // Set the vector length to only the number of elements we care about. Note
3950     // that for slideup this includes the offset.
3951     SDValue VL =
3952         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
3953     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3954     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3955                                   SubVec, SlideupAmt, Mask, VL);
3956     if (VecVT.isFixedLengthVector())
3957       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3958     return DAG.getBitcast(Op.getValueType(), Slideup);
3959   }
3960 
3961   unsigned SubRegIdx, RemIdx;
3962   std::tie(SubRegIdx, RemIdx) =
3963       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3964           VecVT, SubVecVT, OrigIdx, TRI);
3965 
3966   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
3967   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
3968                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
3969                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
3970 
  // 1. If the Idx has been completely eliminated and this subvector's size is
  // that of a vector register or a multiple thereof, or the surrounding
  // elements are undef, then this is a subvector insert which naturally aligns
  // to a vector register. These can easily be handled using subregister
  // manipulation.
  // 2. If the subvector is smaller than a vector register, then the insertion
  // must preserve the undisturbed elements of the register. We do this by
  // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
  // (which resolves to a subregister copy), performing a VSLIDEUP to place the
  // subvector within the vector register, and an INSERT_SUBVECTOR of that
  // LMUL=1 type back into the larger vector (resolving to another subregister
  // operation). See below for how our VSLIDEUP works. We go via an LMUL=1 type
  // to avoid allocating a large register group to hold our subvector.
3983   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
3984     return Op;
3985 
  // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
  // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy
3988   // (in our case undisturbed). This means we can set up a subvector insertion
3989   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
3990   // size of the subvector.
3991   MVT InterSubVT = VecVT;
3992   SDValue AlignedExtract = Vec;
3993   unsigned AlignedIdx = OrigIdx - RemIdx;
3994   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3995     InterSubVT = getLMUL1VT(VecVT);
3996     // Extract a subvector equal to the nearest full vector register type. This
3997     // should resolve to a EXTRACT_SUBREG instruction.
3998     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3999                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
4000   }
4001 
4002   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4003   // For scalable vectors this must be further multiplied by vscale.
4004   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
4005 
4006   SDValue Mask, VL;
4007   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4008 
4009   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
4010   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
4011   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
4012   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
4013 
4014   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
4015                        DAG.getUNDEF(InterSubVT), SubVec,
4016                        DAG.getConstant(0, DL, XLenVT));
4017 
4018   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
4019                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
4020 
4021   // If required, insert this subvector back into the correct vector register.
4022   // This should resolve to an INSERT_SUBREG instruction.
4023   if (VecVT.bitsGT(InterSubVT))
4024     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
4025                           DAG.getConstant(AlignedIdx, DL, XLenVT));
4026 
4027   // We might have bitcast from a mask type: cast back to the original type if
4028   // required.
4029   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
4030 }
4031 
4032 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
4033                                                     SelectionDAG &DAG) const {
4034   SDValue Vec = Op.getOperand(0);
4035   MVT SubVecVT = Op.getSimpleValueType();
4036   MVT VecVT = Vec.getSimpleValueType();
4037 
4038   SDLoc DL(Op);
4039   MVT XLenVT = Subtarget.getXLenVT();
4040   unsigned OrigIdx = Op.getConstantOperandVal(1);
4041   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4042 
4043   // We don't have the ability to slide mask vectors down indexed by their i1
4044   // elements; the smallest we can do is i8. Often we are able to bitcast to
4045   // equivalent i8 vectors. Note that when extracting a fixed-length vector
4046   // from a scalable one, we might not necessarily have enough scalable
4047   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
4048   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
4049     if (VecVT.getVectorMinNumElements() >= 8 &&
4050         SubVecVT.getVectorMinNumElements() >= 8) {
4051       assert(OrigIdx % 8 == 0 && "Invalid index");
4052       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4053              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4054              "Unexpected mask vector lowering");
4055       OrigIdx /= 8;
4056       SubVecVT =
4057           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4058                            SubVecVT.isScalableVector());
4059       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4060                                VecVT.isScalableVector());
4061       Vec = DAG.getBitcast(VecVT, Vec);
4062     } else {
      // We can't slide this mask vector down indexed by its i1 elements.
4064       // This poses a problem when we wish to extract a scalable vector which
4065       // can't be re-expressed as a larger type. Just choose the slow path and
4066       // extend to a larger type, then truncate back down.
      // TODO: We could probably improve this when extracting certain
      // fixed-length vectors from fixed-length vectors, where we can extract
      // as i8 and shift the correct element right to reach the desired
      // subvector.
4070       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4071       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4072       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4073       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
4074                         Op.getOperand(1));
4075       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
4076       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
4077     }
4078   }
4079 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector as we only know the minimum
  // register size. Therefore we must slide the vector group down the full
  // amount.
4085   if (SubVecVT.isFixedLengthVector()) {
    // With an index of 0 this is a cast-like subvector extract, which can be
    // performed with subregister operations.
4088     if (OrigIdx == 0)
4089       return Op;
4090     MVT ContainerVT = VecVT;
4091     if (VecVT.isFixedLengthVector()) {
4092       ContainerVT = getContainerForFixedLengthVector(VecVT);
4093       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4094     }
4095     SDValue Mask =
4096         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4097     // Set the vector length to only the number of elements we care about. This
4098     // avoids sliding down elements we're going to discard straight away.
4099     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
4100     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4101     SDValue Slidedown =
4102         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4103                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
4104     // Now we can use a cast-like subvector extract to get the result.
4105     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4106                             DAG.getConstant(0, DL, XLenVT));
4107     return DAG.getBitcast(Op.getValueType(), Slidedown);
4108   }
4109 
4110   unsigned SubRegIdx, RemIdx;
4111   std::tie(SubRegIdx, RemIdx) =
4112       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4113           VecVT, SubVecVT, OrigIdx, TRI);
4114 
4115   // If the Idx has been completely eliminated then this is a subvector extract
4116   // which naturally aligns to a vector register. These can easily be handled
4117   // using subregister manipulation.
4118   if (RemIdx == 0)
4119     return Op;
4120 
4121   // Else we must shift our vector register directly to extract the subvector.
4122   // Do this using VSLIDEDOWN.
4123 
4124   // If the vector type is an LMUL-group type, extract a subvector equal to the
4125   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
4126   // instruction.
4127   MVT InterSubVT = VecVT;
4128   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4129     InterSubVT = getLMUL1VT(VecVT);
4130     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4131                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
4132   }
4133 
4134   // Slide this vector register down by the desired number of elements in order
4135   // to place the desired subvector starting at element 0.
4136   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4137   // For scalable vectors this must be further multiplied by vscale.
4138   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
4139 
4140   SDValue Mask, VL;
4141   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
4142   SDValue Slidedown =
4143       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
4144                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
4145 
4146   // Now the vector is in the right position, extract our final subvector. This
4147   // should resolve to a COPY.
4148   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4149                           DAG.getConstant(0, DL, XLenVT));
4150 
4151   // We might have bitcast from a mask type: cast back to the original type if
4152   // required.
4153   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
4154 }
4155 
// Lower step_vector to the vid instruction. Any non-identity step value must
// be accounted for by manual expansion.
4158 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
4159                                               SelectionDAG &DAG) const {
4160   SDLoc DL(Op);
4161   MVT VT = Op.getSimpleValueType();
4162   MVT XLenVT = Subtarget.getXLenVT();
4163   SDValue Mask, VL;
4164   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
4165   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4166   uint64_t StepValImm = Op.getConstantOperandVal(0);
4167   if (StepValImm != 1) {
4168     if (isPowerOf2_64(StepValImm)) {
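      // For a power-of-two step, shift left by log2(step) rather than
      // multiplying.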
4169       SDValue StepVal =
4170           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4171                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
4172       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
4173     } else {
4174       SDValue StepVal = lowerScalarSplat(
4175           DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT,
4176           DL, DAG, Subtarget);
4177       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
4178     }
4179   }
4180   return StepVec;
4181 }
4182 
4183 // Implement vector_reverse using vrgather.vv with indices determined by
4184 // subtracting the id of each element from (VLMAX-1). This will convert
4185 // the indices like so:
4186 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
4187 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
4188 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
4189                                                  SelectionDAG &DAG) const {
4190   SDLoc DL(Op);
4191   MVT VecVT = Op.getSimpleValueType();
4192   unsigned EltSize = VecVT.getScalarSizeInBits();
4193   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
4194 
4195   unsigned MaxVLMAX = 0;
4196   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
4197   if (VectorBitsMax != 0)
4198     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
4199 
4200   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
4201   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
4202 
4203   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
4204   // to use vrgatherei16.vv.
4205   // TODO: It's also possible to use vrgatherei16.vv for other types to
4206   // decrease register width for the index calculation.
4207   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
    // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
4209     // Reverse each half, then reassemble them in reverse order.
    // NOTE: It's also possible that after splitting, VLMAX no longer
    // requires vrgatherei16.vv.
4212     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
4213       SDValue Lo, Hi;
4214       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
4215       EVT LoVT, HiVT;
4216       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
4217       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
4218       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
4219       // Reassemble the low and high pieces reversed.
4220       // FIXME: This is a CONCAT_VECTORS.
4221       SDValue Res =
4222           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
4223                       DAG.getIntPtrConstant(0, DL));
4224       return DAG.getNode(
4225           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
4226           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
4227     }
4228 
4229     // Just promote the int type to i16 which will double the LMUL.
4230     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
4231     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
4232   }
4233 
4234   MVT XLenVT = Subtarget.getXLenVT();
4235   SDValue Mask, VL;
4236   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4237 
4238   // Calculate VLMAX-1 for the desired SEW.
4239   unsigned MinElts = VecVT.getVectorMinNumElements();
4240   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
4241                               DAG.getConstant(MinElts, DL, XLenVT));
4242   SDValue VLMinus1 =
4243       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
4244 
4245   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
4246   bool IsRV32E64 =
4247       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
4248   SDValue SplatVL;
4249   if (!IsRV32E64)
4250     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
4251   else
4252     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
4253 
4254   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
4255   SDValue Indices =
4256       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
4257 
4258   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
4259 }
4260 
4261 SDValue
4262 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
4263                                                      SelectionDAG &DAG) const {
4264   SDLoc DL(Op);
4265   auto *Load = cast<LoadSDNode>(Op);
4266 
4267   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4268                                         Load->getMemoryVT(),
4269                                         *Load->getMemOperand()) &&
4270          "Expecting a correctly-aligned load");
4271 
4272   MVT VT = Op.getSimpleValueType();
4273   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4274 
4275   SDValue VL =
4276       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4277 
4278   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4279   SDValue NewLoad = DAG.getMemIntrinsicNode(
4280       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
4281       Load->getMemoryVT(), Load->getMemOperand());
4282 
4283   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4284   return DAG.getMergeValues({Result, Load->getChain()}, DL);
4285 }
4286 
4287 SDValue
4288 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
4289                                                       SelectionDAG &DAG) const {
4290   SDLoc DL(Op);
4291   auto *Store = cast<StoreSDNode>(Op);
4292 
4293   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4294                                         Store->getMemoryVT(),
4295                                         *Store->getMemOperand()) &&
4296          "Expecting a correctly-aligned store");
4297 
4298   SDValue StoreVal = Store->getValue();
4299   MVT VT = StoreVal.getSimpleValueType();
4300 
  // If the size is less than a byte, we need to pad with zeros to make a
  // byte.
4302   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
4303     VT = MVT::v8i1;
4304     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
4305                            DAG.getConstant(0, DL, VT), StoreVal,
4306                            DAG.getIntPtrConstant(0, DL));
4307   }
4308 
4309   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4310 
4311   SDValue VL =
4312       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4313 
4314   SDValue NewValue =
4315       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
4316   return DAG.getMemIntrinsicNode(
4317       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
4318       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
4319       Store->getMemoryVT(), Store->getMemOperand());
4320 }
4321 
4322 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
4323   auto *Load = cast<MaskedLoadSDNode>(Op);
4324 
4325   SDLoc DL(Op);
4326   MVT VT = Op.getSimpleValueType();
4327   MVT XLenVT = Subtarget.getXLenVT();
4328 
4329   SDValue Mask = Load->getMask();
4330   SDValue PassThru = Load->getPassThru();
4331   SDValue VL;
4332 
4333   MVT ContainerVT = VT;
4334   if (VT.isFixedLengthVector()) {
4335     ContainerVT = getContainerForFixedLengthVector(VT);
4336     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4337 
4338     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4339     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4340     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4341   } else
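    // For scalable vectors, use X0 as the AVL operand to request VLMAX.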
4342     VL = DAG.getRegister(RISCV::X0, XLenVT);
4343 
4344   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4345   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
4346   SDValue Ops[] = {Load->getChain(),   IntID, PassThru,
4347                    Load->getBasePtr(), Mask,  VL};
4348   SDValue Result =
4349       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4350                               Load->getMemoryVT(), Load->getMemOperand());
4351   SDValue Chain = Result.getValue(1);
4352 
4353   if (VT.isFixedLengthVector())
4354     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4355 
4356   return DAG.getMergeValues({Result, Chain}, DL);
4357 }
4358 
4359 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const {
4360   auto *Store = cast<MaskedStoreSDNode>(Op);
4361 
4362   SDLoc DL(Op);
4363   SDValue Val = Store->getValue();
4364   SDValue Mask = Store->getMask();
4365   MVT VT = Val.getSimpleValueType();
4366   MVT XLenVT = Subtarget.getXLenVT();
4367   SDValue VL;
4368 
4369   MVT ContainerVT = VT;
4370   if (VT.isFixedLengthVector()) {
4371     ContainerVT = getContainerForFixedLengthVector(VT);
4372     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4373 
4374     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4375     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4376     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4377   } else
4378     VL = DAG.getRegister(RISCV::X0, XLenVT);
4379 
4380   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
4381   return DAG.getMemIntrinsicNode(
4382       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
4383       {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL},
4384       Store->getMemoryVT(), Store->getMemOperand());
4385 }
4386 
4387 SDValue
4388 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
4389                                                       SelectionDAG &DAG) const {
4390   MVT InVT = Op.getOperand(0).getSimpleValueType();
4391   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
4392 
4393   MVT VT = Op.getSimpleValueType();
4394 
4395   SDValue Op1 =
4396       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4397   SDValue Op2 =
4398       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4399 
4400   SDLoc DL(Op);
4401   SDValue VL =
4402       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4403 
4404   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4405   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4406 
4407   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
4408                             Op.getOperand(2), Mask, VL);
4409 
4410   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
4411 }
4412 
4413 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
4414     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
4415   MVT VT = Op.getSimpleValueType();
4416 
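  // Mask (i1) vectors are lowered with the mask-register logical opcode, which
  // takes no mask operand; other element types use the masked vector opcode.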
4417   if (VT.getVectorElementType() == MVT::i1)
4418     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
4419 
4420   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
4421 }
4422 
4423 SDValue
4424 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
4425                                                       SelectionDAG &DAG) const {
4426   unsigned Opc;
4427   switch (Op.getOpcode()) {
4428   default: llvm_unreachable("Unexpected opcode!");
4429   case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
4430   case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
4431   case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
4432   }
4433 
4434   return lowerToScalableOp(Op, DAG, Opc);
4435 }
4436 
4437 // Lower vector ABS to smax(X, sub(0, X)).
4438 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
4439   SDLoc DL(Op);
4440   MVT VT = Op.getSimpleValueType();
4441   SDValue X = Op.getOperand(0);
4442 
4443   assert(VT.isFixedLengthVector() && "Unexpected type");
4444 
4445   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4446   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
4447 
4448   SDValue Mask, VL;
4449   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4450 
4451   SDValue SplatZero =
4452       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4453                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4454   SDValue NegX =
4455       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
4456   SDValue Max =
4457       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
4458 
4459   return convertFromScalableVector(VT, Max, DAG, Subtarget);
4460 }
4461 
4462 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
4463     SDValue Op, SelectionDAG &DAG) const {
4464   SDLoc DL(Op);
4465   MVT VT = Op.getSimpleValueType();
4466   SDValue Mag = Op.getOperand(0);
4467   SDValue Sign = Op.getOperand(1);
4468   assert(Mag.getValueType() == Sign.getValueType() &&
4469          "Can only handle COPYSIGN with matching types.");
4470 
4471   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4472   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
4473   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
4474 
4475   SDValue Mask, VL;
4476   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4477 
4478   SDValue CopySign =
4479       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
4480 
4481   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
4482 }
4483 
4484 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
4485     SDValue Op, SelectionDAG &DAG) const {
4486   MVT VT = Op.getSimpleValueType();
4487   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4488 
4489   MVT I1ContainerVT =
4490       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4491 
4492   SDValue CC =
4493       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
4494   SDValue Op1 =
4495       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4496   SDValue Op2 =
4497       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
4498 
4499   SDLoc DL(Op);
4500   SDValue Mask, VL;
4501   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4502 
4503   SDValue Select =
4504       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
4505 
4506   return convertFromScalableVector(VT, Select, DAG, Subtarget);
4507 }
4508 
4509 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
4510                                                unsigned NewOpc,
4511                                                bool HasMask) const {
4512   MVT VT = Op.getSimpleValueType();
4513   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4514 
4515   // Create list of operands by converting existing ones to scalable types.
4516   SmallVector<SDValue, 6> Ops;
4517   for (const SDValue &V : Op->op_values()) {
4518     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4519 
4520     // Pass through non-vector operands.
4521     if (!V.getValueType().isVector()) {
4522       Ops.push_back(V);
4523       continue;
4524     }
4525 
4526     // "cast" fixed length vector to a scalable vector.
4527     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
4528            "Only fixed length vectors are supported!");
4529     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4530   }
4531 
4532   SDLoc DL(Op);
4533   SDValue Mask, VL;
4534   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4535   if (HasMask)
4536     Ops.push_back(Mask);
4537   Ops.push_back(VL);
4538 
4539   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
4540   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
4541 }
4542 
4543 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
4544 // * Operands of each node are assumed to be in the same order.
4545 // * The EVL operand is promoted from i32 to i64 on RV64.
4546 // * Fixed-length vectors are converted to their scalable-vector container
4547 //   types.
4548 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
4549                                        unsigned RISCVISDOpc) const {
4550   SDLoc DL(Op);
4551   MVT VT = Op.getSimpleValueType();
4552   SmallVector<SDValue, 4> Ops;
4553 
4554   for (const auto &OpIdx : enumerate(Op->ops())) {
4555     SDValue V = OpIdx.value();
4556     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4557     // Pass through operands which aren't fixed-length vectors.
4558     if (!V.getValueType().isFixedLengthVector()) {
4559       Ops.push_back(V);
4560       continue;
4561     }
4562     // "cast" fixed length vector to a scalable vector.
4563     MVT OpVT = V.getSimpleValueType();
4564     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
4565     assert(useRVVForFixedLengthVectorVT(OpVT) &&
4566            "Only fixed length vectors are supported!");
4567     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4568   }
4569 
4570   if (!VT.isFixedLengthVector())
4571     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
4572 
4573   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4574 
4575   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
4576 
4577   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
4578 }
4579 
// Custom lower MGATHER to a legalized form for RVV. It will then be matched to
// an RVV indexed load. The RVV indexed load instructions only support the
4582 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4583 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4584 // indexing is extended to the XLEN value type and scaled accordingly.
4585 SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
4586   auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
4587   SDLoc DL(Op);
4588 
4589   SDValue Index = MGN->getIndex();
4590   SDValue Mask = MGN->getMask();
4591   SDValue PassThru = MGN->getPassThru();
4592 
4593   MVT VT = Op.getSimpleValueType();
4594   MVT IndexVT = Index.getSimpleValueType();
4595   MVT XLenVT = Subtarget.getXLenVT();
4596 
4597   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4598          "Unexpected VTs!");
4599   assert(MGN->getBasePtr().getSimpleValueType() == XLenVT &&
4600          "Unexpected pointer type");
4601   // Targets have to explicitly opt-in for extending vector loads.
4602   assert(MGN->getExtensionType() == ISD::NON_EXTLOAD &&
4603          "Unexpected extending MGATHER");
4604 
4605   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4606   // the selection of the masked intrinsics doesn't do this for us.
4607   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4608 
4609   SDValue VL;
4610   MVT ContainerVT = VT;
4611   if (VT.isFixedLengthVector()) {
4612     // We need to use the larger of the result and index type to determine the
4613     // scalable type to use so we don't increase LMUL for any operand/result.
4614     if (VT.bitsGE(IndexVT)) {
4615       ContainerVT = getContainerForFixedLengthVector(VT);
4616       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4617                                  ContainerVT.getVectorElementCount());
4618     } else {
4619       IndexVT = getContainerForFixedLengthVector(IndexVT);
4620       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
4621                                      IndexVT.getVectorElementCount());
4622     }
4623 
4624     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4625 
4626     if (!IsUnmasked) {
4627       MVT MaskVT =
4628           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4629       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4630       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4631     }
4632 
4633     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4634   } else
4635     VL = DAG.getRegister(RISCV::X0, XLenVT);
4636 
4637   unsigned IntID =
4638       IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
4639   SmallVector<SDValue, 8> Ops{MGN->getChain(),
4640                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4641   if (!IsUnmasked)
4642     Ops.push_back(PassThru);
4643   Ops.push_back(MGN->getBasePtr());
4644   Ops.push_back(Index);
4645   if (!IsUnmasked)
4646     Ops.push_back(Mask);
4647   Ops.push_back(VL);
4648 
4649   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4650   SDValue Result =
4651       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4652                               MGN->getMemoryVT(), MGN->getMemOperand());
4653   SDValue Chain = Result.getValue(1);
4654 
4655   if (VT.isFixedLengthVector())
4656     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4657 
4658   return DAG.getMergeValues({Result, Chain}, DL);
4659 }
4660 
// Custom lower MSCATTER to a legalized form for RVV. It will then be matched
// to an RVV indexed store. The RVV indexed store instructions only support the
4663 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4664 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4665 // indexing is extended to the XLEN value type and scaled accordingly.
4666 SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
4667                                            SelectionDAG &DAG) const {
4668   auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
4669   SDLoc DL(Op);
4670   SDValue Index = MSN->getIndex();
4671   SDValue Mask = MSN->getMask();
4672   SDValue Val = MSN->getValue();
4673 
4674   MVT VT = Val.getSimpleValueType();
4675   MVT IndexVT = Index.getSimpleValueType();
4676   MVT XLenVT = Subtarget.getXLenVT();
4677 
4678   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4679          "Unexpected VTs!");
4680   assert(MSN->getBasePtr().getSimpleValueType() == XLenVT &&
4681          "Unexpected pointer type");
4682   // Targets have to explicitly opt-in for extending vector loads and
4683   // truncating vector stores.
  assert(!MSN->isTruncatingStore() && "Unexpected truncating MSCATTER");
4685 
4686   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4687   // the selection of the masked intrinsics doesn't do this for us.
4688   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4689 
4690   SDValue VL;
4691   if (VT.isFixedLengthVector()) {
4692     // We need to use the larger of the value and index type to determine the
4693     // scalable type to use so we don't increase LMUL for any operand/result.
4694     MVT ContainerVT;
4695     if (VT.bitsGE(IndexVT)) {
4696       ContainerVT = getContainerForFixedLengthVector(VT);
4697       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4698                                  ContainerVT.getVectorElementCount());
4699     } else {
4700       IndexVT = getContainerForFixedLengthVector(IndexVT);
4701       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4702                                      IndexVT.getVectorElementCount());
4703     }
4704 
4705     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4706     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4707 
4708     if (!IsUnmasked) {
4709       MVT MaskVT =
4710           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4711       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4712     }
4713 
4714     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4715   } else
4716     VL = DAG.getRegister(RISCV::X0, XLenVT);
4717 
4718   unsigned IntID =
4719       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
4720   SmallVector<SDValue, 8> Ops{MSN->getChain(),
4721                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4722   Ops.push_back(Val);
4723   Ops.push_back(MSN->getBasePtr());
4724   Ops.push_back(Index);
4725   if (!IsUnmasked)
4726     Ops.push_back(Mask);
4727   Ops.push_back(VL);
4728 
4729   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
4730                                  MSN->getMemoryVT(), MSN->getMemOperand());
4731 }
4732 
4733 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
4734                                                SelectionDAG &DAG) const {
4735   const MVT XLenVT = Subtarget.getXLenVT();
4736   SDLoc DL(Op);
4737   SDValue Chain = Op->getOperand(0);
4738   SDValue SysRegNo = DAG.getConstant(
4739       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4740   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
4741   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
4742 
  // The encoding used for the rounding mode in RISCV differs from that used in
  // FLT_ROUNDS. To convert between them, the RISCV rounding mode is used as an
  // index into a table consisting of a sequence of 4-bit fields, each
  // representing the corresponding FLT_ROUNDS mode.
4747   static const int Table =
4748       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
4749       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
4750       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
4751       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
4752       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
4753 
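  // Shift the rounding mode left by 2 (multiply by 4) to find the bit offset
  // of its 4-bit field, shift the table right by that amount, and mask with 7
  // to extract the FLT_ROUNDS value.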
4754   SDValue Shift =
4755       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
4756   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4757                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4758   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4759                                DAG.getConstant(7, DL, XLenVT));
4760 
4761   return DAG.getMergeValues({Masked, Chain}, DL);
4762 }
4763 
4764 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
4765                                                SelectionDAG &DAG) const {
4766   const MVT XLenVT = Subtarget.getXLenVT();
4767   SDLoc DL(Op);
4768   SDValue Chain = Op->getOperand(0);
4769   SDValue RMValue = Op->getOperand(1);
4770   SDValue SysRegNo = DAG.getConstant(
4771       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4772 
  // The encoding used for the rounding mode in RISCV differs from that used in
  // FLT_ROUNDS. To convert between them, the C rounding mode is used as an
  // index into a table consisting of a sequence of 4-bit fields, each
  // representing the corresponding RISCV mode.
4777   static const unsigned Table =
4778       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
4779       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
4780       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
4781       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
4782       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
4783 
4784   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
4785                               DAG.getConstant(2, DL, XLenVT));
4786   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4787                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4788   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4789                         DAG.getConstant(0x7, DL, XLenVT));
4790   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
4791                      RMValue);
4792 }
4793 
4794 // Returns the opcode of the target-specific SDNode that implements the 32-bit
4795 // form of the given Opcode.
4796 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
4797   switch (Opcode) {
4798   default:
4799     llvm_unreachable("Unexpected opcode");
4800   case ISD::SHL:
4801     return RISCVISD::SLLW;
4802   case ISD::SRA:
4803     return RISCVISD::SRAW;
4804   case ISD::SRL:
4805     return RISCVISD::SRLW;
4806   case ISD::SDIV:
4807     return RISCVISD::DIVW;
4808   case ISD::UDIV:
4809     return RISCVISD::DIVUW;
4810   case ISD::UREM:
4811     return RISCVISD::REMUW;
4812   case ISD::ROTL:
4813     return RISCVISD::ROLW;
4814   case ISD::ROTR:
4815     return RISCVISD::RORW;
4816   case RISCVISD::GREV:
4817     return RISCVISD::GREVW;
4818   case RISCVISD::GORC:
4819     return RISCVISD::GORCW;
4820   }
4821 }
4822 
// Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
// node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
// otherwise be promoted to i64, making it difficult to select the
// SLLW/DIVUW/.../*W later on because the fact that the operation was
// originally of type i8/i16/i32 is lost.
4828 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
4829                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
4830   SDLoc DL(N);
4831   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4832   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
4833   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
4834   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4835   // ReplaceNodeResults requires we maintain the same type for the return value.
4836   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
4837 }
4838 
// Converts the given 32-bit operation to an i64 operation with sign-extension
// semantics to reduce the number of sign-extension instructions.
4841 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
4842   SDLoc DL(N);
4843   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4844   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4845   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
4846   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
4847                                DAG.getValueType(MVT::i32));
4848   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
4849 }
4850 
4851 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
4852                                              SmallVectorImpl<SDValue> &Results,
4853                                              SelectionDAG &DAG) const {
4854   SDLoc DL(N);
4855   switch (N->getOpcode()) {
4856   default:
4857     llvm_unreachable("Don't know how to custom type legalize this operation!");
4858   case ISD::STRICT_FP_TO_SINT:
4859   case ISD::STRICT_FP_TO_UINT:
4860   case ISD::FP_TO_SINT:
4861   case ISD::FP_TO_UINT: {
4862     bool IsStrict = N->isStrictFPOpcode();
4863     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4864            "Unexpected custom legalisation");
4865     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
4866     // If the FP type needs to be softened, emit a library call using the 'si'
4867     // version. If we left it to default legalization we'd end up with 'di'. If
4868     // the FP type doesn't need to be softened just let generic type
4869     // legalization promote the result type.
4870     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
4871         TargetLowering::TypeSoftenFloat)
4872       return;
4873     RTLIB::Libcall LC;
4874     if (N->getOpcode() == ISD::FP_TO_SINT ||
4875         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
4876       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
4877     else
4878       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
4879     MakeLibCallOptions CallOptions;
4880     EVT OpVT = Op0.getValueType();
4881     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
4882     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
4883     SDValue Result;
4884     std::tie(Result, Chain) =
4885         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
4886     Results.push_back(Result);
4887     if (IsStrict)
4888       Results.push_back(Chain);
4889     break;
4890   }
4891   case ISD::READCYCLECOUNTER: {
4892     assert(!Subtarget.is64Bit() &&
4893            "READCYCLECOUNTER only has custom type legalization on riscv32");
4894 
4895     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4896     SDValue RCW =
4897         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
4898 
4899     Results.push_back(
4900         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
4901     Results.push_back(RCW.getValue(2));
4902     break;
4903   }
4904   case ISD::MUL: {
4905     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
4906     unsigned XLen = Subtarget.getXLen();
4907     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
4908     if (Size > XLen) {
4909       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
4910       SDValue LHS = N->getOperand(0);
4911       SDValue RHS = N->getOperand(1);
4912       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
4913 
4914       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
4915       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
4916       // We need exactly one side to be unsigned.
4917       if (LHSIsU == RHSIsU)
4918         return;
4919 
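      // Build the double-width result from MUL (low XLEN bits) and MULHSU
      // (high bits of a signed*unsigned product); S is the signed operand and
      // U the unsigned one.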
4920       auto MakeMULPair = [&](SDValue S, SDValue U) {
4921         MVT XLenVT = Subtarget.getXLenVT();
4922         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
4923         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
4924         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
4925         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
4926         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
4927       };
4928 
4929       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
4930       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
4931 
4932       // The other operand should be signed, but still prefer MULH when
4933       // possible.
4934       if (RHSIsU && LHSIsS && !RHSIsS)
4935         Results.push_back(MakeMULPair(LHS, RHS));
4936       else if (LHSIsU && RHSIsS && !LHSIsS)
4937         Results.push_back(MakeMULPair(RHS, LHS));
4938 
4939       return;
4940     }
4941     LLVM_FALLTHROUGH;
4942   }
4943   case ISD::ADD:
4944   case ISD::SUB:
4945     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4946            "Unexpected custom legalisation");
4947     if (N->getOperand(1).getOpcode() == ISD::Constant)
4948       return;
4949     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
4950     break;
4951   case ISD::SHL:
4952   case ISD::SRA:
4953   case ISD::SRL:
4954     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4955            "Unexpected custom legalisation");
4956     if (N->getOperand(1).getOpcode() == ISD::Constant)
4957       return;
4958     Results.push_back(customLegalizeToWOp(N, DAG));
4959     break;
4960   case ISD::ROTL:
4961   case ISD::ROTR:
4962     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4963            "Unexpected custom legalisation");
4964     Results.push_back(customLegalizeToWOp(N, DAG));
4965     break;
4966   case ISD::CTTZ:
4967   case ISD::CTTZ_ZERO_UNDEF:
4968   case ISD::CTLZ:
4969   case ISD::CTLZ_ZERO_UNDEF: {
4970     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4971            "Unexpected custom legalisation");
4972 
4973     SDValue NewOp0 =
4974         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4975     bool IsCTZ =
4976         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
4977     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
4978     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
4979     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4980     return;
4981   }
4982   case ISD::SDIV:
4983   case ISD::UDIV:
4984   case ISD::UREM: {
4985     MVT VT = N->getSimpleValueType(0);
4986     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
4987            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
4988            "Unexpected custom legalisation");
    // Don't promote division/remainder by constant since we should expand
    // those to a multiply by a magic constant instead.
    // FIXME: What if the expansion is disabled for minsize?
4992     if (N->getOperand(1).getOpcode() == ISD::Constant)
4993       return;
4994 
4995     // If the input is i32, use ANY_EXTEND since the W instructions don't read
4996     // the upper 32 bits. For other types we need to sign or zero extend
4997     // based on the opcode.
4998     unsigned ExtOpc = ISD::ANY_EXTEND;
4999     if (VT != MVT::i32)
5000       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
5001                                            : ISD::ZERO_EXTEND;
5002 
5003     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
5004     break;
5005   }
5006   case ISD::UADDO:
5007   case ISD::USUBO: {
5008     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5009            "Unexpected custom legalisation");
5010     bool IsAdd = N->getOpcode() == ISD::UADDO;
5011     // Create an ADDW or SUBW.
5012     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5013     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5014     SDValue Res =
5015         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
5016     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
5017                       DAG.getValueType(MVT::i32));
5018 
    // Sign extend the LHS and perform an unsigned compare with the ADDW/SUBW
    // result. Since the inputs are sign extended from i32, this is equivalent
    // to comparing the lower 32 bits.
5022     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
5023     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
5024                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
5025 
5026     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5027     Results.push_back(Overflow);
5028     return;
5029   }
5030   case ISD::UADDSAT:
5031   case ISD::USUBSAT: {
5032     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5033            "Unexpected custom legalisation");
5034     if (Subtarget.hasStdExtZbb()) {
5035       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
5036       // sign extend allows overflow of the lower 32 bits to be detected on
5037       // the promoted size.
5038       SDValue LHS =
5039           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
5040       SDValue RHS =
5041           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
5042       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
5043       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5044       return;
5045     }
5046 
5047     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
5048     // promotion for UADDO/USUBO.
5049     Results.push_back(expandAddSubSat(N, DAG));
5050     return;
5051   }
5052   case ISD::BITCAST: {
5053     EVT VT = N->getValueType(0);
5054     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
5055     SDValue Op0 = N->getOperand(0);
5056     EVT Op0VT = Op0.getValueType();
5057     MVT XLenVT = Subtarget.getXLenVT();
5058     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
5059       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
5060       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
5061     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
5062                Subtarget.hasStdExtF()) {
5063       SDValue FPConv =
5064           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
5065       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
5066     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
5067                isTypeLegal(Op0VT)) {
5068       // Custom-legalize bitcasts from fixed-length vector types to illegal
5069       // scalar types in order to improve codegen. Bitcast the vector to a
5070       // one-element vector type whose element type is the same as the result
5071       // type, and extract the first element.
5072       LLVMContext &Context = *DAG.getContext();
5073       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
5074       Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
5075                                     DAG.getConstant(0, DL, XLenVT)));
5076     }
5077     break;
5078   }
5079   case RISCVISD::GREV:
5080   case RISCVISD::GORC: {
5081     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5082            "Unexpected custom legalisation");
5083     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
    // This is equivalent to customLegalizeToWOp: any-extend both operands
    // (including the constant control value), emit the W-form node and
    // truncate the result back to i32.
5087     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
5088     SDValue NewOp0 =
5089         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5090     SDValue NewOp1 =
5091         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5092     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
5093     // ReplaceNodeResults requires we maintain the same type for the return
5094     // value.
5095     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5096     break;
5097   }
5098   case RISCVISD::SHFL: {
5099     // There is no SHFLIW instruction, but we can just promote the operation.
5100     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5101            "Unexpected custom legalisation");
5102     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
5103     SDValue NewOp0 =
5104         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5105     SDValue NewOp1 =
5106         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5107     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
5108     // ReplaceNodeResults requires we maintain the same type for the return
5109     // value.
5110     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5111     break;
5112   }
5113   case ISD::BSWAP:
5114   case ISD::BITREVERSE: {
5115     MVT VT = N->getSimpleValueType(0);
5116     MVT XLenVT = Subtarget.getXLenVT();
5117     assert((VT == MVT::i8 || VT == MVT::i16 ||
5118             (VT == MVT::i32 && Subtarget.is64Bit())) &&
5119            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
5120     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
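    // A GREV with shift amount SizeInBits-1 reverses every bit in the value.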
5121     unsigned Imm = VT.getSizeInBits() - 1;
5122     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
5123     if (N->getOpcode() == ISD::BSWAP)
5124       Imm &= ~0x7U;
5125     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
5126     SDValue GREVI =
5127         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
5128     // ReplaceNodeResults requires we maintain the same type for the return
5129     // value.
5130     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
5131     break;
5132   }
5133   case ISD::FSHL:
5134   case ISD::FSHR: {
5135     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5136            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
5137     SDValue NewOp0 =
5138         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5139     SDValue NewOp1 =
5140         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5141     SDValue NewOp2 =
5142         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5143     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
5144     // Mask the shift amount to 5 bits.
5145     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5146                          DAG.getConstant(0x1f, DL, MVT::i64));
5147     unsigned Opc =
5148         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
5149     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
5150     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
5151     break;
5152   }
5153   case ISD::EXTRACT_VECTOR_ELT: {
    // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN < SEW, as the SEW
    // element type is illegal (currently only vXi64 on RV32).
    // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
    // transferred to the destination register. We issue two of these from the
    // upper and lower halves of the SEW-bit vector element, slid down to the
    // first element.
5160     SDValue Vec = N->getOperand(0);
5161     SDValue Idx = N->getOperand(1);
5162 
    // The vector type hasn't been legalized yet so we can't issue
    // target-specific nodes if it needs legalization.
    // FIXME: We could manually legalize this if it proves important.
5166     if (!isTypeLegal(Vec.getValueType()))
5167       return;
5168 
5169     MVT VecVT = Vec.getSimpleValueType();
5170 
5171     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
5172            VecVT.getVectorElementType() == MVT::i64 &&
5173            "Unexpected EXTRACT_VECTOR_ELT legalization");
5174 
5175     // If this is a fixed vector, we need to convert it to a scalable vector.
5176     MVT ContainerVT = VecVT;
5177     if (VecVT.isFixedLengthVector()) {
5178       ContainerVT = getContainerForFixedLengthVector(VecVT);
5179       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5180     }
5181 
5182     MVT XLenVT = Subtarget.getXLenVT();
5183 
5184     // Use a VL of 1 to avoid processing more elements than we need.
5185     MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5186     SDValue VL = DAG.getConstant(1, DL, XLenVT);
5187     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5188 
5189     // Unless the index is known to be 0, we must slide the vector down to get
5190     // the desired element into index 0.
5191     if (!isNullConstant(Idx)) {
5192       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5193                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
5194     }
5195 
5196     // Extract the lower XLEN bits of the correct vector element.
5197     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5198 
5199     // To extract the upper XLEN bits of the vector element, shift the first
5200     // element right by 32 bits and re-extract the lower XLEN bits.
5201     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5202                                      DAG.getConstant(32, DL, XLenVT), VL);
5203     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
5204                                  ThirtyTwoV, Mask, VL);
5205 
5206     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5207 
5208     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5209     break;
5210   }
5211   case ISD::INTRINSIC_WO_CHAIN: {
5212     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5213     switch (IntNo) {
5214     default:
5215       llvm_unreachable(
5216           "Don't know how to custom type legalize this intrinsic!");
5217     case Intrinsic::riscv_orc_b: {
5218       // Lower to the GORCI encoding for orc.b with the operand extended.
5219       SDValue NewOp =
5220           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5221       // If Zbp is enabled, use GORCIW which will sign extend the result.
5222       unsigned Opc =
5223           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
5224       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
5225                                 DAG.getConstant(7, DL, MVT::i64));
5226       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5227       return;
5228     }
5229     case Intrinsic::riscv_grev:
5230     case Intrinsic::riscv_gorc: {
5231       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5232              "Unexpected custom legalisation");
5233       SDValue NewOp1 =
5234           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5235       SDValue NewOp2 =
5236           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5237       unsigned Opc =
5238           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
5239       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5240       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5241       break;
5242     }
5243     case Intrinsic::riscv_shfl:
5244     case Intrinsic::riscv_unshfl: {
5245       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5246              "Unexpected custom legalisation");
5247       SDValue NewOp1 =
5248           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5249       SDValue NewOp2 =
5250           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5251       unsigned Opc =
5252           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
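      // There is no (UN)SHFLIW. If the control word is a constant, mask it to
      // 4 bits and use (UN)SHFLI instead: the upper 32-bit half is shuffled
      // the same way as the lower half and the two halves don't cross.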
5253       if (isa<ConstantSDNode>(N->getOperand(2))) {
5254         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5255                              DAG.getConstant(0xf, DL, MVT::i64));
5256         Opc =
5257             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
5258       }
5259       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5260       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5261       break;
5262     }
5263     case Intrinsic::riscv_bcompress:
5264     case Intrinsic::riscv_bdecompress: {
5265       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5266              "Unexpected custom legalisation");
5267       SDValue NewOp1 =
5268           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5269       SDValue NewOp2 =
5270           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5271       unsigned Opc = IntNo == Intrinsic::riscv_bcompress
5272                          ? RISCVISD::BCOMPRESSW
5273                          : RISCVISD::BDECOMPRESSW;
5274       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5275       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5276       break;
5277     }
5278     case Intrinsic::riscv_vmv_x_s: {
5279       EVT VT = N->getValueType(0);
5280       MVT XLenVT = Subtarget.getXLenVT();
5281       if (VT.bitsLT(XLenVT)) {
        // Simple case: just extract using vmv.x.s and truncate.
5283         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
5284                                       Subtarget.getXLenVT(), N->getOperand(1));
5285         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
5286         return;
5287       }
5288 
5289       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
5290              "Unexpected custom legalization");
5291 
5292       // We need to do the move in two steps.
5293       SDValue Vec = N->getOperand(1);
5294       MVT VecVT = Vec.getSimpleValueType();
5295 
5296       // First extract the lower XLEN bits of the element.
5297       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5298 
5299       // To extract the upper XLEN bits of the vector element, shift the first
5300       // element right by 32 bits and re-extract the lower XLEN bits.
5301       SDValue VL = DAG.getConstant(1, DL, XLenVT);
5302       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5303       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5304       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
5305                                        DAG.getConstant(32, DL, XLenVT), VL);
5306       SDValue LShr32 =
5307           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
5308       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5309 
5310       Results.push_back(
5311           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5312       break;
5313     }
5314     }
5315     break;
5316   }
5317   case ISD::VECREDUCE_ADD:
5318   case ISD::VECREDUCE_AND:
5319   case ISD::VECREDUCE_OR:
5320   case ISD::VECREDUCE_XOR:
5321   case ISD::VECREDUCE_SMAX:
5322   case ISD::VECREDUCE_UMAX:
5323   case ISD::VECREDUCE_SMIN:
5324   case ISD::VECREDUCE_UMIN:
5325     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
5326       Results.push_back(V);
5327     break;
5328   case ISD::FLT_ROUNDS_: {
5329     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
5330     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
5331     Results.push_back(Res.getValue(0));
5332     Results.push_back(Res.getValue(1));
5333     break;
5334   }
5335   }
5336 }
5337 
5338 // A structure to hold one of the bit-manipulation patterns below. Together, a
5339 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
5340 //   (or (and (shl x, 1), 0xAAAAAAAA),
5341 //       (and (srl x, 1), 0x55555555))
5342 struct RISCVBitmanipPat {
5343   SDValue Op;
5344   unsigned ShAmt;
5345   bool IsSHL;
5346 
5347   bool formsPairWith(const RISCVBitmanipPat &Other) const {
5348     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
5349   }
5350 };
5351 
5352 // Matches patterns of the form
5353 //   (and (shl x, C2), (C1 << C2))
5354 //   (and (srl x, C2), C1)
5355 //   (shl (and x, C1), C2)
5356 //   (srl (and x, (C1 << C2)), C2)
// where C2 is a power of 2 and C1 has at least that many leading zeroes.
// The expected masks for each shift amount are specified in BitmanipMasks,
// where BitmanipMasks[log2(C2)] gives the expected C1 value.
// The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
// whether BitmanipMasks contains 6 or 5 entries respectively, assuming the
// maximum possible XLen is 64.
5363 static Optional<RISCVBitmanipPat>
5364 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
5365   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
5366          "Unexpected number of masks");
5367   Optional<uint64_t> Mask;
5368   // Optionally consume a mask around the shift operation.
5369   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
5370     Mask = Op.getConstantOperandVal(1);
5371     Op = Op.getOperand(0);
5372   }
5373   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
5374     return None;
5375   bool IsSHL = Op.getOpcode() == ISD::SHL;
5376 
5377   if (!isa<ConstantSDNode>(Op.getOperand(1)))
5378     return None;
5379   uint64_t ShAmt = Op.getConstantOperandVal(1);
5380 
5381   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5382   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
5383     return None;
5384   // If we don't have enough masks for 64 bit, then we must be trying to
5385   // match SHFL so we're only allowed to shift 1/4 of the width.
5386   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
5387     return None;
5388 
5389   SDValue Src = Op.getOperand(0);
5390 
5391   // The expected mask is shifted left when the AND is found around SHL
5392   // patterns.
5393   //   ((x >> 1) & 0x55555555)
5394   //   ((x << 1) & 0xAAAAAAAA)
5395   bool SHLExpMask = IsSHL;
5396 
5397   if (!Mask) {
5398     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
5399     // the mask is all ones: consume that now.
5400     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
5401       Mask = Src.getConstantOperandVal(1);
5402       Src = Src.getOperand(0);
5403       // The expected mask is now in fact shifted left for SRL, so reverse the
5404       // decision.
5405       //   ((x & 0xAAAAAAAA) >> 1)
5406       //   ((x & 0x55555555) << 1)
5407       SHLExpMask = !SHLExpMask;
5408     } else {
5409       // Use a default shifted mask of all-ones if there's no AND, truncated
5410       // down to the expected width. This simplifies the logic later on.
5411       Mask = maskTrailingOnes<uint64_t>(Width);
5412       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
5413     }
5414   }
5415 
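  // Look up the expected mask for this shift amount, truncated to the operand
  // width, and shift it left when the AND was found around an SHL pattern.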
5416   unsigned MaskIdx = Log2_32(ShAmt);
5417   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5418 
5419   if (SHLExpMask)
5420     ExpMask <<= ShAmt;
5421 
5422   if (Mask != ExpMask)
5423     return None;
5424 
5425   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
5426 }
5427 
5428 // Matches any of the following bit-manipulation patterns:
5429 //   (and (shl x, 1), (0x55555555 << 1))
5430 //   (and (srl x, 1), 0x55555555)
5431 //   (shl (and x, 0x55555555), 1)
5432 //   (srl (and x, (0x55555555 << 1)), 1)
5433 // where the shift amount and mask may vary thus:
5434 //   [1]  = 0x55555555 / 0xAAAAAAAA
5435 //   [2]  = 0x33333333 / 0xCCCCCCCC
5436 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
5437 //   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
5439 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
5440 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
5441   // These are the unshifted masks which we use to match bit-manipulation
5442   // patterns. They may be shifted left in certain circumstances.
5443   static const uint64_t BitmanipMasks[] = {
5444       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
5445       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
5446 
5447   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5448 }
5449 
5450 // Match the following pattern as a GREVI(W) operation
5451 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
5452 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
5453                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5455   EVT VT = Op.getValueType();
5456 
5457   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5458     auto LHS = matchGREVIPat(Op.getOperand(0));
5459     auto RHS = matchGREVIPat(Op.getOperand(1));
5460     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
5461       SDLoc DL(Op);
5462       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
5463                          DAG.getConstant(LHS->ShAmt, DL, VT));
5464     }
5465   }
5466   return SDValue();
5467 }
5468 
// Matches any of the following patterns as a GORCI(W) operation
5470 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
5471 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
5472 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
5473 // Note that with the variant of 3.,
5474 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
5475 // the inner pattern will first be matched as GREVI and then the outer
5476 // pattern will be matched to GORC via the first rule above.
5477 // 4.  (or (rotl/rotr x, bitwidth/2), x)
5478 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
5479                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5481   EVT VT = Op.getValueType();
5482 
5483   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5484     SDLoc DL(Op);
5485     SDValue Op0 = Op.getOperand(0);
5486     SDValue Op1 = Op.getOperand(1);
5487 
5488     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
5489       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
5490           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
5491           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
5492         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
5493       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
5494       if ((Reverse.getOpcode() == ISD::ROTL ||
5495            Reverse.getOpcode() == ISD::ROTR) &&
5496           Reverse.getOperand(0) == X &&
5497           isa<ConstantSDNode>(Reverse.getOperand(1))) {
5498         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
5499         if (RotAmt == (VT.getSizeInBits() / 2))
5500           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
5501                              DAG.getConstant(RotAmt, DL, VT));
5502       }
5503       return SDValue();
5504     };
5505 
5506     // Check for either commutable permutation of (or (GREVI x, shamt), x)
5507     if (SDValue V = MatchOROfReverse(Op0, Op1))
5508       return V;
5509     if (SDValue V = MatchOROfReverse(Op1, Op0))
5510       return V;
5511 
5512     // OR is commutable so canonicalize its OR operand to the left
5513     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
5514       std::swap(Op0, Op1);
5515     if (Op0.getOpcode() != ISD::OR)
5516       return SDValue();
5517     SDValue OrOp0 = Op0.getOperand(0);
5518     SDValue OrOp1 = Op0.getOperand(1);
5519     auto LHS = matchGREVIPat(OrOp0);
5520     // OR is commutable so swap the operands and try again: x might have been
5521     // on the left
5522     if (!LHS) {
5523       std::swap(OrOp0, OrOp1);
5524       LHS = matchGREVIPat(OrOp0);
5525     }
5526     auto RHS = matchGREVIPat(Op1);
5527     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
5528       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
5529                          DAG.getConstant(LHS->ShAmt, DL, VT));
5530     }
5531   }
5532   return SDValue();
5533 }
5534 
5535 // Matches any of the following bit-manipulation patterns:
5536 //   (and (shl x, 1), (0x22222222 << 1))
5537 //   (and (srl x, 1), 0x22222222)
5538 //   (shl (and x, 0x22222222), 1)
5539 //   (srl (and x, (0x22222222 << 1)), 1)
5540 // where the shift amount and mask may vary thus:
5541 //   [1]  = 0x22222222 / 0x44444444
5542 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
5543 //   [4]  = 0x00F000F0 / 0x0F000F00
5544 //   [8]  = 0x0000FF00 / 0x00FF0000
5545 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
5546 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
5547   // These are the unshifted masks which we use to match bit-manipulation
5548   // patterns. They may be shifted left in certain circumstances.
5549   static const uint64_t BitmanipMasks[] = {
5550       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
5551       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
5552 
5553   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5554 }
5555 
// Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
5557 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
5558                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5560   EVT VT = Op.getValueType();
5561 
5562   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
5563     return SDValue();
5564 
5565   SDValue Op0 = Op.getOperand(0);
5566   SDValue Op1 = Op.getOperand(1);
5567 
  // OR is commutable, so canonicalize the inner OR to the LHS.
5569   if (Op0.getOpcode() != ISD::OR)
5570     std::swap(Op0, Op1);
5571   if (Op0.getOpcode() != ISD::OR)
5572     return SDValue();
5573 
5574   // We found an inner OR, so our operands are the operands of the inner OR
5575   // and the other operand of the outer OR.
5576   SDValue A = Op0.getOperand(0);
5577   SDValue B = Op0.getOperand(1);
5578   SDValue C = Op1;
5579 
5580   auto Match1 = matchSHFLPat(A);
5581   auto Match2 = matchSHFLPat(B);
5582 
5583   // If neither matched, we failed.
5584   if (!Match1 && !Match2)
5585     return SDValue();
5586 
  // We had at least one match. If one failed, try the remaining C operand.
5588   if (!Match1) {
5589     std::swap(A, C);
5590     Match1 = matchSHFLPat(A);
5591     if (!Match1)
5592       return SDValue();
5593   } else if (!Match2) {
5594     std::swap(B, C);
5595     Match2 = matchSHFLPat(B);
5596     if (!Match2)
5597       return SDValue();
5598   }
5599   assert(Match1 && Match2);
5600 
5601   // Make sure our matches pair up.
5602   if (!Match1->formsPairWith(*Match2))
5603     return SDValue();
5604 
  // All that remains is to make sure C is an AND with the same input that
  // masks out the bits that are being shuffled.
5607   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
5608       C.getOperand(0) != Match1->Op)
5609     return SDValue();
5610 
5611   uint64_t Mask = C.getConstantOperandVal(1);
5612 
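  // The expected masks for the stationary bits: for each shift amount these
  // are the complement of the two shifted masks combined, e.g. for a shift of
  // 1, ~(0x22222222 | 0x44444444) == 0x99999999.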
5613   static const uint64_t BitmanipMasks[] = {
5614       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
5615       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
5616   };
5617 
5618   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5619   unsigned MaskIdx = Log2_32(Match1->ShAmt);
5620   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5621 
5622   if (Mask != ExpMask)
5623     return SDValue();
5624 
5625   SDLoc DL(Op);
5626   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
5627                      DAG.getConstant(Match1->ShAmt, DL, VT));
5628 }
5629 
// Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
// non-zero, and to x when it is zero. Any repeated GREVI stage undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated GORCI
// stage does not undo itself, but it is redundant.
5634 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
5635   SDValue Src = N->getOperand(0);
5636 
5637   if (Src.getOpcode() != N->getOpcode())
5638     return SDValue();
5639 
5640   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
5641       !isa<ConstantSDNode>(Src.getOperand(1)))
5642     return SDValue();
5643 
5644   unsigned ShAmt1 = N->getConstantOperandVal(1);
5645   unsigned ShAmt2 = Src.getConstantOperandVal(1);
5646   Src = Src.getOperand(0);
5647 
5648   unsigned CombinedShAmt;
5649   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
5650     CombinedShAmt = ShAmt1 | ShAmt2;
5651   else
5652     CombinedShAmt = ShAmt1 ^ ShAmt2;
5653 
5654   if (CombinedShAmt == 0)
5655     return Src;
5656 
5657   SDLoc DL(N);
5658   return DAG.getNode(
5659       N->getOpcode(), DL, N->getValueType(0), Src,
5660       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
5661 }
5662 
5663 // Combine a constant select operand into its use:
5664 //
// (and (select_cc lhs, rhs, cc, -1, c), x)
//   -> (select_cc lhs, rhs, cc, x, (and x, c))  [AllOnes=1]
// (or  (select_cc lhs, rhs, cc, 0, c), x)
//   -> (select_cc lhs, rhs, cc, x, (or x, c))  [AllOnes=0]
// (xor (select_cc lhs, rhs, cc, 0, c), x)
//   -> (select_cc lhs, rhs, cc, x, (xor x, c))  [AllOnes=0]
5671 static SDValue combineSelectCCAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
5672                                      SelectionDAG &DAG, bool AllOnes) {
5673   EVT VT = N->getValueType(0);
5674 
5675   if (Slct.getOpcode() != RISCVISD::SELECT_CC || !Slct.hasOneUse())
5676     return SDValue();
5677 
5678   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
5679     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
5680   };
5681 
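  // Work out which arm of the select holds the identity constant for this
  // operation; the other (non-constant) arm is folded into OtherOp below.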
5682   bool SwapSelectOps;
5683   SDValue TrueVal = Slct.getOperand(3);
5684   SDValue FalseVal = Slct.getOperand(4);
5685   SDValue NonConstantVal;
5686   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
5687     SwapSelectOps = false;
5688     NonConstantVal = FalseVal;
5689   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
5690     SwapSelectOps = true;
5691     NonConstantVal = TrueVal;
5692   } else
5693     return SDValue();
5694 
  // Slct is now known to be the desired identity constant when CC is true.
5696   TrueVal = OtherOp;
5697   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
5698   // Unless SwapSelectOps says CC should be false.
5699   if (SwapSelectOps)
5700     std::swap(TrueVal, FalseVal);
5701 
5702   return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
5703                      {Slct.getOperand(0), Slct.getOperand(1),
5704                       Slct.getOperand(2), TrueVal, FalseVal});
5705 }
5706 
// Attempt combineSelectCCAndUse on each operand of a commutative operator N.
5708 static SDValue combineSelectCCAndUseCommutative(SDNode *N, SelectionDAG &DAG,
5709                                                 bool AllOnes) {
5710   SDValue N0 = N->getOperand(0);
5711   SDValue N1 = N->getOperand(1);
5712   if (SDValue Result = combineSelectCCAndUse(N, N0, N1, DAG, AllOnes))
5713     return Result;
5714   if (SDValue Result = combineSelectCCAndUse(N, N1, N0, DAG, AllOnes))
5715     return Result;
5716   return SDValue();
5717 }
5718 
5719 static SDValue performANDCombine(SDNode *N,
5720                                  TargetLowering::DAGCombinerInfo &DCI,
5721                                  const RISCVSubtarget &Subtarget) {
5722   SelectionDAG &DAG = DCI.DAG;
5723 
  // fold (and (select_cc lhs, rhs, cc, -1, y), x) ->
  //      (select_cc lhs, rhs, cc, x, (and x, y))
5726   return combineSelectCCAndUseCommutative(N, DAG, true);
5727 }
5728 
5729 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5730                                 const RISCVSubtarget &Subtarget) {
5731   SelectionDAG &DAG = DCI.DAG;
5732   if (Subtarget.hasStdExtZbp()) {
5733     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
5734       return GREV;
5735     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
5736       return GORC;
5737     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
5738       return SHFL;
5739   }
5740 
  // fold (or (select_cc lhs, rhs, cc, 0, y), x) ->
  //      (select_cc lhs, rhs, cc, x, (or x, y))
5743   return combineSelectCCAndUseCommutative(N, DAG, false);
5744 }
5745 
5746 static SDValue performXORCombine(SDNode *N,
5747                                  TargetLowering::DAGCombinerInfo &DCI,
5748                                  const RISCVSubtarget &Subtarget) {
5749   SelectionDAG &DAG = DCI.DAG;
5750 
  // fold (xor (select_cc lhs, rhs, cc, 0, y), x) ->
  //      (select_cc lhs, rhs, cc, x, (xor x, y))
5753   return combineSelectCCAndUseCommutative(N, DAG, false);
5754 }
5755 
5756 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
5757 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
5758 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
5759 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
5760 // ADDW/SUBW/MULW.
5761 static SDValue performANY_EXTENDCombine(SDNode *N,
5762                                         TargetLowering::DAGCombinerInfo &DCI,
5763                                         const RISCVSubtarget &Subtarget) {
5764   if (!Subtarget.is64Bit())
5765     return SDValue();
5766 
5767   SelectionDAG &DAG = DCI.DAG;
5768 
5769   SDValue Src = N->getOperand(0);
5770   EVT VT = N->getValueType(0);
5771   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
5772     return SDValue();
5773 
5774   // The opcode must be one that can implicitly sign_extend.
5775   // FIXME: Additional opcodes.
5776   switch (Src.getOpcode()) {
5777   default:
5778     return SDValue();
5779   case ISD::MUL:
5780     if (!Subtarget.hasStdExtM())
5781       return SDValue();
5782     LLVM_FALLTHROUGH;
5783   case ISD::ADD:
5784   case ISD::SUB:
5785     break;
5786   }
5787 
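  // Collect the SETCC users of Src; stop scanning at the first user we don't
  // know how to sign extend.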
5788   SmallVector<SDNode *, 4> SetCCs;
5789   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
5790                             UE = Src.getNode()->use_end();
5791        UI != UE; ++UI) {
5792     SDNode *User = *UI;
5793     if (User == N)
5794       continue;
5795     if (UI.getUse().getResNo() != Src.getResNo())
5796       continue;
5797     // All i32 setccs are legalized by sign extending operands.
5798     if (User->getOpcode() == ISD::SETCC) {
5799       SetCCs.push_back(User);
5800       continue;
5801     }
5802     // We don't know if we can extend this user.
5803     break;
5804   }
5805 
5806   // If we don't have any SetCCs, this isn't worthwhile.
5807   if (SetCCs.empty())
5808     return SDValue();
5809 
5810   SDLoc DL(N);
5811   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
5812   DCI.CombineTo(N, SExt);
5813 
5814   // Promote all the setccs.
5815   for (SDNode *SetCC : SetCCs) {
5816     SmallVector<SDValue, 4> Ops;
5817 
5818     for (unsigned j = 0; j != 2; ++j) {
5819       SDValue SOp = SetCC->getOperand(j);
5820       if (SOp == Src)
5821         Ops.push_back(SExt);
5822       else
5823         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
5824     }
5825 
5826     Ops.push_back(SetCC->getOperand(2));
5827     DCI.CombineTo(SetCC,
5828                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
5829   }
5830   return SDValue(N, 0);
5831 }
5832 
5833 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
5834                                                DAGCombinerInfo &DCI) const {
5835   SelectionDAG &DAG = DCI.DAG;
5836 
5837   switch (N->getOpcode()) {
5838   default:
5839     break;
5840   case RISCVISD::SplitF64: {
5841     SDValue Op0 = N->getOperand(0);
5842     // If the input to SplitF64 is just BuildPairF64 then the operation is
5843     // redundant. Instead, use BuildPairF64's operands directly.
5844     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
5845       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
5846 
5847     SDLoc DL(N);
5848 
5849     // It's cheaper to materialise two 32-bit integers than to load a double
5850     // from the constant pool and transfer it to integer registers through the
5851     // stack.
5852     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
5853       APInt V = C->getValueAPF().bitcastToAPInt();
5854       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
5855       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
5856       return DCI.CombineTo(N, Lo, Hi);
5857     }
5858 
5859     // This is a target-specific version of a DAGCombine performed in
5860     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5861     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5862     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5863     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5864         !Op0.getNode()->hasOneUse())
5865       break;
5866     SDValue NewSplitF64 =
5867         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
5868                     Op0.getOperand(0));
5869     SDValue Lo = NewSplitF64.getValue(0);
5870     SDValue Hi = NewSplitF64.getValue(1);
5871     APInt SignBit = APInt::getSignMask(32);
5872     if (Op0.getOpcode() == ISD::FNEG) {
5873       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
5874                                   DAG.getConstant(SignBit, DL, MVT::i32));
5875       return DCI.CombineTo(N, Lo, NewHi);
5876     }
5877     assert(Op0.getOpcode() == ISD::FABS);
5878     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
5879                                 DAG.getConstant(~SignBit, DL, MVT::i32));
5880     return DCI.CombineTo(N, Lo, NewHi);
5881   }
5882   case RISCVISD::SLLW:
5883   case RISCVISD::SRAW:
5884   case RISCVISD::SRLW:
5885   case RISCVISD::ROLW:
5886   case RISCVISD::RORW: {
5887     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5888     SDValue LHS = N->getOperand(0);
5889     SDValue RHS = N->getOperand(1);
5890     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5891     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5892     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
5893         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
5894       if (N->getOpcode() != ISD::DELETED_NODE)
5895         DCI.AddToWorklist(N);
5896       return SDValue(N, 0);
5897     }
5898     break;
5899   }
5900   case RISCVISD::CLZW:
5901   case RISCVISD::CTZW: {
5902     // Only the lower 32 bits of the first operand are read
5903     SDValue Op0 = N->getOperand(0);
5904     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5905     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
5906       if (N->getOpcode() != ISD::DELETED_NODE)
5907         DCI.AddToWorklist(N);
5908       return SDValue(N, 0);
5909     }
5910     break;
5911   }
5912   case RISCVISD::FSL:
5913   case RISCVISD::FSR: {
    // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
5915     SDValue ShAmt = N->getOperand(2);
5916     unsigned BitWidth = ShAmt.getValueSizeInBits();
5917     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5918     APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
5919     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5920       if (N->getOpcode() != ISD::DELETED_NODE)
5921         DCI.AddToWorklist(N);
5922       return SDValue(N, 0);
5923     }
5924     break;
5925   }
5926   case RISCVISD::FSLW:
5927   case RISCVISD::FSRW: {
5928     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
5929     // read.
5930     SDValue Op0 = N->getOperand(0);
5931     SDValue Op1 = N->getOperand(1);
5932     SDValue ShAmt = N->getOperand(2);
5933     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5934     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
5935     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
5936         SimplifyDemandedBits(Op1, OpMask, DCI) ||
5937         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5938       if (N->getOpcode() != ISD::DELETED_NODE)
5939         DCI.AddToWorklist(N);
5940       return SDValue(N, 0);
5941     }
5942     break;
5943   }
5944   case RISCVISD::GREV:
5945   case RISCVISD::GORC: {
    // Only the lower log2(Bitwidth) bits of the shift amount are read.
5947     SDValue ShAmt = N->getOperand(1);
5948     unsigned BitWidth = ShAmt.getValueSizeInBits();
5949     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5950     APInt ShAmtMask(BitWidth, BitWidth - 1);
5951     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5952       if (N->getOpcode() != ISD::DELETED_NODE)
5953         DCI.AddToWorklist(N);
5954       return SDValue(N, 0);
5955     }
5956 
5957     return combineGREVI_GORCI(N, DCI.DAG);
5958   }
5959   case RISCVISD::GREVW:
5960   case RISCVISD::GORCW: {
5961     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5962     SDValue LHS = N->getOperand(0);
5963     SDValue RHS = N->getOperand(1);
5964     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5965     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5966     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5967         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5968       if (N->getOpcode() != ISD::DELETED_NODE)
5969         DCI.AddToWorklist(N);
5970       return SDValue(N, 0);
5971     }
5972 
5973     return combineGREVI_GORCI(N, DCI.DAG);
5974   }
5975   case RISCVISD::SHFL:
5976   case RISCVISD::UNSHFL: {
    // Only the lower log2(Bitwidth) bits of the shift amount are read.
5978     SDValue ShAmt = N->getOperand(1);
5979     unsigned BitWidth = ShAmt.getValueSizeInBits();
5980     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5981     APInt ShAmtMask(BitWidth, (BitWidth / 2) - 1);
5982     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5983       if (N->getOpcode() != ISD::DELETED_NODE)
5984         DCI.AddToWorklist(N);
5985       return SDValue(N, 0);
5986     }
5987 
5988     break;
5989   }
5990   case RISCVISD::SHFLW:
5991   case RISCVISD::UNSHFLW: {
5992     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5993     SDValue LHS = N->getOperand(0);
5994     SDValue RHS = N->getOperand(1);
5995     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5996     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
5997     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5998         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5999       if (N->getOpcode() != ISD::DELETED_NODE)
6000         DCI.AddToWorklist(N);
6001       return SDValue(N, 0);
6002     }
6003 
6004     break;
6005   }
6006   case RISCVISD::BCOMPRESSW:
6007   case RISCVISD::BDECOMPRESSW: {
6008     // Only the lower 32 bits of LHS and RHS are read.
6009     SDValue LHS = N->getOperand(0);
6010     SDValue RHS = N->getOperand(1);
6011     APInt Mask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
6012     if (SimplifyDemandedBits(LHS, Mask, DCI) ||
6013         SimplifyDemandedBits(RHS, Mask, DCI)) {
6014       if (N->getOpcode() != ISD::DELETED_NODE)
6015         DCI.AddToWorklist(N);
6016       return SDValue(N, 0);
6017     }
6018 
6019     break;
6020   }
6021   case RISCVISD::FMV_X_ANYEXTW_RV64: {
6022     SDLoc DL(N);
6023     SDValue Op0 = N->getOperand(0);
6024     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
6025     // conversion is unnecessary and can be replaced with an ANY_EXTEND
6026     // of the FMV_W_X_RV64 operand.
6027     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
6028       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
6029              "Unexpected value type!");
6030       return Op0.getOperand(0);
6031     }
6032 
6033     // This is a target-specific version of a DAGCombine performed in
6034     // DAGCombiner::visitBITCAST. It performs the equivalent of:
6035     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
6036     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
6037     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
6038         !Op0.getNode()->hasOneUse())
6039       break;
6040     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
6041                                  Op0.getOperand(0));
6042     APInt SignBit = APInt::getSignMask(32).sext(64);
6043     if (Op0.getOpcode() == ISD::FNEG)
6044       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
6045                          DAG.getConstant(SignBit, DL, MVT::i64));
6046 
6047     assert(Op0.getOpcode() == ISD::FABS);
6048     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
6049                        DAG.getConstant(~SignBit, DL, MVT::i64));
6050   }
6051   case ISD::AND:
6052     return performANDCombine(N, DCI, Subtarget);
6053   case ISD::OR:
6054     return performORCombine(N, DCI, Subtarget);
6055   case ISD::XOR:
6056     return performXORCombine(N, DCI, Subtarget);
6057   case ISD::ANY_EXTEND:
6058     return performANY_EXTENDCombine(N, DCI, Subtarget);
6059   case RISCVISD::SELECT_CC: {
    // Try to simplify the comparison used by this SELECT_CC.
6061     SDValue LHS = N->getOperand(0);
6062     SDValue RHS = N->getOperand(1);
6063     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
6064     if (!ISD::isIntEqualitySetCC(CCVal))
6065       break;
6066 
6067     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
6068     //      (select_cc X, Y, lt, trueV, falseV)
6069     // Sometimes the setcc is introduced after select_cc has been formed.
6070     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6071         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6072       // If we're looking for eq 0 instead of ne 0, we need to invert the
6073       // condition.
6074       bool Invert = CCVal == ISD::SETEQ;
6075       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6076       if (Invert)
6077         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6078 
6079       SDLoc DL(N);
6080       RHS = LHS.getOperand(1);
6081       LHS = LHS.getOperand(0);
6082       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6083 
6084       SDValue TargetCC =
6085           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
6086       return DAG.getNode(
6087           RISCVISD::SELECT_CC, DL, N->getValueType(0),
6088           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
6089     }
6090 
6091     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
6092     //      (select_cc X, Y, eq/ne, trueV, falseV)
6093     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6094       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
6095                          {LHS.getOperand(0), LHS.getOperand(1),
6096                           N->getOperand(2), N->getOperand(3),
6097                           N->getOperand(4)});
6098     // (select_cc X, 1, setne, trueV, falseV) ->
6099     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
6100     // This can occur when legalizing some floating point comparisons.
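    // Every bit of X other than bit 0 must be known to be zero.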
6101     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6102     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6103       SDLoc DL(N);
6104       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6105       SDValue TargetCC =
6106           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
6107       RHS = DAG.getConstant(0, DL, LHS.getValueType());
6108       return DAG.getNode(
6109           RISCVISD::SELECT_CC, DL, N->getValueType(0),
6110           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
6111     }
6112 
6113     break;
6114   }
6115   case RISCVISD::BR_CC: {
6116     SDValue LHS = N->getOperand(1);
6117     SDValue RHS = N->getOperand(2);
6118     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
6119     if (!ISD::isIntEqualitySetCC(CCVal))
6120       break;
6121 
6122     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
6123     //      (br_cc X, Y, lt, dest)
6124     // Sometimes the setcc is introduced after br_cc has been formed.
6125     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6126         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6127       // If we're looking for eq 0 instead of ne 0, we need to invert the
6128       // condition.
6129       bool Invert = CCVal == ISD::SETEQ;
6130       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6131       if (Invert)
6132         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6133 
6134       SDLoc DL(N);
6135       RHS = LHS.getOperand(1);
6136       LHS = LHS.getOperand(0);
6137       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6138 
6139       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6140                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
6141                          N->getOperand(4));
6142     }
6143 
6144     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
    //      (br_cc X, Y, eq/ne, dest)
6146     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6147       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
6148                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
6149                          N->getOperand(3), N->getOperand(4));
6150 
    // (br_cc X, 1, setne, dest) ->
    // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
6153     // This can occur when legalizing some floating point comparisons.
6154     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6155     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6156       SDLoc DL(N);
6157       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6158       SDValue TargetCC = DAG.getCondCode(CCVal);
6159       RHS = DAG.getConstant(0, DL, LHS.getValueType());
6160       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6161                          N->getOperand(0), LHS, RHS, TargetCC,
6162                          N->getOperand(4));
6163     }
6164     break;
6165   }
6166   case ISD::FCOPYSIGN: {
6167     EVT VT = N->getValueType(0);
6168     if (!VT.isVector())
6169       break;
    // There is a form of VFSGNJ which injects the negated sign of its second
    // operand. Try and bubble any FNEG up after the extend/round to produce
    // this optimized pattern. Avoid modifying cases where the FP_ROUND has
    // TRUNC=1.
6174     SDValue In2 = N->getOperand(1);
6175     // Avoid cases where the extend/round has multiple uses, as duplicating
6176     // those is typically more expensive than removing a fneg.
6177     if (!In2.hasOneUse())
6178       break;
6179     if (In2.getOpcode() != ISD::FP_EXTEND &&
6180         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
6181       break;
6182     In2 = In2.getOperand(0);
6183     if (In2.getOpcode() != ISD::FNEG)
6184       break;
6185     SDLoc DL(N);
6186     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
6187     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
6188                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
6189   }
6190   case ISD::MGATHER:
6191   case ISD::MSCATTER: {
6192     if (!DCI.isBeforeLegalize())
6193       break;
6194     MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
6195     SDValue Index = MGSN->getIndex();
6196     EVT IndexVT = Index.getValueType();
6197     MVT XLenVT = Subtarget.getXLenVT();
    // RISCV indexed loads and stores only support the "unsigned unscaled"
    // addressing mode, so anything else must be manually legalized.
6200     bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
6201                                 (MGSN->isIndexSigned() &&
6202                                  IndexVT.getVectorElementType().bitsLT(XLenVT));
6203     if (!NeedsIdxLegalization)
6204       break;
6205 
6206     SDLoc DL(N);
6207 
6208     // Any index legalization should first promote to XLenVT, so we don't lose
6209     // bits when scaling. This may create an illegal index type so we let
6210     // LLVM's legalization take care of the splitting.
6211     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
6212       IndexVT = IndexVT.changeVectorElementType(XLenVT);
6213       Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
6214                                                 : ISD::ZERO_EXTEND,
6215                           DL, IndexVT, Index);
6216     }
6217 
6218     unsigned Scale = N->getConstantOperandVal(5);
6219     if (MGSN->isIndexScaled() && Scale != 1) {
6220       // Manually scale the indices by the element size.
6221       // TODO: Sanitize the scale operand here?
      assert(isPowerOf2_32(Scale) && "Expecting a power-of-two scale");
6223       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
6224       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
6225     }
6226 
6227     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
6228     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
6229       return DAG.getMaskedGather(
6230           N->getVTList(), MGSN->getMemoryVT(), DL,
6231           {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
6232            MGSN->getBasePtr(), Index, MGN->getScale()},
6233           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
6234     }
6235     const auto *MSN = cast<MaskedScatterSDNode>(N);
6236     return DAG.getMaskedScatter(
6237         N->getVTList(), MGSN->getMemoryVT(), DL,
6238         {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
6239          Index, MGSN->getScale()},
6240         MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
6241   }
6242   case RISCVISD::SRA_VL:
6243   case RISCVISD::SRL_VL:
6244   case RISCVISD::SHL_VL: {
6245     SDValue ShAmt = N->getOperand(1);
6246     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6247       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6248       SDLoc DL(N);
6249       SDValue VL = N->getOperand(3);
6250       EVT VT = N->getValueType(0);
6251       ShAmt =
6252           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
6253       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
6254                          N->getOperand(2), N->getOperand(3));
6255     }
6256     break;
6257   }
6258   case ISD::SRA:
6259   case ISD::SRL:
6260   case ISD::SHL: {
6261     SDValue ShAmt = N->getOperand(1);
6262     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6263       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6264       SDLoc DL(N);
6265       EVT VT = N->getValueType(0);
6266       ShAmt =
6267           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
6268       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
6269     }
6270     break;
6271   }
6272   case RISCVISD::MUL_VL: {
6273     // Try to form VWMUL or VWMULU.
6274     // FIXME: Look for splat of extended scalar as well.
6275     // FIXME: Support VWMULSU.
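    // For example, (mul_vl (vsext_vl X), (vsext_vl Y), mask, vl) with matching
    // mask and VL operands becomes (vwmul_vl X, Y, mask, vl) operating on the
    // half-width source element type.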
6276     SDValue Op0 = N->getOperand(0);
6277     SDValue Op1 = N->getOperand(1);
6278     bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
6279     bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
6280     if ((!IsSignExt && !IsZeroExt) || Op0.getOpcode() != Op1.getOpcode())
6281       return SDValue();
6282 
6283     // Make sure the extends have a single use.
6284     if (!Op0.hasOneUse() || !Op1.hasOneUse())
6285       return SDValue();
6286 
6287     SDValue Mask = N->getOperand(2);
6288     SDValue VL = N->getOperand(3);
6289     if (Op0.getOperand(1) != Mask || Op1.getOperand(1) != Mask ||
6290         Op0.getOperand(2) != VL || Op1.getOperand(2) != VL)
6291       return SDValue();
6292 
6293     Op0 = Op0.getOperand(0);
6294     Op1 = Op1.getOperand(0);
6295 
6296     MVT VT = N->getSimpleValueType(0);
6297     MVT NarrowVT =
6298         MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits() / 2),
6299                          VT.getVectorElementCount());
6300 
6301     SDLoc DL(N);
6302 
6303     // Re-introduce narrower extends if needed.
6304     unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
6305     if (Op0.getValueType() != NarrowVT)
6306       Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
6307     if (Op1.getValueType() != NarrowVT)
6308       Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
6309 
6310     unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
6311     return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
6312   }
6313   }
6314 
6315   return SDValue();
6316 }
6317 
6318 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
6319     const SDNode *N, CombineLevel Level) const {
6320   // The following folds are only desirable if `(OP _, c1 << c2)` can be
6321   // materialised in fewer instructions than `(OP _, c1)`:
6322   //
6323   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
6324   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
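  //
  // For example, with c1 == 1 and c2 == 3 the shifted constant (8) still fits
  // an ADDI immediate, so the fold is allowed; conversely, if c1 fits an ADDI
  // immediate but c1 << c2 does not, the fold is blocked below.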
6325   SDValue N0 = N->getOperand(0);
6326   EVT Ty = N0.getValueType();
6327   if (Ty.isScalarInteger() &&
6328       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
6329     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6330     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
6331     if (C1 && C2) {
6332       const APInt &C1Int = C1->getAPIntValue();
6333       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
6334 
6335       // We can materialise `c1 << c2` into an add immediate, so it's "free",
6336       // and the combine should happen, to potentially allow further combines
6337       // later.
6338       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
6339           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
6340         return true;
6341 
6342       // We can materialise `c1` in an add immediate, so it's "free", and the
6343       // combine should be prevented.
6344       if (C1Int.getMinSignedBits() <= 64 &&
6345           isLegalAddImmediate(C1Int.getSExtValue()))
6346         return false;
6347 
6348       // Neither constant will fit into an immediate, so find materialisation
6349       // costs.
6350       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
6351                                               Subtarget.getFeatureBits(),
6352                                               /*CompressionCost*/true);
6353       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
6354           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
6355           /*CompressionCost*/true);
6356 
6357       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
6358       // combine should be prevented.
6359       if (C1Cost < ShiftedC1Cost)
6360         return false;
6361     }
6362   }
6363   return true;
6364 }
6365 
6366 bool RISCVTargetLowering::targetShrinkDemandedConstant(
6367     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
6368     TargetLoweringOpt &TLO) const {
6369   // Delay this optimization as late as possible.
6370   if (!TLO.LegalOps)
6371     return false;
6372 
6373   EVT VT = Op.getValueType();
6374   if (VT.isVector())
6375     return false;
6376 
6377   // Only handle AND for now.
6378   if (Op.getOpcode() != ISD::AND)
6379     return false;
6380 
6381   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6382   if (!C)
6383     return false;
6384 
6385   const APInt &Mask = C->getAPIntValue();
6386 
6387   // Clear all non-demanded bits initially.
6388   APInt ShrunkMask = Mask & DemandedBits;
6389 
6390   // Try to make a smaller immediate by setting undemanded bits.
6391 
6392   APInt ExpandedMask = Mask | ~DemandedBits;
6393 
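  // A replacement mask is usable if it keeps every demanded set bit of the
  // original mask (ShrunkMask) and sets no demanded bit that was clear, i.e.
  // it stays within ExpandedMask.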
6394   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
6395     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
6396   };
6397   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
6398     if (NewMask == Mask)
6399       return true;
6400     SDLoc DL(Op);
6401     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
6402     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
6403     return TLO.CombineTo(Op, NewOp);
6404   };
6405 
  // If the shrunk mask fits in a sign-extended 12-bit immediate, let the
  // target-independent code apply it.
6408   if (ShrunkMask.isSignedIntN(12))
6409     return false;
6410 
6411   // Preserve (and X, 0xffff) when zext.h is supported.
6412   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
6413     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
6414     if (IsLegalMask(NewMask))
6415       return UseMask(NewMask);
6416   }
6417 
6418   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
6419   if (VT == MVT::i64) {
6420     APInt NewMask = APInt(64, 0xffffffff);
6421     if (IsLegalMask(NewMask))
6422       return UseMask(NewMask);
6423   }
6424 
6425   // For the remaining optimizations, we need to be able to make a negative
6426   // number through a combination of mask and undemanded bits.
6427   if (!ExpandedMask.isNegative())
6428     return false;
6429 
  // Compute the fewest number of bits needed to represent the negative number.
6431   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
6432 
6433   // Try to make a 12 bit negative immediate. If that fails try to make a 32
6434   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
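  // For example, with Mask == 0xffffff00 and only the low 32 bits demanded on
  // RV64, ExpandedMask is 0xffffffffffffff00 (-256, 9 significant signed
  // bits), so NewMask becomes -256 and the AND can be encoded as a single
  // ANDI.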
6435   APInt NewMask = ShrunkMask;
6436   if (MinSignedBits <= 12)
6437     NewMask.setBitsFrom(11);
6438   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
6439     NewMask.setBitsFrom(31);
6440   else
6441     return false;
6442 
  // Sanity check that the new mask is still legal: it covers ShrunkMask and
  // stays within ExpandedMask.
6444   assert(IsLegalMask(NewMask));
6445   return UseMask(NewMask);
6446 }
6447 
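// Apply the GREV (generalized bit-reverse) permutation selected by ShAmt to a
// known-bits mask: each set bit of ShAmt swaps neighbouring groups of 1, 2, 4,
// ... bits. For example, ShAmt == 1 swaps adjacent bits and
// ShAmt == BitWidth - 8 reverses the byte order.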
6448 static void computeGREV(APInt &Src, unsigned ShAmt) {
6449   ShAmt &= Src.getBitWidth() - 1;
6450   uint64_t x = Src.getZExtValue();
6451   if (ShAmt & 1)
6452     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
6453   if (ShAmt & 2)
6454     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
6455   if (ShAmt & 4)
6456     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
6457   if (ShAmt & 8)
6458     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
6459   if (ShAmt & 16)
6460     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
6461   if (ShAmt & 32)
6462     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
6463   Src = x;
6464 }
6465 
6466 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
6467                                                         KnownBits &Known,
6468                                                         const APInt &DemandedElts,
6469                                                         const SelectionDAG &DAG,
6470                                                         unsigned Depth) const {
6471   unsigned BitWidth = Known.getBitWidth();
6472   unsigned Opc = Op.getOpcode();
6473   assert((Opc >= ISD::BUILTIN_OP_END ||
6474           Opc == ISD::INTRINSIC_WO_CHAIN ||
6475           Opc == ISD::INTRINSIC_W_CHAIN ||
6476           Opc == ISD::INTRINSIC_VOID) &&
6477          "Should use MaskedValueIsZero if you don't know whether Op"
6478          " is a target node!");
6479 
6480   Known.resetAll();
6481   switch (Opc) {
6482   default: break;
6483   case RISCVISD::SELECT_CC: {
6484     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
6485     // If we don't know any bits, early out.
6486     if (Known.isUnknown())
6487       break;
6488     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
6489 
6490     // Only known if known in both the LHS and RHS.
6491     Known = KnownBits::commonBits(Known, Known2);
6492     break;
6493   }
6494   case RISCVISD::REMUW: {
6495     KnownBits Known2;
6496     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6497     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6498     // We only care about the lower 32 bits.
6499     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
6500     // Restore the original width by sign extending.
6501     Known = Known.sext(BitWidth);
6502     break;
6503   }
6504   case RISCVISD::DIVUW: {
6505     KnownBits Known2;
6506     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6507     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6508     // We only care about the lower 32 bits.
6509     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
6510     // Restore the original width by sign extending.
6511     Known = Known.sext(BitWidth);
6512     break;
6513   }
6514   case RISCVISD::CTZW: {
6515     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6516     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
6517     unsigned LowBits = Log2_32(PossibleTZ) + 1;
6518     Known.Zero.setBitsFrom(LowBits);
6519     break;
6520   }
6521   case RISCVISD::CLZW: {
6522     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6523     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
6524     unsigned LowBits = Log2_32(PossibleLZ) + 1;
6525     Known.Zero.setBitsFrom(LowBits);
6526     break;
6527   }
6528   case RISCVISD::GREV:
6529   case RISCVISD::GREVW: {
6530     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
6531       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6532       if (Opc == RISCVISD::GREVW)
6533         Known = Known.trunc(32);
6534       unsigned ShAmt = C->getZExtValue();
6535       computeGREV(Known.Zero, ShAmt);
6536       computeGREV(Known.One, ShAmt);
6537       if (Opc == RISCVISD::GREVW)
6538         Known = Known.sext(BitWidth);
6539     }
6540     break;
6541   }
6542   case RISCVISD::READ_VLENB:
6543     // We assume VLENB is at least 16 bytes.
6544     Known.Zero.setLowBits(4);
6545     // We assume VLENB is no more than 65536 / 8 bytes.
6546     Known.Zero.setBitsFrom(14);
6547     break;
6548   case ISD::INTRINSIC_W_CHAIN: {
6549     unsigned IntNo = Op.getConstantOperandVal(1);
6550     switch (IntNo) {
6551     default:
6552       // We can't do anything for most intrinsics.
6553       break;
6554     case Intrinsic::riscv_vsetvli:
6555     case Intrinsic::riscv_vsetvlimax:
6556       // Assume that VL output is positive and would fit in an int32_t.
6557       // TODO: VLEN might be capped at 16 bits in a future V spec update.
6558       if (BitWidth >= 32)
6559         Known.Zero.setBitsFrom(31);
6560       break;
6561     }
6562     break;
6563   }
6564   }
6565 }
6566 
6567 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
6568     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
6569     unsigned Depth) const {
6570   switch (Op.getOpcode()) {
6571   default:
6572     break;
6573   case RISCVISD::SLLW:
6574   case RISCVISD::SRAW:
6575   case RISCVISD::SRLW:
6576   case RISCVISD::DIVW:
6577   case RISCVISD::DIVUW:
6578   case RISCVISD::REMUW:
6579   case RISCVISD::ROLW:
6580   case RISCVISD::RORW:
6581   case RISCVISD::GREVW:
6582   case RISCVISD::GORCW:
6583   case RISCVISD::FSLW:
6584   case RISCVISD::FSRW:
6585   case RISCVISD::SHFLW:
6586   case RISCVISD::UNSHFLW:
6587   case RISCVISD::BCOMPRESSW:
6588   case RISCVISD::BDECOMPRESSW:
6589     // TODO: As the result is sign-extended, this is conservatively correct. A
6590     // more precise answer could be calculated for SRAW depending on known
6591     // bits in the shift amount.
6592     return 33;
6593   case RISCVISD::SHFL:
6594   case RISCVISD::UNSHFL: {
6595     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
6596     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
6597     // will stay within the upper 32 bits. If there were more than 32 sign bits
6598     // before there will be at least 33 sign bits after.
6599     if (Op.getValueType() == MVT::i64 &&
6600         isa<ConstantSDNode>(Op.getOperand(1)) &&
6601         (Op.getConstantOperandVal(1) & 0x10) == 0) {
6602       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
6603       if (Tmp > 32)
6604         return 33;
6605     }
6606     break;
6607   }
6608   case RISCVISD::VMV_X_S:
6609     // The number of sign bits of the scalar result is computed by obtaining the
6610     // element type of the input vector operand, subtracting its width from the
6611     // XLEN, and then adding one (sign bit within the element type). If the
6612     // element type is wider than XLen, the least-significant XLEN bits are
6613     // taken.
6614     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
6615       return 1;
6616     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
6617   }
6618 
6619   return 1;
6620 }
6621 
6622 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
6623                                                   MachineBasicBlock *BB) {
6624   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
6625 
6626   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
6627   // Should the count have wrapped while it was being read, we need to try
6628   // again.
6629   // ...
6630   // read:
6631   // rdcycleh x3 # load high word of cycle
6632   // rdcycle  x2 # load low word of cycle
6633   // rdcycleh x4 # load high word of cycle
6634   // bne x3, x4, read # check if high word reads match, otherwise try again
6635   // ...
6636 
6637   MachineFunction &MF = *BB->getParent();
6638   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6639   MachineFunction::iterator It = ++BB->getIterator();
6640 
6641   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6642   MF.insert(It, LoopMBB);
6643 
6644   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6645   MF.insert(It, DoneMBB);
6646 
6647   // Transfer the remainder of BB and its successor edges to DoneMBB.
6648   DoneMBB->splice(DoneMBB->begin(), BB,
6649                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
6650   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
6651 
6652   BB->addSuccessor(LoopMBB);
6653 
6654   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6655   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6656   Register LoReg = MI.getOperand(0).getReg();
6657   Register HiReg = MI.getOperand(1).getReg();
6658   DebugLoc DL = MI.getDebugLoc();
6659 
6660   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
6661   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
6662       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6663       .addReg(RISCV::X0);
6664   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
6665       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
6666       .addReg(RISCV::X0);
6667   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
6668       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6669       .addReg(RISCV::X0);
6670 
6671   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
6672       .addReg(HiReg)
6673       .addReg(ReadAgainReg)
6674       .addMBB(LoopMBB);
6675 
6676   LoopMBB->addSuccessor(LoopMBB);
6677   LoopMBB->addSuccessor(DoneMBB);
6678 
6679   MI.eraseFromParent();
6680 
6681   return DoneMBB;
6682 }
6683 
6684 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
6685                                              MachineBasicBlock *BB) {
6686   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
6687 
6688   MachineFunction &MF = *BB->getParent();
6689   DebugLoc DL = MI.getDebugLoc();
6690   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6691   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6692   Register LoReg = MI.getOperand(0).getReg();
6693   Register HiReg = MI.getOperand(1).getReg();
6694   Register SrcReg = MI.getOperand(2).getReg();
6695   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
6696   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6697 
6698   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
6699                           RI);
6700   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6701   MachineMemOperand *MMOLo =
6702       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
6703   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6704       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
6705   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
6706       .addFrameIndex(FI)
6707       .addImm(0)
6708       .addMemOperand(MMOLo);
6709   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
6710       .addFrameIndex(FI)
6711       .addImm(4)
6712       .addMemOperand(MMOHi);
6713   MI.eraseFromParent(); // The pseudo instruction is gone now.
6714   return BB;
6715 }
6716 
6717 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
6718                                                  MachineBasicBlock *BB) {
6719   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
6720          "Unexpected instruction");
6721 
6722   MachineFunction &MF = *BB->getParent();
6723   DebugLoc DL = MI.getDebugLoc();
6724   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6725   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6726   Register DstReg = MI.getOperand(0).getReg();
6727   Register LoReg = MI.getOperand(1).getReg();
6728   Register HiReg = MI.getOperand(2).getReg();
6729   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
6730   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6731 
6732   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6733   MachineMemOperand *MMOLo =
6734       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
6735   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6736       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
6737   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6738       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
6739       .addFrameIndex(FI)
6740       .addImm(0)
6741       .addMemOperand(MMOLo);
6742   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6743       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
6744       .addFrameIndex(FI)
6745       .addImm(4)
6746       .addMemOperand(MMOHi);
6747   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
6748   MI.eraseFromParent(); // The pseudo instruction is gone now.
6749   return BB;
6750 }
6751 
6752 static bool isSelectPseudo(MachineInstr &MI) {
6753   switch (MI.getOpcode()) {
6754   default:
6755     return false;
6756   case RISCV::Select_GPR_Using_CC_GPR:
6757   case RISCV::Select_FPR16_Using_CC_GPR:
6758   case RISCV::Select_FPR32_Using_CC_GPR:
6759   case RISCV::Select_FPR64_Using_CC_GPR:
6760     return true;
6761   }
6762 }
6763 
6764 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
6765                                            MachineBasicBlock *BB) {
6766   // To "insert" Select_* instructions, we actually have to insert the triangle
6767   // control-flow pattern.  The incoming instructions know the destination vreg
6768   // to set, the condition code register to branch on, the true/false values to
6769   // select between, and the condcode to use to select the appropriate branch.
6770   //
6771   // We produce the following control flow:
6772   //     HeadMBB
6773   //     |  \
6774   //     |  IfFalseMBB
6775   //     | /
6776   //    TailMBB
6777   //
6778   // When we find a sequence of selects we attempt to optimize their emission
6779   // by sharing the control flow. Currently we only handle cases where we have
6780   // multiple selects with the exact same condition (same LHS, RHS and CC).
6781   // The selects may be interleaved with other instructions if the other
6782   // instructions meet some requirements we deem safe:
6783   // - They are debug instructions. Otherwise,
6784   // - They do not have side-effects, do not access memory and their inputs do
6785   //   not depend on the results of the select pseudo-instructions.
6786   // The TrueV/FalseV operands of the selects cannot depend on the result of
6787   // previous selects in the sequence.
6788   // These conditions could be further relaxed. See the X86 target for a
6789   // related approach and more information.
6790   Register LHS = MI.getOperand(1).getReg();
6791   Register RHS = MI.getOperand(2).getReg();
6792   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
6793 
6794   SmallVector<MachineInstr *, 4> SelectDebugValues;
6795   SmallSet<Register, 4> SelectDests;
6796   SelectDests.insert(MI.getOperand(0).getReg());
6797 
6798   MachineInstr *LastSelectPseudo = &MI;
6799 
6800   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
6801        SequenceMBBI != E; ++SequenceMBBI) {
6802     if (SequenceMBBI->isDebugInstr())
6803       continue;
6804     else if (isSelectPseudo(*SequenceMBBI)) {
6805       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
6806           SequenceMBBI->getOperand(2).getReg() != RHS ||
6807           SequenceMBBI->getOperand(3).getImm() != CC ||
6808           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
6809           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
6810         break;
6811       LastSelectPseudo = &*SequenceMBBI;
6812       SequenceMBBI->collectDebugValues(SelectDebugValues);
6813       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
6814     } else {
6815       if (SequenceMBBI->hasUnmodeledSideEffects() ||
6816           SequenceMBBI->mayLoadOrStore())
6817         break;
6818       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
6819             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
6820           }))
6821         break;
6822     }
6823   }
6824 
6825   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
6826   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6827   DebugLoc DL = MI.getDebugLoc();
6828   MachineFunction::iterator I = ++BB->getIterator();
6829 
6830   MachineBasicBlock *HeadMBB = BB;
6831   MachineFunction *F = BB->getParent();
6832   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
6833   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
6834 
6835   F->insert(I, IfFalseMBB);
6836   F->insert(I, TailMBB);
6837 
6838   // Transfer debug instructions associated with the selects to TailMBB.
6839   for (MachineInstr *DebugInstr : SelectDebugValues) {
6840     TailMBB->push_back(DebugInstr->removeFromParent());
6841   }
6842 
6843   // Move all instructions after the sequence to TailMBB.
6844   TailMBB->splice(TailMBB->end(), HeadMBB,
6845                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
6846   // Update machine-CFG edges by transferring all successors of the current
6847   // block to the new block which will contain the Phi nodes for the selects.
6848   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
6849   // Set the successors for HeadMBB.
6850   HeadMBB->addSuccessor(IfFalseMBB);
6851   HeadMBB->addSuccessor(TailMBB);
6852 
6853   // Insert appropriate branch.
6854   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
6855 
6856   BuildMI(HeadMBB, DL, TII.get(Opcode))
6857     .addReg(LHS)
6858     .addReg(RHS)
6859     .addMBB(TailMBB);
6860 
6861   // IfFalseMBB just falls through to TailMBB.
6862   IfFalseMBB->addSuccessor(TailMBB);
6863 
6864   // Create PHIs for all of the select pseudo-instructions.
6865   auto SelectMBBI = MI.getIterator();
6866   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
6867   auto InsertionPoint = TailMBB->begin();
6868   while (SelectMBBI != SelectEnd) {
6869     auto Next = std::next(SelectMBBI);
6870     if (isSelectPseudo(*SelectMBBI)) {
6871       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
6872       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
6873               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
6874           .addReg(SelectMBBI->getOperand(4).getReg())
6875           .addMBB(HeadMBB)
6876           .addReg(SelectMBBI->getOperand(5).getReg())
6877           .addMBB(IfFalseMBB);
6878       SelectMBBI->eraseFromParent();
6879     }
6880     SelectMBBI = Next;
6881   }
6882 
6883   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
6884   return TailMBB;
6885 }
6886 
6887 MachineBasicBlock *
6888 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
6889                                                  MachineBasicBlock *BB) const {
6890   switch (MI.getOpcode()) {
6891   default:
6892     llvm_unreachable("Unexpected instr type to insert");
6893   case RISCV::ReadCycleWide:
    assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
6896     return emitReadCycleWidePseudo(MI, BB);
6897   case RISCV::Select_GPR_Using_CC_GPR:
6898   case RISCV::Select_FPR16_Using_CC_GPR:
6899   case RISCV::Select_FPR32_Using_CC_GPR:
6900   case RISCV::Select_FPR64_Using_CC_GPR:
6901     return emitSelectPseudo(MI, BB);
6902   case RISCV::BuildPairF64Pseudo:
6903     return emitBuildPairF64Pseudo(MI, BB);
6904   case RISCV::SplitF64Pseudo:
6905     return emitSplitF64Pseudo(MI, BB);
6906   }
6907 }
6908 
6909 // Calling Convention Implementation.
6910 // The expectations for frontend ABI lowering vary from target to target.
6911 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
6912 // details, but this is a longer term goal. For now, we simply try to keep the
6913 // role of the frontend as simple and well-defined as possible. The rules can
6914 // be summarised as:
6915 // * Never split up large scalar arguments. We handle them here.
6916 // * If a hardfloat calling convention is being used, and the struct may be
6917 // passed in a pair of registers (fp+fp, int+fp), and both registers are
6918 // available, then pass as two separate arguments. If either the GPRs or FPRs
6919 // are exhausted, then pass according to the rule below.
6920 // * If a struct could never be passed in registers or directly in a stack
6921 // slot (as it is larger than 2*XLEN and the floating point rules don't
6922 // apply), then pass it using a pointer with the byval attribute.
// * If a struct is no larger than 2*XLEN, then coerce to either a two-element
// word-sized array or a 2*XLEN scalar (depending on alignment).
6925 // * The frontend can determine whether a struct is returned by reference or
6926 // not based on its size and fields. If it will be returned by reference, the
6927 // frontend must modify the prototype so a pointer with the sret annotation is
6928 // passed as the first argument. This is not necessary for large scalar
6929 // returns.
6930 // * Struct return values and varargs should be coerced to structs containing
6931 // register-size fields in the same situations they would be for fixed
6932 // arguments.
6933 
6934 static const MCPhysReg ArgGPRs[] = {
6935   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
6936   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
6937 };
6938 static const MCPhysReg ArgFPR16s[] = {
6939   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
6940   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
6941 };
6942 static const MCPhysReg ArgFPR32s[] = {
6943   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
6944   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
6945 };
6946 static const MCPhysReg ArgFPR64s[] = {
6947   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
6948   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
6949 };
6950 // This is an interim calling convention and it may be changed in the future.
6951 static const MCPhysReg ArgVRs[] = {
6952     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
6953     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
6954     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
6955 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
6956                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
6957                                      RISCV::V20M2, RISCV::V22M2};
6958 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
6959                                      RISCV::V20M4};
6960 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
6961 
6962 // Pass a 2*XLEN argument that has been split into two XLEN values through
6963 // registers or the stack as necessary.
6964 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
6965                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
6966                                 MVT ValVT2, MVT LocVT2,
6967                                 ISD::ArgFlagsTy ArgFlags2) {
6968   unsigned XLenInBytes = XLen / 8;
6969   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6970     // At least one half can be passed via register.
6971     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
6972                                      VA1.getLocVT(), CCValAssign::Full));
6973   } else {
6974     // Both halves must be passed on the stack, with proper alignment.
6975     Align StackAlign =
6976         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
6977     State.addLoc(
6978         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
6979                             State.AllocateStack(XLenInBytes, StackAlign),
6980                             VA1.getLocVT(), CCValAssign::Full));
6981     State.addLoc(CCValAssign::getMem(
6982         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6983         LocVT2, CCValAssign::Full));
6984     return false;
6985   }
6986 
6987   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6988     // The second half can also be passed via register.
6989     State.addLoc(
6990         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
6991   } else {
6992     // The second half is passed via the stack, without additional alignment.
6993     State.addLoc(CCValAssign::getMem(
6994         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6995         LocVT2, CCValAssign::Full));
6996   }
6997 
6998   return false;
6999 }
7000 
7001 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
7002                                Optional<unsigned> FirstMaskArgument,
7003                                CCState &State, const RISCVTargetLowering &TLI) {
7004   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
7005   if (RC == &RISCV::VRRegClass) {
7006     // Assign the first mask argument to V0.
7007     // This is an interim calling convention and it may be changed in the
7008     // future.
7009     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
7010       return State.AllocateReg(RISCV::V0);
7011     return State.AllocateReg(ArgVRs);
7012   }
7013   if (RC == &RISCV::VRM2RegClass)
7014     return State.AllocateReg(ArgVRM2s);
7015   if (RC == &RISCV::VRM4RegClass)
7016     return State.AllocateReg(ArgVRM4s);
7017   if (RC == &RISCV::VRM8RegClass)
7018     return State.AllocateReg(ArgVRM8s);
7019   llvm_unreachable("Unhandled register class for ValueType");
7020 }
7021 
7022 // Implements the RISC-V calling convention. Returns true upon failure.
7023 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
7024                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
7025                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
7026                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
7027                      Optional<unsigned> FirstMaskArgument) {
7028   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
7029   assert(XLen == 32 || XLen == 64);
7030   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
7031 
  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
7034   if (!LocVT.isVector() && IsRet && ValNo > 1)
7035     return true;
7036 
  // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
  // passing a variadic argument, or if no F16/F32 argument registers are
  // available.
7039   bool UseGPRForF16_F32 = true;
  // UseGPRForF64 is true if targeting soft-float ABIs or an FLEN=32 ABI, if
  // passing a variadic argument, or if no F64 argument registers are
  // available.
7042   bool UseGPRForF64 = true;
7043 
7044   switch (ABI) {
7045   default:
7046     llvm_unreachable("Unexpected ABI");
7047   case RISCVABI::ABI_ILP32:
7048   case RISCVABI::ABI_LP64:
7049     break;
7050   case RISCVABI::ABI_ILP32F:
7051   case RISCVABI::ABI_LP64F:
7052     UseGPRForF16_F32 = !IsFixed;
7053     break;
7054   case RISCVABI::ABI_ILP32D:
7055   case RISCVABI::ABI_LP64D:
7056     UseGPRForF16_F32 = !IsFixed;
7057     UseGPRForF64 = !IsFixed;
7058     break;
7059   }
7060 
7061   // FPR16, FPR32, and FPR64 alias each other.
7062   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
7063     UseGPRForF16_F32 = true;
7064     UseGPRForF64 = true;
7065   }
7066 
7067   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
7068   // similar local variables rather than directly checking against the target
7069   // ABI.
7070 
7071   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
7072     LocVT = XLenVT;
7073     LocInfo = CCValAssign::BCvt;
7074   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
7075     LocVT = MVT::i64;
7076     LocInfo = CCValAssign::BCvt;
7077   }
7078 
7079   // If this is a variadic argument, the RISC-V calling convention requires
7080   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
7081   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
7082   // be used regardless of whether the original argument was split during
7083   // legalisation or not. The argument will not be passed by registers if the
7084   // original type is larger than 2*XLEN, so the register alignment rule does
7085   // not apply.
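  // For example, on RV32 a variadic double must be passed in an aligned GPR
  // pair such as a2+a3; if a1 would otherwise be the next free argument
  // register, it is skipped below.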
7086   unsigned TwoXLenInBytes = (2 * XLen) / 8;
7087   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
7088       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
7089     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
7090     // Skip 'odd' register if necessary.
7091     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
7092       State.AllocateReg(ArgGPRs);
7093   }
7094 
7095   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
7096   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
7097       State.getPendingArgFlags();
7098 
7099   assert(PendingLocs.size() == PendingArgFlags.size() &&
7100          "PendingLocs and PendingArgFlags out of sync");
7101 
7102   // Handle passing f64 on RV32D with a soft float ABI or when floating point
7103   // registers are exhausted.
7104   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
7105     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
7106            "Can't lower f64 if it is split");
7107     // Depending on available argument GPRS, f64 may be passed in a pair of
7108     // GPRs, split between a GPR and the stack, or passed completely on the
7109     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
7110     // cases.
7111     Register Reg = State.AllocateReg(ArgGPRs);
7112     LocVT = MVT::i32;
7113     if (!Reg) {
7114       unsigned StackOffset = State.AllocateStack(8, Align(8));
7115       State.addLoc(
7116           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7117       return false;
7118     }
7119     if (!State.AllocateReg(ArgGPRs))
7120       State.AllocateStack(4, Align(4));
7121     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7122     return false;
7123   }
7124 
7125   // Fixed-length vectors are located in the corresponding scalable-vector
7126   // container types.
7127   if (ValVT.isFixedLengthVector())
7128     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7129 
7130   // Split arguments might be passed indirectly, so keep track of the pending
7131   // values. Split vectors are passed via a mix of registers and indirectly, so
7132   // treat them as we would any other argument.
7133   if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
7134     LocVT = XLenVT;
7135     LocInfo = CCValAssign::Indirect;
7136     PendingLocs.push_back(
7137         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
7138     PendingArgFlags.push_back(ArgFlags);
7139     if (!ArgFlags.isSplitEnd()) {
7140       return false;
7141     }
7142   }
7143 
7144   // If the split argument only had two elements, it should be passed directly
7145   // in registers or on the stack.
7146   if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
7147       PendingLocs.size() <= 2) {
7148     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
7149     // Apply the normal calling convention rules to the first half of the
7150     // split argument.
7151     CCValAssign VA = PendingLocs[0];
7152     ISD::ArgFlagsTy AF = PendingArgFlags[0];
7153     PendingLocs.clear();
7154     PendingArgFlags.clear();
7155     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
7156                                ArgFlags);
7157   }
7158 
7159   // Allocate to a register if possible, or else a stack slot.
7160   Register Reg;
7161   unsigned StoreSizeBytes = XLen / 8;
7162   Align StackAlign = Align(XLen / 8);
7163 
7164   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
7165     Reg = State.AllocateReg(ArgFPR16s);
7166   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
7167     Reg = State.AllocateReg(ArgFPR32s);
7168   else if (ValVT == MVT::f64 && !UseGPRForF64)
7169     Reg = State.AllocateReg(ArgFPR64s);
7170   else if (ValVT.isVector()) {
7171     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
7172     if (!Reg) {
7173       // For return values, the vector must be passed fully via registers or
7174       // via the stack.
7175       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
7176       // but we're using all of them.
7177       if (IsRet)
7178         return true;
      // Try using a GPR to pass the address.
7180       if ((Reg = State.AllocateReg(ArgGPRs))) {
7181         LocVT = XLenVT;
7182         LocInfo = CCValAssign::Indirect;
7183       } else if (ValVT.isScalableVector()) {
7184         report_fatal_error("Unable to pass scalable vector types on the stack");
7185       } else {
7186         // Pass fixed-length vectors on the stack.
7187         LocVT = ValVT;
7188         StoreSizeBytes = ValVT.getStoreSize();
7189         // Align vectors to their element sizes, being careful for vXi1
7190         // vectors.
7191         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7192       }
7193     }
7194   } else {
7195     Reg = State.AllocateReg(ArgGPRs);
7196   }
7197 
7198   unsigned StackOffset =
7199       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
7200 
7201   // If we reach this point and PendingLocs is non-empty, we must be at the
7202   // end of a split argument that must be passed indirectly.
7203   if (!PendingLocs.empty()) {
7204     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
7205     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
7206 
7207     for (auto &It : PendingLocs) {
7208       if (Reg)
7209         It.convertToReg(Reg);
7210       else
7211         It.convertToMem(StackOffset);
7212       State.addLoc(It);
7213     }
7214     PendingLocs.clear();
7215     PendingArgFlags.clear();
7216     return false;
7217   }
7218 
7219   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
7220           (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
7221          "Expected an XLenVT or vector types at this stage");
7222 
7223   if (Reg) {
7224     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7225     return false;
7226   }
7227 
7228   // When a floating-point value is passed on the stack, no bit-conversion is
7229   // needed.
7230   if (ValVT.isFloatingPoint()) {
7231     LocVT = ValVT;
7232     LocInfo = CCValAssign::Full;
7233   }
7234   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7235   return false;
7236 }
7237 
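// Return the index of the first vector mask argument (a vector of i1
// elements), if any; allocateRVVReg assigns that argument to V0.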
7238 template <typename ArgTy>
7239 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
7240   for (const auto &ArgIdx : enumerate(Args)) {
7241     MVT ArgVT = ArgIdx.value().VT;
7242     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
7243       return ArgIdx.index();
7244   }
7245   return None;
7246 }
7247 
7248 void RISCVTargetLowering::analyzeInputArgs(
7249     MachineFunction &MF, CCState &CCInfo,
7250     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
7251     RISCVCCAssignFn Fn) const {
7252   unsigned NumArgs = Ins.size();
7253   FunctionType *FType = MF.getFunction().getFunctionType();
7254 
7255   Optional<unsigned> FirstMaskArgument;
7256   if (Subtarget.hasStdExtV())
7257     FirstMaskArgument = preAssignMask(Ins);
7258 
7259   for (unsigned i = 0; i != NumArgs; ++i) {
7260     MVT ArgVT = Ins[i].VT;
7261     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
7262 
7263     Type *ArgTy = nullptr;
7264     if (IsRet)
7265       ArgTy = FType->getReturnType();
7266     else if (Ins[i].isOrigArg())
7267       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
7268 
7269     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7270     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
7271            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
7272            FirstMaskArgument)) {
7273       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
7274                         << EVT(ArgVT).getEVTString() << '\n');
7275       llvm_unreachable(nullptr);
7276     }
7277   }
7278 }
7279 
7280 void RISCVTargetLowering::analyzeOutputArgs(
7281     MachineFunction &MF, CCState &CCInfo,
7282     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
7283     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
7284   unsigned NumArgs = Outs.size();
7285 
7286   Optional<unsigned> FirstMaskArgument;
7287   if (Subtarget.hasStdExtV())
7288     FirstMaskArgument = preAssignMask(Outs);
7289 
7290   for (unsigned i = 0; i != NumArgs; i++) {
7291     MVT ArgVT = Outs[i].VT;
7292     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
7293     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
7294 
7295     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7296     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
7297            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
7298            FirstMaskArgument)) {
7299       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
7300                         << EVT(ArgVT).getEVTString() << "\n");
7301       llvm_unreachable(nullptr);
7302     }
7303   }
7304 }
7305 
7306 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
7307 // values.
7308 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
7309                                    const CCValAssign &VA, const SDLoc &DL,
7310                                    const RISCVSubtarget &Subtarget) {
7311   switch (VA.getLocInfo()) {
7312   default:
7313     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7314   case CCValAssign::Full:
7315     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
7316       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
7317     break;
7318   case CCValAssign::BCvt:
7319     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7320       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
7321     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7322       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
7323     else
7324       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
7325     break;
7326   }
7327   return Val;
7328 }
7329 
7330 // The caller is responsible for loading the full value if the argument is
7331 // passed with CCValAssign::Indirect.
7332 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
7333                                 const CCValAssign &VA, const SDLoc &DL,
7334                                 const RISCVTargetLowering &TLI) {
7335   MachineFunction &MF = DAG.getMachineFunction();
7336   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7337   EVT LocVT = VA.getLocVT();
7338   SDValue Val;
7339   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
7340   Register VReg = RegInfo.createVirtualRegister(RC);
7341   RegInfo.addLiveIn(VA.getLocReg(), VReg);
7342   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
7343 
7344   if (VA.getLocInfo() == CCValAssign::Indirect)
7345     return Val;
7346 
7347   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
7348 }
7349 
7350 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
7351                                    const CCValAssign &VA, const SDLoc &DL,
7352                                    const RISCVSubtarget &Subtarget) {
7353   EVT LocVT = VA.getLocVT();
7354 
7355   switch (VA.getLocInfo()) {
7356   default:
7357     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7358   case CCValAssign::Full:
7359     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
7360       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
7361     break;
7362   case CCValAssign::BCvt:
7363     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7364       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
7365     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7366       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
7367     else
7368       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
7369     break;
7370   }
7371   return Val;
7372 }
7373 
7374 // The caller is responsible for loading the full value if the argument is
7375 // passed with CCValAssign::Indirect.
7376 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
7377                                 const CCValAssign &VA, const SDLoc &DL) {
7378   MachineFunction &MF = DAG.getMachineFunction();
7379   MachineFrameInfo &MFI = MF.getFrameInfo();
7380   EVT LocVT = VA.getLocVT();
7381   EVT ValVT = VA.getValVT();
7382   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
7383   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
7384                                  /*Immutable=*/true);
7385   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7386   SDValue Val;
7387 
7388   ISD::LoadExtType ExtType;
7389   switch (VA.getLocInfo()) {
7390   default:
7391     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7392   case CCValAssign::Full:
7393   case CCValAssign::Indirect:
7394   case CCValAssign::BCvt:
7395     ExtType = ISD::NON_EXTLOAD;
7396     break;
7397   }
7398   Val = DAG.getExtLoad(
7399       ExtType, DL, LocVT, Chain, FIN,
7400       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
7401   return Val;
7402 }
7403 
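// Unpack an f64 argument passed on RV32 under a soft-float ABI (or when the
// FPRs are exhausted): the value arrives either entirely on the stack, in a
// pair of GPRs, or split between X17 and the stack.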
7404 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
7405                                        const CCValAssign &VA, const SDLoc &DL) {
7406   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
7407          "Unexpected VA");
7408   MachineFunction &MF = DAG.getMachineFunction();
7409   MachineFrameInfo &MFI = MF.getFrameInfo();
7410   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7411 
7412   if (VA.isMemLoc()) {
7413     // f64 is passed on the stack.
7414     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
7415     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7416     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
7417                        MachinePointerInfo::getFixedStack(MF, FI));
7418   }
7419 
7420   assert(VA.isRegLoc() && "Expected register VA assignment");
7421 
7422   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7423   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
7424   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
7425   SDValue Hi;
7426   if (VA.getLocReg() == RISCV::X17) {
7427     // Second half of f64 is passed on the stack.
7428     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
7429     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7430     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
7431                      MachinePointerInfo::getFixedStack(MF, FI));
7432   } else {
7433     // Second half of f64 is passed in another GPR.
7434     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7435     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
7436     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
7437   }
7438   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
7439 }
7440 
// FastCC gives less than a 1% performance improvement on some particular
// benchmarks, but it may theoretically benefit other cases.
7443 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
7444                             unsigned ValNo, MVT ValVT, MVT LocVT,
7445                             CCValAssign::LocInfo LocInfo,
7446                             ISD::ArgFlagsTy ArgFlags, CCState &State,
7447                             bool IsFixed, bool IsRet, Type *OrigTy,
7448                             const RISCVTargetLowering &TLI,
7449                             Optional<unsigned> FirstMaskArgument) {
7450 
  // X5 and X6 might be used by the save/restore libcalls.
7452   static const MCPhysReg GPRList[] = {
7453       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
7454       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
7455       RISCV::X29, RISCV::X30, RISCV::X31};
7456 
7457   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7458     if (unsigned Reg = State.AllocateReg(GPRList)) {
7459       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7460       return false;
7461     }
7462   }
7463 
7464   if (LocVT == MVT::f16) {
7465     static const MCPhysReg FPR16List[] = {
7466         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
7467         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
7468         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
7469         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
7470     if (unsigned Reg = State.AllocateReg(FPR16List)) {
7471       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7472       return false;
7473     }
7474   }
7475 
7476   if (LocVT == MVT::f32) {
7477     static const MCPhysReg FPR32List[] = {
7478         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
7479         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
7480         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
7481         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
7482     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7483       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7484       return false;
7485     }
7486   }
7487 
7488   if (LocVT == MVT::f64) {
7489     static const MCPhysReg FPR64List[] = {
7490         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
7491         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
7492         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
7493         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
7494     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7495       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7496       return false;
7497     }
7498   }
7499 
7500   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
7501     unsigned Offset4 = State.AllocateStack(4, Align(4));
7502     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
7503     return false;
7504   }
7505 
7506   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
7507     unsigned Offset5 = State.AllocateStack(8, Align(8));
7508     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
7509     return false;
7510   }
7511 
7512   if (LocVT.isVector()) {
7513     if (unsigned Reg =
7514             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
7515       // Fixed-length vectors are located in the corresponding scalable-vector
7516       // container types.
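      // As an illustration (the exact container depends on the subtarget's
      // minimum vector length, so this mapping is only an assumption): a
      // fixed-length v4i32 argument would be assigned to whichever scalable
      // container getContainerForFixedLengthVector selects, e.g. nxv2i32.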
7517       if (ValVT.isFixedLengthVector())
7518         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7519       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7520     } else {
7521       // Try and pass the address via a "fast" GPR.
7522       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
7523         LocInfo = CCValAssign::Indirect;
7524         LocVT = TLI.getSubtarget().getXLenVT();
7525         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
7526       } else if (ValVT.isFixedLengthVector()) {
7527         auto StackAlign =
7528             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7529         unsigned StackOffset =
7530             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
7531         State.addLoc(
7532             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7533       } else {
7534         // Can't pass scalable vectors on the stack.
7535         return true;
7536       }
7537     }
7538 
7539     return false;
7540   }
7541 
7542   return true; // CC didn't match.
7543 }
7544 
7545 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
7546                          CCValAssign::LocInfo LocInfo,
7547                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
7548 
7549   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7550     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
7551     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
7552     static const MCPhysReg GPRList[] = {
7553         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
7554         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
7555     if (unsigned Reg = State.AllocateReg(GPRList)) {
7556       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7557       return false;
7558     }
7559   }
7560 
7561   if (LocVT == MVT::f32) {
7562     // Pass in STG registers: F1, ..., F6
7563     //                        fs0 ... fs5
7564     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
7565                                           RISCV::F18_F, RISCV::F19_F,
7566                                           RISCV::F20_F, RISCV::F21_F};
7567     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7568       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7569       return false;
7570     }
7571   }
7572 
7573   if (LocVT == MVT::f64) {
7574     // Pass in STG registers: D1, ..., D6
7575     //                        fs6 ... fs11
7576     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
7577                                           RISCV::F24_D, RISCV::F25_D,
7578                                           RISCV::F26_D, RISCV::F27_D};
7579     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7580       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7581       return false;
7582     }
7583   }
7584 
7585   report_fatal_error("No registers left in GHC calling convention");
7586   return true;
7587 }
7588 
7589 // Transform physical registers into virtual registers.
7590 SDValue RISCVTargetLowering::LowerFormalArguments(
7591     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
7592     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
7593     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7594 
7595   MachineFunction &MF = DAG.getMachineFunction();
7596 
7597   switch (CallConv) {
7598   default:
7599     report_fatal_error("Unsupported calling convention");
7600   case CallingConv::C:
7601   case CallingConv::Fast:
7602     break;
7603   case CallingConv::GHC:
7604     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
7605         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
7606       report_fatal_error(
7607         "GHC calling convention requires the F and D instruction set extensions");
7608   }
7609 
7610   const Function &Func = MF.getFunction();
7611   if (Func.hasFnAttribute("interrupt")) {
7612     if (!Func.arg_empty())
7613       report_fatal_error(
7614         "Functions with the interrupt attribute cannot have arguments!");
7615 
7616     StringRef Kind =
7617       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7618 
7619     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
7620       report_fatal_error(
7621         "Function interrupt attribute argument not supported!");
7622   }
7623 
7624   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7625   MVT XLenVT = Subtarget.getXLenVT();
7626   unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
7628   std::vector<SDValue> OutChains;
7629 
7630   // Assign locations to all of the incoming arguments.
7631   SmallVector<CCValAssign, 16> ArgLocs;
7632   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7633 
7634   if (CallConv == CallingConv::GHC)
7635     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
7636   else
7637     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
7638                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7639                                                    : CC_RISCV);
7640 
7641   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7642     CCValAssign &VA = ArgLocs[i];
7643     SDValue ArgValue;
7644     // Passing f64 on RV32D with a soft float ABI must be handled as a special
7645     // case.
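    // As an illustration (assuming the ilp32 ABI), a 'double' argument
    // arrives as two i32 halves, typically in a0/a1, and is reassembled
    // below via RISCVISD::BuildPairF64 in unpackF64OnRV32DSoftABI.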
7646     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
7647       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
7648     else if (VA.isRegLoc())
7649       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
7650     else
7651       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
7652 
7653     if (VA.getLocInfo() == CCValAssign::Indirect) {
7654       // If the original argument was split and passed by reference (e.g. i128
7655       // on RV32), we need to load all parts of it here (using the same
7656       // address). Vectors may be partly split to registers and partly to the
7657       // stack, in which case the base address is partly offset and subsequent
7658       // stores are relative to that.
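      // As an illustrative example, an i128 argument on RV32 is passed
      // indirectly: a single pointer arrives in ArgValue, the first i32 part
      // is loaded just below, and the loop after that loads the remaining
      // parts from offsets 4, 8 and 12 relative to the same address.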
7659       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
7660                                    MachinePointerInfo()));
7661       unsigned ArgIndex = Ins[i].OrigArgIndex;
7662       unsigned ArgPartOffset = Ins[i].PartOffset;
7663       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7664       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
7665         CCValAssign &PartVA = ArgLocs[i + 1];
7666         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
7667         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7668         if (PartVA.getValVT().isScalableVector())
7669           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7670         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
7671         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
7672                                      MachinePointerInfo()));
7673         ++i;
7674       }
7675       continue;
7676     }
7677     InVals.push_back(ArgValue);
7678   }
7679 
7680   if (IsVarArg) {
7681     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
7682     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
7683     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
7684     MachineFrameInfo &MFI = MF.getFrameInfo();
7685     MachineRegisterInfo &RegInfo = MF.getRegInfo();
7686     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
7687 
7688     // Offset of the first variable argument from stack pointer, and size of
7689     // the vararg save area. For now, the varargs save area is either zero or
7690     // large enough to hold a0-a7.
7691     int VaArgOffset, VarArgsSaveSize;
7692 
7693     // If all registers are allocated, then all varargs must be passed on the
7694     // stack and we don't need to save any argregs.
7695     if (ArgRegs.size() == Idx) {
7696       VaArgOffset = CCInfo.getNextStackOffset();
7697       VarArgsSaveSize = 0;
7698     } else {
7699       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
7700       VaArgOffset = -VarArgsSaveSize;
7701     }
7702 
    // Record the frame index of the first variable argument,
    // which is needed by VASTART.
7705     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7706     RVFI->setVarArgsFrameIndex(FI);
7707 
    // If saving an odd number of registers, create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
7711     if (Idx % 2) {
7712       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
7713       VarArgsSaveSize += XLenInBytes;
7714     }
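    // As an illustrative example: if a0-a2 hold fixed arguments (Idx == 3),
    // then a3-a7 are spilled by the loop below, VarArgsSaveSize starts at
    // 5 * XLenInBytes, and because Idx is odd the extra slot created above
    // grows it to 6 * XLenInBytes to keep the save area 2*XLEN-aligned.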
7715 
7716     // Copy the integer registers that may have been used for passing varargs
7717     // to the vararg save area.
7718     for (unsigned I = Idx; I < ArgRegs.size();
7719          ++I, VaArgOffset += XLenInBytes) {
7720       const Register Reg = RegInfo.createVirtualRegister(RC);
7721       RegInfo.addLiveIn(ArgRegs[I], Reg);
7722       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
7723       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7724       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7725       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
7726                                    MachinePointerInfo::getFixedStack(MF, FI));
7727       cast<StoreSDNode>(Store.getNode())
7728           ->getMemOperand()
7729           ->setValue((Value *)nullptr);
7730       OutChains.push_back(Store);
7731     }
7732     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
7733   }
7734 
  // All stores are grouped into one token factor node so that the sizes of
  // Ins and InVals can be matched. This only happens for vararg functions.
7737   if (!OutChains.empty()) {
7738     OutChains.push_back(Chain);
7739     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
7740   }
7741 
7742   return Chain;
7743 }
7744 
7745 /// isEligibleForTailCallOptimization - Check whether the call is eligible
7746 /// for tail call optimization.
7747 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
7748 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
7749     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
7750     const SmallVector<CCValAssign, 16> &ArgLocs) const {
7751 
7752   auto &Callee = CLI.Callee;
7753   auto CalleeCC = CLI.CallConv;
7754   auto &Outs = CLI.Outs;
7755   auto &Caller = MF.getFunction();
7756   auto CallerCC = Caller.getCallingConv();
7757 
7758   // Exception-handling functions need a special set of instructions to
7759   // indicate a return to the hardware. Tail-calling another function would
7760   // probably break this.
7761   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
7762   // should be expanded as new function attributes are introduced.
7763   if (Caller.hasFnAttribute("interrupt"))
7764     return false;
7765 
7766   // Do not tail call opt if the stack is used to pass parameters.
7767   if (CCInfo.getNextStackOffset() != 0)
7768     return false;
7769 
  // Do not tail call opt if any parameters need to be passed indirectly.
  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
  // passed indirectly: the address of the value is passed in a register or,
  // if no register is available, on the stack. Passing indirectly usually
  // also requires allocating stack space to hold the value itself, so the
  // CCInfo.getNextStackOffset() != 0 check above is not sufficient; we also
  // need to check whether any CCValAssign in ArgLocs is
  // CCValAssign::Indirect.
7778   for (auto &VA : ArgLocs)
7779     if (VA.getLocInfo() == CCValAssign::Indirect)
7780       return false;
7781 
7782   // Do not tail call opt if either caller or callee uses struct return
7783   // semantics.
7784   auto IsCallerStructRet = Caller.hasStructRetAttr();
7785   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
7786   if (IsCallerStructRet || IsCalleeStructRet)
7787     return false;
7788 
7789   // Externally-defined functions with weak linkage should not be
7790   // tail-called. The behaviour of branch instructions in this situation (as
7791   // used for tail calls) is implementation-defined, so we cannot rely on the
7792   // linker replacing the tail call with a return.
7793   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
7794     const GlobalValue *GV = G->getGlobal();
7795     if (GV->hasExternalWeakLinkage())
7796       return false;
7797   }
7798 
7799   // The callee has to preserve all registers the caller needs to preserve.
7800   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
7801   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
7802   if (CalleeCC != CallerCC) {
7803     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
7804     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
7805       return false;
7806   }
7807 
7808   // Byval parameters hand the function a pointer directly into the stack area
7809   // we want to reuse during a tail call. Working around this *is* possible
7810   // but less efficient and uglier in LowerCall.
7811   for (auto &Arg : Outs)
7812     if (Arg.Flags.isByVal())
7813       return false;
7814 
7815   return true;
7816 }
7817 
7818 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
7819   return DAG.getDataLayout().getPrefTypeAlign(
7820       VT.getTypeForEVT(*DAG.getContext()));
7821 }
7822 
7823 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
7824 // and output parameter nodes.
7825 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
7826                                        SmallVectorImpl<SDValue> &InVals) const {
7827   SelectionDAG &DAG = CLI.DAG;
7828   SDLoc &DL = CLI.DL;
7829   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
7830   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
7831   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
7832   SDValue Chain = CLI.Chain;
7833   SDValue Callee = CLI.Callee;
7834   bool &IsTailCall = CLI.IsTailCall;
7835   CallingConv::ID CallConv = CLI.CallConv;
7836   bool IsVarArg = CLI.IsVarArg;
7837   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7838   MVT XLenVT = Subtarget.getXLenVT();
7839 
7840   MachineFunction &MF = DAG.getMachineFunction();
7841 
7842   // Analyze the operands of the call, assigning locations to each operand.
7843   SmallVector<CCValAssign, 16> ArgLocs;
7844   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7845 
7846   if (CallConv == CallingConv::GHC)
7847     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
7848   else
7849     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
7850                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7851                                                     : CC_RISCV);
7852 
7853   // Check if it's really possible to do a tail call.
7854   if (IsTailCall)
7855     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
7856 
7857   if (IsTailCall)
7858     ++NumTailCalls;
7859   else if (CLI.CB && CLI.CB->isMustTailCall())
7860     report_fatal_error("failed to perform tail call elimination on a call "
7861                        "site marked musttail");
7862 
7863   // Get a count of how many bytes are to be pushed on the stack.
7864   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
7865 
7866   // Create local copies for byval args
7867   SmallVector<SDValue, 8> ByValArgs;
7868   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7869     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7870     if (!Flags.isByVal())
7871       continue;
7872 
7873     SDValue Arg = OutVals[i];
7874     unsigned Size = Flags.getByValSize();
7875     Align Alignment = Flags.getNonZeroByValAlign();
7876 
7877     int FI =
7878         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
7879     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7880     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
7881 
7882     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
7883                           /*IsVolatile=*/false,
7884                           /*AlwaysInline=*/false, IsTailCall,
7885                           MachinePointerInfo(), MachinePointerInfo());
7886     ByValArgs.push_back(FIPtr);
7887   }
7888 
7889   if (!IsTailCall)
7890     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
7891 
7892   // Copy argument values to their designated locations.
7893   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
7894   SmallVector<SDValue, 8> MemOpChains;
7895   SDValue StackPtr;
7896   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
7897     CCValAssign &VA = ArgLocs[i];
7898     SDValue ArgValue = OutVals[i];
7899     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7900 
7901     // Handle passing f64 on RV32D with a soft float ABI as a special case.
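    // As an illustration (ilp32 ABI assumed), the f64 is split below with
    // RISCVISD::SplitF64; the low half goes in the assigned GPR, and the
    // high half goes either in the next GPR or, when the low half landed in
    // a7 (X17), into the first outgoing stack slot at the stack pointer.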
7902     bool IsF64OnRV32DSoftABI =
7903         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
7904     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
7905       SDValue SplitF64 = DAG.getNode(
7906           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
7907       SDValue Lo = SplitF64.getValue(0);
7908       SDValue Hi = SplitF64.getValue(1);
7909 
7910       Register RegLo = VA.getLocReg();
7911       RegsToPass.push_back(std::make_pair(RegLo, Lo));
7912 
7913       if (RegLo == RISCV::X17) {
7914         // Second half of f64 is passed on the stack.
7915         // Work out the address of the stack slot.
7916         if (!StackPtr.getNode())
7917           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7918         // Emit the store.
7919         MemOpChains.push_back(
7920             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
7921       } else {
7922         // Second half of f64 is passed in another GPR.
7923         assert(RegLo < RISCV::X31 && "Invalid register pair");
7924         Register RegHigh = RegLo + 1;
7925         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
7926       }
7927       continue;
7928     }
7929 
7930     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
7931     // as any other MemLoc.
7932 
7933     // Promote the value if needed.
7934     // For now, only handle fully promoted and indirect arguments.
7935     if (VA.getLocInfo() == CCValAssign::Indirect) {
7936       // Store the argument in a stack slot and pass its address.
7937       Align StackAlign =
7938           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
7939                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
7940       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
7941       // If the original argument was split (e.g. i128), we need
7942       // to store the required parts of it here (and pass just one address).
7943       // Vectors may be partly split to registers and partly to the stack, in
7944       // which case the base address is partly offset and subsequent stores are
7945       // relative to that.
7946       unsigned ArgIndex = Outs[i].OrigArgIndex;
7947       unsigned ArgPartOffset = Outs[i].PartOffset;
7948       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
      // Calculate the total size to store. We don't know this up front, so
      // walk the remaining parts in the loop below and accumulate the size
      // and alignment as we go.
7952       SmallVector<std::pair<SDValue, SDValue>> Parts;
7953       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
7954         SDValue PartValue = OutVals[i + 1];
7955         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
7956         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7957         EVT PartVT = PartValue.getValueType();
7958         if (PartVT.isScalableVector())
7959           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7960         StoredSize += PartVT.getStoreSize();
7961         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
7962         Parts.push_back(std::make_pair(PartValue, Offset));
7963         ++i;
7964       }
7965       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
7966       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
7967       MemOpChains.push_back(
7968           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
7969                        MachinePointerInfo::getFixedStack(MF, FI)));
7970       for (const auto &Part : Parts) {
7971         SDValue PartValue = Part.first;
7972         SDValue PartOffset = Part.second;
7973         SDValue Address =
7974             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
7975         MemOpChains.push_back(
7976             DAG.getStore(Chain, DL, PartValue, Address,
7977                          MachinePointerInfo::getFixedStack(MF, FI)));
7978       }
7979       ArgValue = SpillSlot;
7980     } else {
7981       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
7982     }
7983 
7984     // Use local copy if it is a byval arg.
7985     if (Flags.isByVal())
7986       ArgValue = ByValArgs[j++];
7987 
7988     if (VA.isRegLoc()) {
7989       // Queue up the argument copies and emit them at the end.
7990       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
7991     } else {
7992       assert(VA.isMemLoc() && "Argument not register or memory");
7993       assert(!IsTailCall && "Tail call not allowed if stack is used "
7994                             "for passing parameters");
7995 
7996       // Work out the address of the stack slot.
7997       if (!StackPtr.getNode())
7998         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7999       SDValue Address =
8000           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
8001                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
8002 
8003       // Emit the store.
8004       MemOpChains.push_back(
8005           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
8006     }
8007   }
8008 
8009   // Join the stores, which are independent of one another.
8010   if (!MemOpChains.empty())
8011     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
8012 
8013   SDValue Glue;
8014 
8015   // Build a sequence of copy-to-reg nodes, chained and glued together.
8016   for (auto &Reg : RegsToPass) {
8017     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
8018     Glue = Chain.getValue(1);
8019   }
8020 
  // Validate that none of the argument registers have been marked as
  // reserved; if any have, report an error. Do the same for the return
  // address register if this is not a tail call.
8024   validateCCReservedRegs(RegsToPass, MF);
8025   if (!IsTailCall &&
8026       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
8027     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8028         MF.getFunction(),
8029         "Return address register required, but has been reserved."});
8030 
  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
  // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
  // split it, and so that the direct call can be matched by PseudoCALL.
8034   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
8035     const GlobalValue *GV = S->getGlobal();
8036 
8037     unsigned OpFlags = RISCVII::MO_CALL;
8038     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
8039       OpFlags = RISCVII::MO_PLT;
8040 
8041     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
8042   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
8043     unsigned OpFlags = RISCVII::MO_CALL;
8044 
8045     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
8046                                                  nullptr))
8047       OpFlags = RISCVII::MO_PLT;
8048 
8049     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
8050   }
8051 
8052   // The first call operand is the chain and the second is the target address.
8053   SmallVector<SDValue, 8> Ops;
8054   Ops.push_back(Chain);
8055   Ops.push_back(Callee);
8056 
8057   // Add argument registers to the end of the list so that they are
8058   // known live into the call.
8059   for (auto &Reg : RegsToPass)
8060     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
8061 
8062   if (!IsTailCall) {
8063     // Add a register mask operand representing the call-preserved registers.
8064     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
8065     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
8066     assert(Mask && "Missing call preserved mask for calling convention");
8067     Ops.push_back(DAG.getRegisterMask(Mask));
8068   }
8069 
8070   // Glue the call to the argument copies, if any.
8071   if (Glue.getNode())
8072     Ops.push_back(Glue);
8073 
8074   // Emit the call.
8075   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8076 
8077   if (IsTailCall) {
8078     MF.getFrameInfo().setHasTailCall();
8079     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
8080   }
8081 
8082   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
8083   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
8084   Glue = Chain.getValue(1);
8085 
8086   // Mark the end of the call, which is glued to the call itself.
8087   Chain = DAG.getCALLSEQ_END(Chain,
8088                              DAG.getConstant(NumBytes, DL, PtrVT, true),
8089                              DAG.getConstant(0, DL, PtrVT, true),
8090                              Glue, DL);
8091   Glue = Chain.getValue(1);
8092 
8093   // Assign locations to each value returned by this call.
8094   SmallVector<CCValAssign, 16> RVLocs;
8095   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
8096   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
8097 
8098   // Copy all of the result registers out of their specified physreg.
8099   for (auto &VA : RVLocs) {
8100     // Copy the value out
8101     SDValue RetValue =
8102         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
8103     // Glue the RetValue to the end of the call sequence
8104     Chain = RetValue.getValue(1);
8105     Glue = RetValue.getValue(2);
8106 
8107     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
8108       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
8109       SDValue RetValue2 =
8110           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
8111       Chain = RetValue2.getValue(1);
8112       Glue = RetValue2.getValue(2);
8113       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
8114                              RetValue2);
8115     }
8116 
8117     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
8118 
8119     InVals.push_back(RetValue);
8120   }
8121 
8122   return Chain;
8123 }
8124 
8125 bool RISCVTargetLowering::CanLowerReturn(
8126     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
8127     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
8128   SmallVector<CCValAssign, 16> RVLocs;
8129   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
8130 
8131   Optional<unsigned> FirstMaskArgument;
8132   if (Subtarget.hasStdExtV())
8133     FirstMaskArgument = preAssignMask(Outs);
8134 
8135   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
8136     MVT VT = Outs[i].VT;
8137     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
8138     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8139     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
8140                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
8141                  *this, FirstMaskArgument))
8142       return false;
8143   }
8144   return true;
8145 }
8146 
8147 SDValue
8148 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
8149                                  bool IsVarArg,
8150                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
8151                                  const SmallVectorImpl<SDValue> &OutVals,
8152                                  const SDLoc &DL, SelectionDAG &DAG) const {
8153   const MachineFunction &MF = DAG.getMachineFunction();
8154   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
8155 
8156   // Stores the assignment of the return value to a location.
8157   SmallVector<CCValAssign, 16> RVLocs;
8158 
8159   // Info about the registers and stack slot.
8160   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
8161                  *DAG.getContext());
8162 
8163   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
8164                     nullptr, CC_RISCV);
8165 
8166   if (CallConv == CallingConv::GHC && !RVLocs.empty())
8167     report_fatal_error("GHC functions return void only");
8168 
8169   SDValue Glue;
8170   SmallVector<SDValue, 4> RetOps(1, Chain);
8171 
8172   // Copy the result values into the output registers.
8173   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
8174     SDValue Val = OutVals[i];
8175     CCValAssign &VA = RVLocs[i];
8176     assert(VA.isRegLoc() && "Can only return in registers!");
8177 
8178     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
8179       // Handle returning f64 on RV32D with a soft float ABI.
8180       assert(VA.isRegLoc() && "Expected return via registers");
8181       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
8182                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
8183       SDValue Lo = SplitF64.getValue(0);
8184       SDValue Hi = SplitF64.getValue(1);
8185       Register RegLo = VA.getLocReg();
8186       assert(RegLo < RISCV::X31 && "Invalid register pair");
8187       Register RegHi = RegLo + 1;
8188 
8189       if (STI.isRegisterReservedByUser(RegLo) ||
8190           STI.isRegisterReservedByUser(RegHi))
8191         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8192             MF.getFunction(),
8193             "Return value register required, but has been reserved."});
8194 
8195       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
8196       Glue = Chain.getValue(1);
8197       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
8198       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
8199       Glue = Chain.getValue(1);
8200       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
8201     } else {
8202       // Handle a 'normal' return.
8203       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
8204       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
8205 
8206       if (STI.isRegisterReservedByUser(VA.getLocReg()))
8207         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8208             MF.getFunction(),
8209             "Return value register required, but has been reserved."});
8210 
8211       // Guarantee that all emitted copies are stuck together.
8212       Glue = Chain.getValue(1);
8213       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
8214     }
8215   }
8216 
8217   RetOps[0] = Chain; // Update chain.
8218 
8219   // Add the glue node if we have it.
8220   if (Glue.getNode()) {
8221     RetOps.push_back(Glue);
8222   }
8223 
8224   unsigned RetOpc = RISCVISD::RET_FLAG;
8225   // Interrupt service routines use different return instructions.
8226   const Function &Func = DAG.getMachineFunction().getFunction();
8227   if (Func.hasFnAttribute("interrupt")) {
8228     if (!Func.getReturnType()->isVoidTy())
8229       report_fatal_error(
8230           "Functions with the interrupt attribute must have void return type!");
8231 
8232     MachineFunction &MF = DAG.getMachineFunction();
8233     StringRef Kind =
8234       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
8235 
8236     if (Kind == "user")
8237       RetOpc = RISCVISD::URET_FLAG;
8238     else if (Kind == "supervisor")
8239       RetOpc = RISCVISD::SRET_FLAG;
8240     else
8241       RetOpc = RISCVISD::MRET_FLAG;
8242   }
8243 
8244   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
8245 }
8246 
8247 void RISCVTargetLowering::validateCCReservedRegs(
8248     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
8249     MachineFunction &MF) const {
8250   const Function &F = MF.getFunction();
8251   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
8252 
8253   if (llvm::any_of(Regs, [&STI](auto Reg) {
8254         return STI.isRegisterReservedByUser(Reg.first);
8255       }))
8256     F.getContext().diagnose(DiagnosticInfoUnsupported{
8257         F, "Argument register required, but has been reserved."});
8258 }
8259 
8260 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
8261   return CI->isTailCall();
8262 }
8263 
8264 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
8265 #define NODE_NAME_CASE(NODE)                                                   \
8266   case RISCVISD::NODE:                                                         \
8267     return "RISCVISD::" #NODE;
8268   // clang-format off
8269   switch ((RISCVISD::NodeType)Opcode) {
8270   case RISCVISD::FIRST_NUMBER:
8271     break;
8272   NODE_NAME_CASE(RET_FLAG)
8273   NODE_NAME_CASE(URET_FLAG)
8274   NODE_NAME_CASE(SRET_FLAG)
8275   NODE_NAME_CASE(MRET_FLAG)
8276   NODE_NAME_CASE(CALL)
8277   NODE_NAME_CASE(SELECT_CC)
8278   NODE_NAME_CASE(BR_CC)
8279   NODE_NAME_CASE(BuildPairF64)
8280   NODE_NAME_CASE(SplitF64)
8281   NODE_NAME_CASE(TAIL)
8282   NODE_NAME_CASE(MULHSU)
8283   NODE_NAME_CASE(SLLW)
8284   NODE_NAME_CASE(SRAW)
8285   NODE_NAME_CASE(SRLW)
8286   NODE_NAME_CASE(DIVW)
8287   NODE_NAME_CASE(DIVUW)
8288   NODE_NAME_CASE(REMUW)
8289   NODE_NAME_CASE(ROLW)
8290   NODE_NAME_CASE(RORW)
8291   NODE_NAME_CASE(CLZW)
8292   NODE_NAME_CASE(CTZW)
8293   NODE_NAME_CASE(FSLW)
8294   NODE_NAME_CASE(FSRW)
8295   NODE_NAME_CASE(FSL)
8296   NODE_NAME_CASE(FSR)
8297   NODE_NAME_CASE(FMV_H_X)
8298   NODE_NAME_CASE(FMV_X_ANYEXTH)
8299   NODE_NAME_CASE(FMV_W_X_RV64)
8300   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
8301   NODE_NAME_CASE(READ_CYCLE_WIDE)
8302   NODE_NAME_CASE(GREV)
8303   NODE_NAME_CASE(GREVW)
8304   NODE_NAME_CASE(GORC)
8305   NODE_NAME_CASE(GORCW)
8306   NODE_NAME_CASE(SHFL)
8307   NODE_NAME_CASE(SHFLW)
8308   NODE_NAME_CASE(UNSHFL)
8309   NODE_NAME_CASE(UNSHFLW)
8310   NODE_NAME_CASE(BCOMPRESS)
8311   NODE_NAME_CASE(BCOMPRESSW)
8312   NODE_NAME_CASE(BDECOMPRESS)
8313   NODE_NAME_CASE(BDECOMPRESSW)
8314   NODE_NAME_CASE(VMV_V_X_VL)
8315   NODE_NAME_CASE(VFMV_V_F_VL)
8316   NODE_NAME_CASE(VMV_X_S)
8317   NODE_NAME_CASE(VMV_S_X_VL)
8318   NODE_NAME_CASE(VFMV_S_F_VL)
8319   NODE_NAME_CASE(SPLAT_VECTOR_I64)
8320   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
8321   NODE_NAME_CASE(READ_VLENB)
8322   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
8323   NODE_NAME_CASE(VSLIDEUP_VL)
8324   NODE_NAME_CASE(VSLIDE1UP_VL)
8325   NODE_NAME_CASE(VSLIDEDOWN_VL)
8326   NODE_NAME_CASE(VSLIDE1DOWN_VL)
8327   NODE_NAME_CASE(VID_VL)
8328   NODE_NAME_CASE(VFNCVT_ROD_VL)
8329   NODE_NAME_CASE(VECREDUCE_ADD_VL)
8330   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
8331   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
8332   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
8333   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
8334   NODE_NAME_CASE(VECREDUCE_AND_VL)
8335   NODE_NAME_CASE(VECREDUCE_OR_VL)
8336   NODE_NAME_CASE(VECREDUCE_XOR_VL)
8337   NODE_NAME_CASE(VECREDUCE_FADD_VL)
8338   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
8339   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
8340   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
8341   NODE_NAME_CASE(ADD_VL)
8342   NODE_NAME_CASE(AND_VL)
8343   NODE_NAME_CASE(MUL_VL)
8344   NODE_NAME_CASE(OR_VL)
8345   NODE_NAME_CASE(SDIV_VL)
8346   NODE_NAME_CASE(SHL_VL)
8347   NODE_NAME_CASE(SREM_VL)
8348   NODE_NAME_CASE(SRA_VL)
8349   NODE_NAME_CASE(SRL_VL)
8350   NODE_NAME_CASE(SUB_VL)
8351   NODE_NAME_CASE(UDIV_VL)
8352   NODE_NAME_CASE(UREM_VL)
8353   NODE_NAME_CASE(XOR_VL)
8354   NODE_NAME_CASE(FADD_VL)
8355   NODE_NAME_CASE(FSUB_VL)
8356   NODE_NAME_CASE(FMUL_VL)
8357   NODE_NAME_CASE(FDIV_VL)
8358   NODE_NAME_CASE(FNEG_VL)
8359   NODE_NAME_CASE(FABS_VL)
8360   NODE_NAME_CASE(FSQRT_VL)
8361   NODE_NAME_CASE(FMA_VL)
8362   NODE_NAME_CASE(FCOPYSIGN_VL)
8363   NODE_NAME_CASE(SMIN_VL)
8364   NODE_NAME_CASE(SMAX_VL)
8365   NODE_NAME_CASE(UMIN_VL)
8366   NODE_NAME_CASE(UMAX_VL)
8367   NODE_NAME_CASE(FMINNUM_VL)
8368   NODE_NAME_CASE(FMAXNUM_VL)
8369   NODE_NAME_CASE(MULHS_VL)
8370   NODE_NAME_CASE(MULHU_VL)
8371   NODE_NAME_CASE(FP_TO_SINT_VL)
8372   NODE_NAME_CASE(FP_TO_UINT_VL)
8373   NODE_NAME_CASE(SINT_TO_FP_VL)
8374   NODE_NAME_CASE(UINT_TO_FP_VL)
8375   NODE_NAME_CASE(FP_EXTEND_VL)
8376   NODE_NAME_CASE(FP_ROUND_VL)
8377   NODE_NAME_CASE(VWMUL_VL)
8378   NODE_NAME_CASE(VWMULU_VL)
8379   NODE_NAME_CASE(SETCC_VL)
8380   NODE_NAME_CASE(VSELECT_VL)
8381   NODE_NAME_CASE(VMAND_VL)
8382   NODE_NAME_CASE(VMOR_VL)
8383   NODE_NAME_CASE(VMXOR_VL)
8384   NODE_NAME_CASE(VMCLR_VL)
8385   NODE_NAME_CASE(VMSET_VL)
8386   NODE_NAME_CASE(VRGATHER_VX_VL)
8387   NODE_NAME_CASE(VRGATHER_VV_VL)
8388   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
8389   NODE_NAME_CASE(VSEXT_VL)
8390   NODE_NAME_CASE(VZEXT_VL)
8391   NODE_NAME_CASE(VPOPC_VL)
8392   NODE_NAME_CASE(VLE_VL)
8393   NODE_NAME_CASE(VSE_VL)
8394   NODE_NAME_CASE(READ_CSR)
8395   NODE_NAME_CASE(WRITE_CSR)
8396   NODE_NAME_CASE(SWAP_CSR)
8397   }
8398   // clang-format on
8399   return nullptr;
8400 #undef NODE_NAME_CASE
8401 }
8402 
8403 /// getConstraintType - Given a constraint letter, return the type of
8404 /// constraint it is for this target.
8405 RISCVTargetLowering::ConstraintType
8406 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
8407   if (Constraint.size() == 1) {
8408     switch (Constraint[0]) {
8409     default:
8410       break;
8411     case 'f':
8412     case 'v':
8413       return C_RegisterClass;
8414     case 'I':
8415     case 'J':
8416     case 'K':
8417       return C_Immediate;
8418     case 'A':
8419       return C_Memory;
8420     case 'S': // A symbolic address
8421       return C_Other;
8422     }
8423   }
8424   return TargetLowering::getConstraintType(Constraint);
8425 }
8426 
8427 std::pair<unsigned, const TargetRegisterClass *>
8428 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
8429                                                   StringRef Constraint,
8430                                                   MVT VT) const {
8431   // First, see if this is a constraint that directly corresponds to a
8432   // RISCV register class.
8433   if (Constraint.size() == 1) {
8434     switch (Constraint[0]) {
8435     case 'r':
8436       return std::make_pair(0U, &RISCV::GPRRegClass);
8437     case 'f':
8438       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
8439         return std::make_pair(0U, &RISCV::FPR16RegClass);
8440       if (Subtarget.hasStdExtF() && VT == MVT::f32)
8441         return std::make_pair(0U, &RISCV::FPR32RegClass);
8442       if (Subtarget.hasStdExtD() && VT == MVT::f64)
8443         return std::make_pair(0U, &RISCV::FPR64RegClass);
8444       break;
8445     case 'v':
8446       for (const auto *RC :
8447            {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
8448             &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8449         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
8450           return std::make_pair(0U, RC);
8451       }
8452       break;
8453     default:
8454       break;
8455     }
8456   }
8457 
8458   // Clang will correctly decode the usage of register name aliases into their
8459   // official names. However, other frontends like `rustc` do not. This allows
8460   // users of these frontends to use the ABI names for registers in LLVM-style
8461   // register constraints.
8462   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
8463                                .Case("{zero}", RISCV::X0)
8464                                .Case("{ra}", RISCV::X1)
8465                                .Case("{sp}", RISCV::X2)
8466                                .Case("{gp}", RISCV::X3)
8467                                .Case("{tp}", RISCV::X4)
8468                                .Case("{t0}", RISCV::X5)
8469                                .Case("{t1}", RISCV::X6)
8470                                .Case("{t2}", RISCV::X7)
8471                                .Cases("{s0}", "{fp}", RISCV::X8)
8472                                .Case("{s1}", RISCV::X9)
8473                                .Case("{a0}", RISCV::X10)
8474                                .Case("{a1}", RISCV::X11)
8475                                .Case("{a2}", RISCV::X12)
8476                                .Case("{a3}", RISCV::X13)
8477                                .Case("{a4}", RISCV::X14)
8478                                .Case("{a5}", RISCV::X15)
8479                                .Case("{a6}", RISCV::X16)
8480                                .Case("{a7}", RISCV::X17)
8481                                .Case("{s2}", RISCV::X18)
8482                                .Case("{s3}", RISCV::X19)
8483                                .Case("{s4}", RISCV::X20)
8484                                .Case("{s5}", RISCV::X21)
8485                                .Case("{s6}", RISCV::X22)
8486                                .Case("{s7}", RISCV::X23)
8487                                .Case("{s8}", RISCV::X24)
8488                                .Case("{s9}", RISCV::X25)
8489                                .Case("{s10}", RISCV::X26)
8490                                .Case("{s11}", RISCV::X27)
8491                                .Case("{t3}", RISCV::X28)
8492                                .Case("{t4}", RISCV::X29)
8493                                .Case("{t5}", RISCV::X30)
8494                                .Case("{t6}", RISCV::X31)
8495                                .Default(RISCV::NoRegister);
8496   if (XRegFromAlias != RISCV::NoRegister)
8497     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
8498 
  // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
  // TableGen record rather than the AsmName to choose registers for InlineAsm
  // constraints, and since we want to match those names to the widest
  // floating-point register type available, manually select floating-point
  // registers here.
8503   //
8504   // The second case is the ABI name of the register, so that frontends can also
8505   // use the ABI names in register constraint lists.
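  // For example, the constraint "{fa0}" (or equivalently "{f10}") resolves
  // below to F10_D and FPR64RegClass when the D extension is present, and to
  // F10_F and FPR32RegClass otherwise.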
8506   if (Subtarget.hasStdExtF()) {
8507     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
8508                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
8509                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
8510                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
8511                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
8512                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
8513                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
8514                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
8515                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
8516                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
8517                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
8518                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
8519                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
8520                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
8521                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
8522                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
8523                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
8524                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
8525                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
8526                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
8527                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
8528                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
8529                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
8530                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
8531                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
8532                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
8533                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
8534                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
8535                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
8536                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
8537                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
8538                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
8539                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
8540                         .Default(RISCV::NoRegister);
8541     if (FReg != RISCV::NoRegister) {
8542       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
8543       if (Subtarget.hasStdExtD()) {
8544         unsigned RegNo = FReg - RISCV::F0_F;
8545         unsigned DReg = RISCV::F0_D + RegNo;
8546         return std::make_pair(DReg, &RISCV::FPR64RegClass);
8547       }
8548       return std::make_pair(FReg, &RISCV::FPR32RegClass);
8549     }
8550   }
8551 
8552   if (Subtarget.hasStdExtV()) {
8553     Register VReg = StringSwitch<Register>(Constraint.lower())
8554                         .Case("{v0}", RISCV::V0)
8555                         .Case("{v1}", RISCV::V1)
8556                         .Case("{v2}", RISCV::V2)
8557                         .Case("{v3}", RISCV::V3)
8558                         .Case("{v4}", RISCV::V4)
8559                         .Case("{v5}", RISCV::V5)
8560                         .Case("{v6}", RISCV::V6)
8561                         .Case("{v7}", RISCV::V7)
8562                         .Case("{v8}", RISCV::V8)
8563                         .Case("{v9}", RISCV::V9)
8564                         .Case("{v10}", RISCV::V10)
8565                         .Case("{v11}", RISCV::V11)
8566                         .Case("{v12}", RISCV::V12)
8567                         .Case("{v13}", RISCV::V13)
8568                         .Case("{v14}", RISCV::V14)
8569                         .Case("{v15}", RISCV::V15)
8570                         .Case("{v16}", RISCV::V16)
8571                         .Case("{v17}", RISCV::V17)
8572                         .Case("{v18}", RISCV::V18)
8573                         .Case("{v19}", RISCV::V19)
8574                         .Case("{v20}", RISCV::V20)
8575                         .Case("{v21}", RISCV::V21)
8576                         .Case("{v22}", RISCV::V22)
8577                         .Case("{v23}", RISCV::V23)
8578                         .Case("{v24}", RISCV::V24)
8579                         .Case("{v25}", RISCV::V25)
8580                         .Case("{v26}", RISCV::V26)
8581                         .Case("{v27}", RISCV::V27)
8582                         .Case("{v28}", RISCV::V28)
8583                         .Case("{v29}", RISCV::V29)
8584                         .Case("{v30}", RISCV::V30)
8585                         .Case("{v31}", RISCV::V31)
8586                         .Default(RISCV::NoRegister);
8587     if (VReg != RISCV::NoRegister) {
8588       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
8589         return std::make_pair(VReg, &RISCV::VMRegClass);
8590       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
8591         return std::make_pair(VReg, &RISCV::VRRegClass);
8592       for (const auto *RC :
8593            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8594         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
8595           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
8596           return std::make_pair(VReg, RC);
8597         }
8598       }
8599     }
8600   }
8601 
8602   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
8603 }
8604 
8605 unsigned
8606 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
8607   // Currently only support length 1 constraints.
8608   if (ConstraintCode.size() == 1) {
8609     switch (ConstraintCode[0]) {
8610     case 'A':
8611       return InlineAsm::Constraint_A;
8612     default:
8613       break;
8614     }
8615   }
8616 
8617   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
8618 }
8619 
8620 void RISCVTargetLowering::LowerAsmOperandForConstraint(
8621     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
8622     SelectionDAG &DAG) const {
8623   // Currently only support length 1 constraints.
8624   if (Constraint.length() == 1) {
8625     switch (Constraint[0]) {
8626     case 'I':
8627       // Validate & create a 12-bit signed immediate operand.
8628       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8629         uint64_t CVal = C->getSExtValue();
8630         if (isInt<12>(CVal))
8631           Ops.push_back(
8632               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8633       }
8634       return;
8635     case 'J':
8636       // Validate & create an integer zero operand.
8637       if (auto *C = dyn_cast<ConstantSDNode>(Op))
8638         if (C->getZExtValue() == 0)
8639           Ops.push_back(
8640               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
8641       return;
8642     case 'K':
8643       // Validate & create a 5-bit unsigned immediate operand.
8644       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8645         uint64_t CVal = C->getZExtValue();
8646         if (isUInt<5>(CVal))
8647           Ops.push_back(
8648               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8649       }
8650       return;
8651     case 'S':
8652       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
8653         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
8654                                                  GA->getValueType(0)));
8655       } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
8656         Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
8657                                                 BA->getValueType(0)));
8658       }
8659       return;
8660     default:
8661       break;
8662     }
8663   }
8664   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
8665 }
8666 
8667 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
8668                                                    Instruction *Inst,
8669                                                    AtomicOrdering Ord) const {
8670   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
8671     return Builder.CreateFence(Ord);
8672   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
8673     return Builder.CreateFence(AtomicOrdering::Release);
8674   return nullptr;
8675 }
8676 
8677 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
8678                                                     Instruction *Inst,
8679                                                     AtomicOrdering Ord) const {
8680   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
8681     return Builder.CreateFence(AtomicOrdering::Acquire);
8682   return nullptr;
8683 }
8684 
8685 TargetLowering::AtomicExpansionKind
8686 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
8687   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
8688   // point operations can't be used in an lr/sc sequence without breaking the
8689   // forward-progress guarantee.
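  // As a rough sketch only (the rewrite is done by AtomicExpandPass, and the
  // exact IR it emits may differ), an 'atomicrmw fadd' becomes a loop that
  // bitcasts between float and i32 and retries a cmpxchg until it succeeds:
  //   loop:
  //     %old.i = phi i32 [ %init, %entry ], [ %prev, %loop ]
  //     %old   = bitcast i32 %old.i to float
  //     %new   = fadd float %old, %val
  //     %new.i = bitcast float %new to i32
  //     %res   = cmpxchg i32* %p, i32 %old.i, i32 %new.i seq_cst seq_cst
  //     %prev  = extractvalue { i32, i1 } %res, 0
  //     %ok    = extractvalue { i32, i1 } %res, 1
  //     br i1 %ok, label %done, label %loop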
8690   if (AI->isFloatingPointOperation())
8691     return AtomicExpansionKind::CmpXChg;
8692 
8693   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
8694   if (Size == 8 || Size == 16)
8695     return AtomicExpansionKind::MaskedIntrinsic;
8696   return AtomicExpansionKind::None;
8697 }
8698 
8699 static Intrinsic::ID
8700 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
8701   if (XLen == 32) {
8702     switch (BinOp) {
8703     default:
8704       llvm_unreachable("Unexpected AtomicRMW BinOp");
8705     case AtomicRMWInst::Xchg:
8706       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
8707     case AtomicRMWInst::Add:
8708       return Intrinsic::riscv_masked_atomicrmw_add_i32;
8709     case AtomicRMWInst::Sub:
8710       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
8711     case AtomicRMWInst::Nand:
8712       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
8713     case AtomicRMWInst::Max:
8714       return Intrinsic::riscv_masked_atomicrmw_max_i32;
8715     case AtomicRMWInst::Min:
8716       return Intrinsic::riscv_masked_atomicrmw_min_i32;
8717     case AtomicRMWInst::UMax:
8718       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
8719     case AtomicRMWInst::UMin:
8720       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
8721     }
8722   }
8723 
8724   if (XLen == 64) {
8725     switch (BinOp) {
8726     default:
8727       llvm_unreachable("Unexpected AtomicRMW BinOp");
8728     case AtomicRMWInst::Xchg:
8729       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
8730     case AtomicRMWInst::Add:
8731       return Intrinsic::riscv_masked_atomicrmw_add_i64;
8732     case AtomicRMWInst::Sub:
8733       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
8734     case AtomicRMWInst::Nand:
8735       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
8736     case AtomicRMWInst::Max:
8737       return Intrinsic::riscv_masked_atomicrmw_max_i64;
8738     case AtomicRMWInst::Min:
8739       return Intrinsic::riscv_masked_atomicrmw_min_i64;
8740     case AtomicRMWInst::UMax:
8741       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
8742     case AtomicRMWInst::UMin:
8743       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
8744     }
8745   }
8746 
  llvm_unreachable("Unexpected XLen");
8748 }
8749 
8750 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
8751     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
8752     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
8753   unsigned XLen = Subtarget.getXLen();
8754   Value *Ordering =
8755       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
8756   Type *Tys[] = {AlignedAddr->getType()};
8757   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
8758       AI->getModule(),
8759       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
8760 
8761   if (XLen == 64) {
8762     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
8763     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8764     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
8765   }
8766 
8767   Value *Result;
8768 
8769   // Must pass the shift amount needed to sign extend the loaded value prior
8770   // to performing a signed comparison for min/max. ShiftAmt is the number of
8771   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
8772   // is the number of bits to left+right shift the value in order to
8773   // sign-extend.
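  // Worked example (illustrative): for an i8 atomic min on RV32 where the
  // byte lives at bit offset 8 of its aligned word, ShiftAmt is 8 and
  // ValWidth is 8, so SextShamt = (32 - 8) - 8 = 16; shifting the loaded
  // word left and then arithmetically right by 16 sign-extends the byte
  // while keeping it in its shifted position for the signed comparison.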
8774   if (AI->getOperation() == AtomicRMWInst::Min ||
8775       AI->getOperation() == AtomicRMWInst::Max) {
8776     const DataLayout &DL = AI->getModule()->getDataLayout();
8777     unsigned ValWidth =
8778         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
8779     Value *SextShamt =
8780         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
8781     Result = Builder.CreateCall(LrwOpScwLoop,
8782                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
8783   } else {
8784     Result =
8785         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
8786   }
8787 
8788   if (XLen == 64)
8789     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8790   return Result;
8791 }
8792 
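// LR/SC only operate on naturally aligned 32-bit (and, with RV64A, 64-bit)
// words, so i8/i16 compare-and-exchange cannot be lowered directly. Ask
// AtomicExpandPass to rewrite such operations in terms of the masked cmpxchg
// intrinsic emitted below.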
8793 TargetLowering::AtomicExpansionKind
8794 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
8795     AtomicCmpXchgInst *CI) const {
8796   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
8797   if (Size == 8 || Size == 16)
8798     return AtomicExpansionKind::MaskedIntrinsic;
8799   return AtomicExpansionKind::None;
8800 }
8801 
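// Mirror of the masked atomicrmw case above: sign-extend the operands to
// XLen on RV64, call riscv_masked_cmpxchg_i32/_i64, and truncate the result
// back to i32.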
8802 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
8803     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
8804     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
8805   unsigned XLen = Subtarget.getXLen();
8806   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
8807   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
8808   if (XLen == 64) {
8809     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
8810     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
8811     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8812     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
8813   }
8814   Type *Tys[] = {AlignedAddr->getType()};
8815   Function *MaskedCmpXchg =
8816       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
8817   Value *Result = Builder.CreateCall(
8818       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
8819   if (XLen == 64)
8820     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8821   return Result;
8822 }
8823 
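// Keep any existing extension on masked gather/scatter index operands;
// stripping it is not currently profitable for RVV indexed loads and stores.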
8824 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
8825   return false;
8826 }
8827 
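// The F, D and Zfh extensions all provide fused multiply-add instructions
// (FMADD/FMSUB/FNMADD/FNMSUB), so forming an FMA is reported as profitable
// for any scalar FP type whose extension is enabled.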
8828 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
8829                                                      EVT VT) const {
8830   VT = VT.getScalarType();
8831 
8832   if (!VT.isSimple())
8833     return false;
8834 
8835   switch (VT.getSimpleVT().SimpleTy) {
8836   case MVT::f16:
8837     return Subtarget.hasStdExtZfh();
8838   case MVT::f32:
8839     return Subtarget.hasStdExtF();
8840   case MVT::f64:
8841     return Subtarget.hasStdExtD();
8842   default:
8843     break;
8844   }
8845 
8846   return false;
8847 }
8848 
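// The exception pointer and selector are passed to the landing pad in the
// first two argument registers, a0 (X10) and a1 (X11).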
8849 Register RISCVTargetLowering::getExceptionPointerRegister(
8850     const Constant *PersonalityFn) const {
8851   return RISCV::X10;
8852 }
8853 
8854 Register RISCVTargetLowering::getExceptionSelectorRegister(
8855     const Constant *PersonalityFn) const {
8856   return RISCV::X11;
8857 }
8858 
8859 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
8860   // Return false to suppress unnecessary extensions when a libcall
8861   // argument or return value is an f32 under the LP64 ABI.
8862   RISCVABI::ABI ABI = Subtarget.getTargetABI();
8863   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
8864     return false;
8865 
8866   return true;
8867 }
8868 
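// The RV64 calling convention passes i32 values sign-extended to 64 bits
// regardless of signedness, so i32 libcall arguments and results must be
// sign-extended as well.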
8869 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
8870   if (Subtarget.is64Bit() && Type == MVT::i32)
8871     return true;
8872 
8873   return IsSigned;
8874 }
8875 
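// Decide whether a multiply by a constant should be decomposed into shifts
// and adds/subs rather than kept as a MUL. For example, x * 9 can become
// (x << 3) + x and x * 7 can become (x << 3) - x; for a constant with
// trailing zeros such as 4100 (1025 << 2), an extra SLLI gives
// ((x << 10) + x) << 2. The decomposition is skipped when the M extension
// would make a single MUL the better choice.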
8876 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
8877                                                  SDValue C) const {
8878   // Check integral scalar types.
8879   if (VT.isScalarInteger()) {
8880     // Omit the optimization if the subtarget has the M extension and the data
8881     // size exceeds XLen.
8882     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
8883       return false;
8884     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
8885       // Break the MUL into an SLLI and an ADD/SUB.
8886       const APInt &Imm = ConstNode->getAPIntValue();
8887       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
8888           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
8889         return true;
8890       // Omit the following optimization if the subtarget has the M extension
8891       // and the data size >= XLen.
8892       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
8893         return false;
8894       // Break the MUL into two SLLI instructions and an ADD/SUB, if
8895       // materializing Imm would otherwise need a LUI/ADDI pair.
8896       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
8897         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
8898         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
8899             (1 - ImmS).isPowerOf2())
8900           return true;
8901       }
8902     }
8903   }
8904 
8905   return false;
8906 }
8907 
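// A misaligned vector access is allowed (and reported as fast) as long as it
// is still aligned to the element type; anything below element alignment,
// and all scalar types, conservatively report false.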
8908 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
8909     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
8910     bool *Fast) const {
8911   if (!VT.isVector())
8912     return false;
8913 
8914   EVT ElemVT = VT.getVectorElementType();
8915   if (Alignment >= ElemVT.getStoreSize()) {
8916     if (Fast)
8917       *Fast = true;
8918     return true;
8919   }
8920 
8921   return false;
8922 }
8923 
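// Target-specific splitting of a value into its ABI register parts. Two
// cases need custom handling here: an f16 passed in an f32 FPR must be
// NaN-boxed (its upper 16 bits set to all ones), and a scalable vector whose
// register type is wider than the value type is inserted into the low
// elements of an undef vector of the register type.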
8924 bool RISCVTargetLowering::splitValueIntoRegisterParts(
8925     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
8926     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
8927   bool IsABIRegCopy = CC.hasValue();
8928   EVT ValueVT = Val.getValueType();
8929   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8930     // Cast the f16 to i16, extend to i32, pad the high 16 bits with ones
8931     // (NaN-boxing the value as an f32 NaN), and cast to f32.
8932     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
8933     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
8934     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
8935                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
8936     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
8937     Parts[0] = Val;
8938     return true;
8939   }
8940 
8941   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8942     LLVMContext &Context = *DAG.getContext();
8943     EVT ValueEltVT = ValueVT.getVectorElementType();
8944     EVT PartEltVT = PartVT.getVectorElementType();
8945     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8946     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8947     if (PartVTBitSize % ValueVTBitSize == 0) {
8948       // If the element types differ, first bitcast the value to a vector with
8949       // PartVT's element type.
8950       if (ValueEltVT != PartEltVT) {
8951         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
8952         assert(Count != 0 && "The number of elements should not be zero.");
8953         EVT SameEltTypeVT =
8954             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8955         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
8956       }
8957       Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
8958                         Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8959       Parts[0] = Val;
8960       return true;
8961     }
8962   }
8963   return false;
8964 }
8965 
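// Inverse of splitValueIntoRegisterParts above: strip the f32 NaN box back
// to f16, or extract the original scalable vector value from the low
// elements of the wider register type.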
8966 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
8967     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
8968     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
8969   bool IsABIRegCopy = CC.hasValue();
8970   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8971     SDValue Val = Parts[0];
8972 
8973     // Cast the f32 to i32, truncate to i16, and cast back to f16.
8974     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
8975     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
8976     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
8977     return Val;
8978   }
8979 
8980   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8981     LLVMContext &Context = *DAG.getContext();
8982     SDValue Val = Parts[0];
8983     EVT ValueEltVT = ValueVT.getVectorElementType();
8984     EVT PartEltVT = PartVT.getVectorElementType();
8985     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8986     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8987     if (PartVTBitSize % ValueVTBitSize == 0) {
8988       EVT SameEltTypeVT = ValueVT;
8989       // If the element types differ, extract a subvector with PartVT's
8990       // element type first; it is bitcast back to ValueVT below.
8991       if (ValueEltVT != PartEltVT) {
8992         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
8993         assert(Count != 0 && "The number of elements should not be zero.");
8994         SameEltTypeVT =
8995             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8996       }
8997       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
8998                         DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8999       if (ValueEltVT != PartEltVT)
9000         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
9001       return Val;
9002     }
9003   }
9004   return SDValue();
9005 }
9006 
9007 #define GET_REGISTER_MATCHER
9008 #include "RISCVGenAsmMatcher.inc"
9009 
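// Resolve a register name coming from llvm.read_register/llvm.write_register
// or a named-register global variable. Accessing a register this way is only
// supported if the register is reserved, either by the target or explicitly
// by the user.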
9010 Register
9011 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
9012                                        const MachineFunction &MF) const {
9013   Register Reg = MatchRegisterAltName(RegName);
9014   if (Reg == RISCV::NoRegister)
9015     Reg = MatchRegisterName(RegName);
9016   if (Reg == RISCV::NoRegister)
9017     report_fatal_error(
9018         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
9019   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
9020   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
9021     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
9022                              StringRef(RegName) + "\"."));
9023   return Reg;
9024 }
9025 
9026 namespace llvm {
9027 namespace RISCVVIntrinsicsTable {
9028 
9029 #define GET_RISCVVIntrinsicsTable_IMPL
9030 #include "RISCVGenSearchableTables.inc"
9031 
9032 } // namespace RISCVVIntrinsicsTable
9033 
9034 } // namespace llvm
9035