1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
28 #include "llvm/CodeGen/ValueTypes.h"
29 #include "llvm/IR/DiagnosticInfo.h"
30 #include "llvm/IR/DiagnosticPrinter.h"
31 #include "llvm/IR/IntrinsicsRISCV.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/KnownBits.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "riscv-lower"
42 
43 STATISTIC(NumTailCalls, "Number of tail calls");
44 
45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
46                                          const RISCVSubtarget &STI)
47     : TargetLowering(TM), Subtarget(STI) {
48 
49   if (Subtarget.isRV32E())
50     report_fatal_error("Codegen not yet implemented for RV32E");
51 
52   RISCVABI::ABI ABI = Subtarget.getTargetABI();
53   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
54 
55   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
56       !Subtarget.hasStdExtF()) {
57     errs() << "Hard-float 'f' ABI can't be used for a target that "
58                 "doesn't support the F instruction set extension (ignoring "
59                           "target-abi)\n";
60     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
61   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
62              !Subtarget.hasStdExtD()) {
63     errs() << "Hard-float 'd' ABI can't be used for a target that "
64               "doesn't support the D instruction set extension (ignoring "
65               "target-abi)\n";
66     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
67   }
68 
69   switch (ABI) {
70   default:
71     report_fatal_error("Don't know how to lower this ABI");
72   case RISCVABI::ABI_ILP32:
73   case RISCVABI::ABI_ILP32F:
74   case RISCVABI::ABI_ILP32D:
75   case RISCVABI::ABI_LP64:
76   case RISCVABI::ABI_LP64F:
77   case RISCVABI::ABI_LP64D:
78     break;
79   }
80 
81   MVT XLenVT = Subtarget.getXLenVT();
82 
83   // Set up the register classes.
84   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
85 
86   if (Subtarget.hasStdExtZfh())
87     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
88   if (Subtarget.hasStdExtF())
89     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
90   if (Subtarget.hasStdExtD())
91     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
92 
93   static const MVT::SimpleValueType BoolVecVTs[] = {
94       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
95       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
96   static const MVT::SimpleValueType IntVecVTs[] = {
97       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
98       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
99       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
100       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
101       MVT::nxv4i64, MVT::nxv8i64};
102   static const MVT::SimpleValueType F16VecVTs[] = {
103       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
104       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
105   static const MVT::SimpleValueType F32VecVTs[] = {
106       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
107   static const MVT::SimpleValueType F64VecVTs[] = {
108       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
109 
110   if (Subtarget.hasStdExtV()) {
111     auto addRegClassForRVV = [this](MVT VT) {
112       unsigned Size = VT.getSizeInBits().getKnownMinValue();
113       assert(Size <= 512 && isPowerOf2_32(Size));
114       const TargetRegisterClass *RC;
115       if (Size <= 64)
116         RC = &RISCV::VRRegClass;
117       else if (Size == 128)
118         RC = &RISCV::VRM2RegClass;
119       else if (Size == 256)
120         RC = &RISCV::VRM4RegClass;
121       else
122         RC = &RISCV::VRM8RegClass;
123 
124       addRegisterClass(VT, RC);
125     };
126 
127     for (MVT VT : BoolVecVTs)
128       addRegClassForRVV(VT);
129     for (MVT VT : IntVecVTs)
130       addRegClassForRVV(VT);
131 
132     if (Subtarget.hasStdExtZfh())
133       for (MVT VT : F16VecVTs)
134         addRegClassForRVV(VT);
135 
136     if (Subtarget.hasStdExtF())
137       for (MVT VT : F32VecVTs)
138         addRegClassForRVV(VT);
139 
140     if (Subtarget.hasStdExtD())
141       for (MVT VT : F64VecVTs)
142         addRegClassForRVV(VT);
143 
144     if (Subtarget.useRVVForFixedLengthVectors()) {
145       auto addRegClassForFixedVectors = [this](MVT VT) {
146         MVT ContainerVT = getContainerForFixedLengthVector(VT);
147         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
148         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
149         addRegisterClass(VT, TRI.getRegClass(RCID));
150       };
151       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
152         if (useRVVForFixedLengthVectorVT(VT))
153           addRegClassForFixedVectors(VT);
154 
155       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
156         if (useRVVForFixedLengthVectorVT(VT))
157           addRegClassForFixedVectors(VT);
158     }
159   }
160 
161   // Compute derived properties from the register classes.
162   computeRegisterProperties(STI.getRegisterInfo());
163 
164   setStackPointerRegisterToSaveRestore(RISCV::X2);
165 
166   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
167     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
168 
169   // TODO: add all necessary setOperationAction calls.
170   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
171 
172   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
173   setOperationAction(ISD::BR_CC, XLenVT, Expand);
174   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
175   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
176 
177   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
178   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
179 
180   setOperationAction(ISD::VASTART, MVT::Other, Custom);
181   setOperationAction(ISD::VAARG, MVT::Other, Expand);
182   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
183   setOperationAction(ISD::VAEND, MVT::Other, Expand);
184 
185   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
186   if (!Subtarget.hasStdExtZbb()) {
187     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
188     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
189   }
190 
191   if (Subtarget.is64Bit()) {
192     setOperationAction(ISD::ADD, MVT::i32, Custom);
193     setOperationAction(ISD::SUB, MVT::i32, Custom);
194     setOperationAction(ISD::SHL, MVT::i32, Custom);
195     setOperationAction(ISD::SRA, MVT::i32, Custom);
196     setOperationAction(ISD::SRL, MVT::i32, Custom);
197 
198     setOperationAction(ISD::UADDO, MVT::i32, Custom);
199     setOperationAction(ISD::USUBO, MVT::i32, Custom);
200     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
201     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
202   }
203 
204   if (!Subtarget.hasStdExtM()) {
205     setOperationAction(ISD::MUL, XLenVT, Expand);
206     setOperationAction(ISD::MULHS, XLenVT, Expand);
207     setOperationAction(ISD::MULHU, XLenVT, Expand);
208     setOperationAction(ISD::SDIV, XLenVT, Expand);
209     setOperationAction(ISD::UDIV, XLenVT, Expand);
210     setOperationAction(ISD::SREM, XLenVT, Expand);
211     setOperationAction(ISD::UREM, XLenVT, Expand);
212   } else {
213     if (Subtarget.is64Bit()) {
214       setOperationAction(ISD::MUL, MVT::i32, Custom);
215       setOperationAction(ISD::MUL, MVT::i128, Custom);
216 
217       setOperationAction(ISD::SDIV, MVT::i8, Custom);
218       setOperationAction(ISD::UDIV, MVT::i8, Custom);
219       setOperationAction(ISD::UREM, MVT::i8, Custom);
220       setOperationAction(ISD::SDIV, MVT::i16, Custom);
221       setOperationAction(ISD::UDIV, MVT::i16, Custom);
222       setOperationAction(ISD::UREM, MVT::i16, Custom);
223       setOperationAction(ISD::SDIV, MVT::i32, Custom);
224       setOperationAction(ISD::UDIV, MVT::i32, Custom);
225       setOperationAction(ISD::UREM, MVT::i32, Custom);
226     } else {
227       setOperationAction(ISD::MUL, MVT::i64, Custom);
228     }
229   }
230 
231   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
232   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
233   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
234   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
235 
236   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
237   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
238   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
239 
240   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
241     if (Subtarget.is64Bit()) {
242       setOperationAction(ISD::ROTL, MVT::i32, Custom);
243       setOperationAction(ISD::ROTR, MVT::i32, Custom);
244     }
245   } else {
246     setOperationAction(ISD::ROTL, XLenVT, Expand);
247     setOperationAction(ISD::ROTR, XLenVT, Expand);
248   }
249 
250   if (Subtarget.hasStdExtZbp()) {
251     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
252     // more combining.
253     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
254     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
255     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
256     // BSWAP i8 doesn't exist.
257     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
258     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
259 
260     if (Subtarget.is64Bit()) {
261       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
262       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
263     }
264   } else {
265     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
266     // pattern match it directly in isel.
267     setOperationAction(ISD::BSWAP, XLenVT,
268                        Subtarget.hasStdExtZbb() ? Legal : Expand);
269   }
270 
271   if (Subtarget.hasStdExtZbb()) {
272     setOperationAction(ISD::SMIN, XLenVT, Legal);
273     setOperationAction(ISD::SMAX, XLenVT, Legal);
274     setOperationAction(ISD::UMIN, XLenVT, Legal);
275     setOperationAction(ISD::UMAX, XLenVT, Legal);
276 
277     if (Subtarget.is64Bit()) {
278       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
279       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
280       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
281       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
282     }
283   } else {
284     setOperationAction(ISD::CTTZ, XLenVT, Expand);
285     setOperationAction(ISD::CTLZ, XLenVT, Expand);
286     setOperationAction(ISD::CTPOP, XLenVT, Expand);
287   }
288 
289   if (Subtarget.hasStdExtZbt()) {
290     setOperationAction(ISD::FSHL, XLenVT, Custom);
291     setOperationAction(ISD::FSHR, XLenVT, Custom);
292     setOperationAction(ISD::SELECT, XLenVT, Legal);
293 
294     if (Subtarget.is64Bit()) {
295       setOperationAction(ISD::FSHL, MVT::i32, Custom);
296       setOperationAction(ISD::FSHR, MVT::i32, Custom);
297     }
298   } else {
299     setOperationAction(ISD::SELECT, XLenVT, Custom);
300   }
301 
302   ISD::CondCode FPCCToExpand[] = {
303       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
304       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
305       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
306 
307   ISD::NodeType FPOpToExpand[] = {
308       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
309       ISD::FP_TO_FP16};
310 
311   if (Subtarget.hasStdExtZfh())
312     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
313 
314   if (Subtarget.hasStdExtZfh()) {
315     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
316     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
317     for (auto CC : FPCCToExpand)
318       setCondCodeAction(CC, MVT::f16, Expand);
319     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
320     setOperationAction(ISD::SELECT, MVT::f16, Custom);
321     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
322     for (auto Op : FPOpToExpand)
323       setOperationAction(Op, MVT::f16, Expand);
324   }
325 
326   if (Subtarget.hasStdExtF()) {
327     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
328     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
329     for (auto CC : FPCCToExpand)
330       setCondCodeAction(CC, MVT::f32, Expand);
331     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
332     setOperationAction(ISD::SELECT, MVT::f32, Custom);
333     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
334     for (auto Op : FPOpToExpand)
335       setOperationAction(Op, MVT::f32, Expand);
336     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
337     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
338   }
339 
340   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
341     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
342 
343   if (Subtarget.hasStdExtD()) {
344     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
345     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
346     for (auto CC : FPCCToExpand)
347       setCondCodeAction(CC, MVT::f64, Expand);
348     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
349     setOperationAction(ISD::SELECT, MVT::f64, Custom);
350     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
351     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
352     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
353     for (auto Op : FPOpToExpand)
354       setOperationAction(Op, MVT::f64, Expand);
355     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
356     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
357   }
358 
359   if (Subtarget.is64Bit()) {
360     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
361     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
362     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
363     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
364   }
365 
366   if (Subtarget.hasStdExtF()) {
367     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
368     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
369   }
370 
371   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
372   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
373   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
374   setOperationAction(ISD::JumpTable, XLenVT, Custom);
375 
376   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
377 
378   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
379   // Unfortunately this can't be determined just from the ISA naming string.
380   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
381                      Subtarget.is64Bit() ? Legal : Custom);
382 
383   setOperationAction(ISD::TRAP, MVT::Other, Legal);
384   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
385   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
386   if (Subtarget.is64Bit())
387     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
388 
389   if (Subtarget.hasStdExtA()) {
390     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
391     setMinCmpXchgSizeInBits(32);
392   } else {
393     setMaxAtomicSizeInBitsSupported(0);
394   }
395 
396   setBooleanContents(ZeroOrOneBooleanContent);
397 
398   if (Subtarget.hasStdExtV()) {
399     setBooleanVectorContents(ZeroOrOneBooleanContent);
400 
401     setOperationAction(ISD::VSCALE, XLenVT, Custom);
402 
403     // RVV intrinsics may have illegal operands.
404     // We also need to custom legalize vmv.x.s.
405     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
406     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
407     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
408     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
409     if (Subtarget.is64Bit()) {
410       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
411     } else {
412       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
413       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
414     }
415 
416     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
417 
418     static unsigned IntegerVPOps[] = {
419         ISD::VP_ADD,  ISD::VP_SUB,  ISD::VP_MUL, ISD::VP_SDIV, ISD::VP_UDIV,
420         ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,   ISD::VP_XOR,
421         ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL};
422 
423     static unsigned FloatingPointVPOps[] = {ISD::VP_FADD, ISD::VP_FSUB,
424                                             ISD::VP_FMUL, ISD::VP_FDIV};
425 
426     if (!Subtarget.is64Bit()) {
427       // We must custom-lower certain vXi64 operations on RV32 due to the vector
428       // element type being illegal.
429       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
430       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
431 
432       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
433       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
434       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
435       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
436       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
437       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
438       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
439       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
440     }
441 
442     for (MVT VT : BoolVecVTs) {
443       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
444 
445       // Mask VTs are custom-expanded into a series of standard nodes
446       setOperationAction(ISD::TRUNCATE, VT, Custom);
447       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
448       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
449       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
450 
451       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
452       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
453 
454       setOperationAction(ISD::SELECT, VT, Custom);
455       setOperationAction(ISD::SELECT_CC, VT, Expand);
456       setOperationAction(ISD::VSELECT, VT, Expand);
457 
458       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
459       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
460       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
461 
462       // RVV has native int->float & float->int conversions where the
463       // element type sizes are within one power-of-two of each other. Any
464       // wider distances between type sizes have to be lowered as sequences
465       // which progressively narrow the gap in stages.
466       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
467       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
468       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
469       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
470 
471       // Expand all extending loads to types larger than this, and truncating
472       // stores from types larger than this.
473       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
474         setTruncStoreAction(OtherVT, VT, Expand);
475         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
476         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
477         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
478       }
479     }
480 
481     for (MVT VT : IntVecVTs) {
482       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
483       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
484 
485       setOperationAction(ISD::SMIN, VT, Legal);
486       setOperationAction(ISD::SMAX, VT, Legal);
487       setOperationAction(ISD::UMIN, VT, Legal);
488       setOperationAction(ISD::UMAX, VT, Legal);
489 
490       setOperationAction(ISD::ROTL, VT, Expand);
491       setOperationAction(ISD::ROTR, VT, Expand);
492 
493       // Custom-lower extensions and truncations from/to mask types.
494       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
495       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
496       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
497 
498       // RVV has native int->float & float->int conversions where the
499       // element type sizes are within one power-of-two of each other. Any
500       // wider distances between type sizes have to be lowered as sequences
501       // which progressively narrow the gap in stages.
502       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
503       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
504       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
505       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
506 
507       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
508       // nodes which truncate by one power of two at a time.
509       setOperationAction(ISD::TRUNCATE, VT, Custom);
510 
511       // Custom-lower insert/extract operations to simplify patterns.
512       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
513       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
514 
515       // Custom-lower reduction operations to set up the corresponding custom
516       // nodes' operands.
517       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
518       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
519       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
520       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
521       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
522       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
523       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
524       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
525 
526       for (unsigned VPOpc : IntegerVPOps)
527         setOperationAction(VPOpc, VT, Custom);
528 
529       setOperationAction(ISD::LOAD, VT, Custom);
530       setOperationAction(ISD::STORE, VT, Custom);
531 
532       setOperationAction(ISD::MLOAD, VT, Custom);
533       setOperationAction(ISD::MSTORE, VT, Custom);
534       setOperationAction(ISD::MGATHER, VT, Custom);
535       setOperationAction(ISD::MSCATTER, VT, Custom);
536 
537       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
538       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
539       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
540 
541       setOperationAction(ISD::SELECT, VT, Custom);
542       setOperationAction(ISD::SELECT_CC, VT, Expand);
543 
544       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
545       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
546 
547       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
548         setTruncStoreAction(VT, OtherVT, Expand);
549         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
550         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
551         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
552       }
553     }
554 
555     // Expand various CCs to best match the RVV ISA, which natively supports UNE
556     // but no other unordered comparisons, and supports all ordered comparisons
557     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
558     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
559     // and we pattern-match those back to the "original", swapping operands once
560     // more. This way we catch both operations and both "vf" and "fv" forms with
561     // fewer patterns.
562     ISD::CondCode VFPCCToExpand[] = {
563         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
564         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
565         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
566     };
567 
568     // Sets common operation actions on RVV floating-point vector types.
569     const auto SetCommonVFPActions = [&](MVT VT) {
570       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
571       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
572       // sizes are within one power-of-two of each other. Therefore conversions
573       // between vXf16 and vXf64 must be lowered as sequences which convert via
574       // vXf32.
575       setOperationAction(ISD::FP_ROUND, VT, Custom);
576       setOperationAction(ISD::FP_EXTEND, VT, Custom);
577       // Custom-lower insert/extract operations to simplify patterns.
578       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
579       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
580       // Expand various condition codes (explained above).
581       for (auto CC : VFPCCToExpand)
582         setCondCodeAction(CC, VT, Expand);
583 
584       setOperationAction(ISD::FMINNUM, VT, Legal);
585       setOperationAction(ISD::FMAXNUM, VT, Legal);
586 
587       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
588       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
589       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
590       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
591       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
592 
593       setOperationAction(ISD::LOAD, VT, Custom);
594       setOperationAction(ISD::STORE, VT, Custom);
595 
596       setOperationAction(ISD::MLOAD, VT, Custom);
597       setOperationAction(ISD::MSTORE, VT, Custom);
598       setOperationAction(ISD::MGATHER, VT, Custom);
599       setOperationAction(ISD::MSCATTER, VT, Custom);
600 
601       setOperationAction(ISD::SELECT, VT, Custom);
602       setOperationAction(ISD::SELECT_CC, VT, Expand);
603 
604       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
605       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
606       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
607 
608       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
609 
610       for (unsigned VPOpc : FloatingPointVPOps)
611         setOperationAction(VPOpc, VT, Custom);
612     };
613 
614     // Sets common extload/truncstore actions on RVV floating-point vector
615     // types.
616     const auto SetCommonVFPExtLoadTruncStoreActions =
617         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
618           for (auto SmallVT : SmallerVTs) {
619             setTruncStoreAction(VT, SmallVT, Expand);
620             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
621           }
622         };
623 
624     if (Subtarget.hasStdExtZfh())
625       for (MVT VT : F16VecVTs)
626         SetCommonVFPActions(VT);
627 
628     for (MVT VT : F32VecVTs) {
629       if (Subtarget.hasStdExtF())
630         SetCommonVFPActions(VT);
631       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
632     }
633 
634     for (MVT VT : F64VecVTs) {
635       if (Subtarget.hasStdExtD())
636         SetCommonVFPActions(VT);
637       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
638       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
639     }
640 
641     if (Subtarget.useRVVForFixedLengthVectors()) {
642       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
643         if (!useRVVForFixedLengthVectorVT(VT))
644           continue;
645 
646         // By default everything must be expanded.
647         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
648           setOperationAction(Op, VT, Expand);
649         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
650           setTruncStoreAction(VT, OtherVT, Expand);
651           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
652           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
653           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
654         }
655 
656         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
657         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
658         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
659 
660         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
661         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
662 
663         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
664         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
665 
666         setOperationAction(ISD::LOAD, VT, Custom);
667         setOperationAction(ISD::STORE, VT, Custom);
668 
669         setOperationAction(ISD::SETCC, VT, Custom);
670 
671         setOperationAction(ISD::SELECT, VT, Custom);
672 
673         setOperationAction(ISD::TRUNCATE, VT, Custom);
674 
675         setOperationAction(ISD::BITCAST, VT, Custom);
676 
677         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
678         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
679         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
680 
681         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
682         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
683         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
684         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
685 
686         // Operations below are different for between masks and other vectors.
687         if (VT.getVectorElementType() == MVT::i1) {
688           setOperationAction(ISD::AND, VT, Custom);
689           setOperationAction(ISD::OR, VT, Custom);
690           setOperationAction(ISD::XOR, VT, Custom);
691           continue;
692         }
693 
694         // Use SPLAT_VECTOR to prevent type legalization from destroying the
695         // splats when type legalizing i64 scalar on RV32.
696         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
697         // improvements first.
698         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
699           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
700           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
701         }
702 
703         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
704         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
705 
706         setOperationAction(ISD::MLOAD, VT, Custom);
707         setOperationAction(ISD::MSTORE, VT, Custom);
708         setOperationAction(ISD::MGATHER, VT, Custom);
709         setOperationAction(ISD::MSCATTER, VT, Custom);
710         setOperationAction(ISD::ADD, VT, Custom);
711         setOperationAction(ISD::MUL, VT, Custom);
712         setOperationAction(ISD::SUB, VT, Custom);
713         setOperationAction(ISD::AND, VT, Custom);
714         setOperationAction(ISD::OR, VT, Custom);
715         setOperationAction(ISD::XOR, VT, Custom);
716         setOperationAction(ISD::SDIV, VT, Custom);
717         setOperationAction(ISD::SREM, VT, Custom);
718         setOperationAction(ISD::UDIV, VT, Custom);
719         setOperationAction(ISD::UREM, VT, Custom);
720         setOperationAction(ISD::SHL, VT, Custom);
721         setOperationAction(ISD::SRA, VT, Custom);
722         setOperationAction(ISD::SRL, VT, Custom);
723 
724         setOperationAction(ISD::SMIN, VT, Custom);
725         setOperationAction(ISD::SMAX, VT, Custom);
726         setOperationAction(ISD::UMIN, VT, Custom);
727         setOperationAction(ISD::UMAX, VT, Custom);
728         setOperationAction(ISD::ABS,  VT, Custom);
729 
730         setOperationAction(ISD::MULHS, VT, Custom);
731         setOperationAction(ISD::MULHU, VT, Custom);
732 
733         setOperationAction(ISD::VSELECT, VT, Custom);
734         setOperationAction(ISD::SELECT_CC, VT, Expand);
735 
736         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
737         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
738         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
739 
740         // Custom-lower reduction operations to set up the corresponding custom
741         // nodes' operands.
742         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
743         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
744         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
745         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
746         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
747 
748         for (unsigned VPOpc : IntegerVPOps)
749           setOperationAction(VPOpc, VT, Custom);
750       }
751 
752       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
753         if (!useRVVForFixedLengthVectorVT(VT))
754           continue;
755 
756         // By default everything must be expanded.
757         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
758           setOperationAction(Op, VT, Expand);
759         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
760           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
761           setTruncStoreAction(VT, OtherVT, Expand);
762         }
763 
764         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
765         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
766         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
767 
768         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
769         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
770         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
771         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
772 
773         setOperationAction(ISD::LOAD, VT, Custom);
774         setOperationAction(ISD::STORE, VT, Custom);
775         setOperationAction(ISD::MLOAD, VT, Custom);
776         setOperationAction(ISD::MSTORE, VT, Custom);
777         setOperationAction(ISD::MGATHER, VT, Custom);
778         setOperationAction(ISD::MSCATTER, VT, Custom);
779         setOperationAction(ISD::FADD, VT, Custom);
780         setOperationAction(ISD::FSUB, VT, Custom);
781         setOperationAction(ISD::FMUL, VT, Custom);
782         setOperationAction(ISD::FDIV, VT, Custom);
783         setOperationAction(ISD::FNEG, VT, Custom);
784         setOperationAction(ISD::FABS, VT, Custom);
785         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
786         setOperationAction(ISD::FSQRT, VT, Custom);
787         setOperationAction(ISD::FMA, VT, Custom);
788         setOperationAction(ISD::FMINNUM, VT, Custom);
789         setOperationAction(ISD::FMAXNUM, VT, Custom);
790 
791         setOperationAction(ISD::FP_ROUND, VT, Custom);
792         setOperationAction(ISD::FP_EXTEND, VT, Custom);
793 
794         for (auto CC : VFPCCToExpand)
795           setCondCodeAction(CC, VT, Expand);
796 
797         setOperationAction(ISD::VSELECT, VT, Custom);
798         setOperationAction(ISD::SELECT, VT, Custom);
799         setOperationAction(ISD::SELECT_CC, VT, Expand);
800 
801         setOperationAction(ISD::BITCAST, VT, Custom);
802 
803         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
804         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
805         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
806         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
807 
808         for (unsigned VPOpc : FloatingPointVPOps)
809           setOperationAction(VPOpc, VT, Custom);
810       }
811 
812       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
813       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
814       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
815       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
816       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
817       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
818       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
819       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
820     }
821   }
822 
823   // Function alignments.
824   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
825   setMinFunctionAlignment(FunctionAlignment);
826   setPrefFunctionAlignment(FunctionAlignment);
827 
828   setMinimumJumpTableEntries(5);
829 
830   // Jumps are expensive, compared to logic
831   setJumpIsExpensive();
832 
833   // We can use any register for comparisons
834   setHasMultipleConditionRegisters();
835 
836   setTargetDAGCombine(ISD::AND);
837   setTargetDAGCombine(ISD::OR);
838   setTargetDAGCombine(ISD::XOR);
839   setTargetDAGCombine(ISD::ANY_EXTEND);
840   if (Subtarget.hasStdExtV()) {
841     setTargetDAGCombine(ISD::FCOPYSIGN);
842     setTargetDAGCombine(ISD::MGATHER);
843     setTargetDAGCombine(ISD::MSCATTER);
844     setTargetDAGCombine(ISD::SRA);
845     setTargetDAGCombine(ISD::SRL);
846     setTargetDAGCombine(ISD::SHL);
847   }
848 }
849 
850 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
851                                             LLVMContext &Context,
852                                             EVT VT) const {
853   if (!VT.isVector())
854     return getPointerTy(DL);
855   if (Subtarget.hasStdExtV() &&
856       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
857     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
858   return VT.changeVectorElementTypeToInteger();
859 }
860 
861 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
862   return Subtarget.getXLenVT();
863 }
864 
865 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
866                                              const CallInst &I,
867                                              MachineFunction &MF,
868                                              unsigned Intrinsic) const {
869   switch (Intrinsic) {
870   default:
871     return false;
872   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
873   case Intrinsic::riscv_masked_atomicrmw_add_i32:
874   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
875   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
876   case Intrinsic::riscv_masked_atomicrmw_max_i32:
877   case Intrinsic::riscv_masked_atomicrmw_min_i32:
878   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
879   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
880   case Intrinsic::riscv_masked_cmpxchg_i32:
881     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
882     Info.opc = ISD::INTRINSIC_W_CHAIN;
883     Info.memVT = MVT::getVT(PtrTy->getElementType());
884     Info.ptrVal = I.getArgOperand(0);
885     Info.offset = 0;
886     Info.align = Align(4);
887     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
888                  MachineMemOperand::MOVolatile;
889     return true;
890   }
891 }
892 
893 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
894                                                 const AddrMode &AM, Type *Ty,
895                                                 unsigned AS,
896                                                 Instruction *I) const {
897   // No global is ever allowed as a base.
898   if (AM.BaseGV)
899     return false;
900 
901   // Require a 12-bit signed offset.
902   if (!isInt<12>(AM.BaseOffs))
903     return false;
904 
905   switch (AM.Scale) {
906   case 0: // "r+i" or just "i", depending on HasBaseReg.
907     break;
908   case 1:
909     if (!AM.HasBaseReg) // allow "r+i".
910       break;
911     return false; // disallow "r+r" or "r+r+i".
912   default:
913     return false;
914   }
915 
916   return true;
917 }
918 
919 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
920   return isInt<12>(Imm);
921 }
922 
923 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
924   return isInt<12>(Imm);
925 }
926 
927 // On RV32, 64-bit integers are split into their high and low parts and held
928 // in two different registers, so the trunc is free since the low register can
929 // just be used.
930 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
931   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
932     return false;
933   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
934   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
935   return (SrcBits == 64 && DestBits == 32);
936 }
937 
938 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
939   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
940       !SrcVT.isInteger() || !DstVT.isInteger())
941     return false;
942   unsigned SrcBits = SrcVT.getSizeInBits();
943   unsigned DestBits = DstVT.getSizeInBits();
944   return (SrcBits == 64 && DestBits == 32);
945 }
946 
947 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
948   // Zexts are free if they can be combined with a load.
949   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
950     EVT MemVT = LD->getMemoryVT();
951     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
952          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
953         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
954          LD->getExtensionType() == ISD::ZEXTLOAD))
955       return true;
956   }
957 
958   return TargetLowering::isZExtFree(Val, VT2);
959 }
960 
961 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
962   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
963 }
964 
965 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
966   return Subtarget.hasStdExtZbb();
967 }
968 
969 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
970   return Subtarget.hasStdExtZbb();
971 }
972 
973 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
974                                        bool ForCodeSize) const {
975   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
976     return false;
977   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
978     return false;
979   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
980     return false;
981   if (Imm.isNegZero())
982     return false;
983   return Imm.isZero();
984 }
985 
986 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
987   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
988          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
989          (VT == MVT::f64 && Subtarget.hasStdExtD());
990 }
991 
992 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
993                                                       CallingConv::ID CC,
994                                                       EVT VT) const {
995   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
996   // end up using a GPR but that will be decided based on ABI.
997   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
998     return MVT::f32;
999 
1000   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1001 }
1002 
1003 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1004                                                            CallingConv::ID CC,
1005                                                            EVT VT) const {
1006   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
1007   // end up using a GPR but that will be decided based on ABI.
1008   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1009     return 1;
1010 
1011   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1012 }
1013 
1014 // Changes the condition code and swaps operands if necessary, so the SetCC
1015 // operation matches one of the comparisons supported directly by branches
1016 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1017 // with 1/-1.
1018 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1019                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1020   // Convert X > -1 to X >= 0.
1021   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1022     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1023     CC = ISD::SETGE;
1024     return;
1025   }
1026   // Convert X < 1 to 0 >= X.
1027   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1028     RHS = LHS;
1029     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1030     CC = ISD::SETGE;
1031     return;
1032   }
1033 
1034   switch (CC) {
1035   default:
1036     break;
1037   case ISD::SETGT:
1038   case ISD::SETLE:
1039   case ISD::SETUGT:
1040   case ISD::SETULE:
1041     CC = ISD::getSetCCSwappedOperands(CC);
1042     std::swap(LHS, RHS);
1043     break;
1044   }
1045 }
1046 
1047 // Return the RISC-V branch opcode that matches the given DAG integer
1048 // condition code. The CondCode must be one of those supported by the RISC-V
1049 // ISA (see translateSetCCForBranch).
1050 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
1051   switch (CC) {
1052   default:
1053     llvm_unreachable("Unsupported CondCode");
1054   case ISD::SETEQ:
1055     return RISCV::BEQ;
1056   case ISD::SETNE:
1057     return RISCV::BNE;
1058   case ISD::SETLT:
1059     return RISCV::BLT;
1060   case ISD::SETGE:
1061     return RISCV::BGE;
1062   case ISD::SETULT:
1063     return RISCV::BLTU;
1064   case ISD::SETUGE:
1065     return RISCV::BGEU;
1066   }
1067 }
1068 
1069 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1070   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1071   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1072   if (VT.getVectorElementType() == MVT::i1)
1073     KnownSize *= 8;
1074 
1075   switch (KnownSize) {
1076   default:
1077     llvm_unreachable("Invalid LMUL.");
1078   case 8:
1079     return RISCVII::VLMUL::LMUL_F8;
1080   case 16:
1081     return RISCVII::VLMUL::LMUL_F4;
1082   case 32:
1083     return RISCVII::VLMUL::LMUL_F2;
1084   case 64:
1085     return RISCVII::VLMUL::LMUL_1;
1086   case 128:
1087     return RISCVII::VLMUL::LMUL_2;
1088   case 256:
1089     return RISCVII::VLMUL::LMUL_4;
1090   case 512:
1091     return RISCVII::VLMUL::LMUL_8;
1092   }
1093 }
1094 
1095 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1096   switch (LMul) {
1097   default:
1098     llvm_unreachable("Invalid LMUL.");
1099   case RISCVII::VLMUL::LMUL_F8:
1100   case RISCVII::VLMUL::LMUL_F4:
1101   case RISCVII::VLMUL::LMUL_F2:
1102   case RISCVII::VLMUL::LMUL_1:
1103     return RISCV::VRRegClassID;
1104   case RISCVII::VLMUL::LMUL_2:
1105     return RISCV::VRM2RegClassID;
1106   case RISCVII::VLMUL::LMUL_4:
1107     return RISCV::VRM4RegClassID;
1108   case RISCVII::VLMUL::LMUL_8:
1109     return RISCV::VRM8RegClassID;
1110   }
1111 }
1112 
1113 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1114   RISCVII::VLMUL LMUL = getLMUL(VT);
1115   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1116       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1117       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1118       LMUL == RISCVII::VLMUL::LMUL_1) {
1119     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1120                   "Unexpected subreg numbering");
1121     return RISCV::sub_vrm1_0 + Index;
1122   }
1123   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1124     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1125                   "Unexpected subreg numbering");
1126     return RISCV::sub_vrm2_0 + Index;
1127   }
1128   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1129     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1130                   "Unexpected subreg numbering");
1131     return RISCV::sub_vrm4_0 + Index;
1132   }
1133   llvm_unreachable("Invalid vector type.");
1134 }
1135 
1136 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1137   if (VT.getVectorElementType() == MVT::i1)
1138     return RISCV::VRRegClassID;
1139   return getRegClassIDForLMUL(getLMUL(VT));
1140 }
1141 
1142 // Attempt to decompose a subvector insert/extract between VecVT and
1143 // SubVecVT via subregister indices. Returns the subregister index that
1144 // can perform the subvector insert/extract with the given element index, as
1145 // well as the index corresponding to any leftover subvectors that must be
1146 // further inserted/extracted within the register class for SubVecVT.
1147 std::pair<unsigned, unsigned>
1148 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1149     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1150     const RISCVRegisterInfo *TRI) {
1151   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1152                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1153                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1154                 "Register classes not ordered");
1155   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1156   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1157   // Try to compose a subregister index that takes us from the incoming
1158   // LMUL>1 register class down to the outgoing one. At each step we half
1159   // the LMUL:
1160   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1161   // Note that this is not guaranteed to find a subregister index, such as
1162   // when we are extracting from one VR type to another.
1163   unsigned SubRegIdx = RISCV::NoSubRegister;
1164   for (const unsigned RCID :
1165        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1166     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1167       VecVT = VecVT.getHalfNumVectorElementsVT();
1168       bool IsHi =
1169           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1170       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1171                                             getSubregIndexByMVT(VecVT, IsHi));
1172       if (IsHi)
1173         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1174     }
1175   return {SubRegIdx, InsertExtractIdx};
1176 }
1177 
1178 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1179 // stores for those types.
1180 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1181   return !Subtarget.useRVVForFixedLengthVectors() ||
1182          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1183 }
1184 
1185 static bool useRVVForFixedLengthVectorVT(MVT VT,
1186                                          const RISCVSubtarget &Subtarget) {
1187   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1188   if (!Subtarget.useRVVForFixedLengthVectors())
1189     return false;
1190 
1191   // We only support a set of vector types with a consistent maximum fixed size
1192   // across all supported vector element types to avoid legalization issues.
1193   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1194   // fixed-length vector type we support is 1024 bytes.
1195   if (VT.getFixedSizeInBits() > 1024 * 8)
1196     return false;
1197 
1198   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1199 
1200   // Don't use RVV for vectors we cannot scalarize if required.
1201   switch (VT.getVectorElementType().SimpleTy) {
1202   // i1 is supported but has different rules.
1203   default:
1204     return false;
1205   case MVT::i1:
1206     // Masks can only use a single register.
1207     if (VT.getVectorNumElements() > MinVLen)
1208       return false;
1209     MinVLen /= 8;
1210     break;
1211   case MVT::i8:
1212   case MVT::i16:
1213   case MVT::i32:
1214   case MVT::i64:
1215     break;
1216   case MVT::f16:
1217     if (!Subtarget.hasStdExtZfh())
1218       return false;
1219     break;
1220   case MVT::f32:
1221     if (!Subtarget.hasStdExtF())
1222       return false;
1223     break;
1224   case MVT::f64:
1225     if (!Subtarget.hasStdExtD())
1226       return false;
1227     break;
1228   }
1229 
1230   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1231   // Don't use RVV for types that don't fit.
1232   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1233     return false;
1234 
1235   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1236   // the base fixed length RVV support in place.
1237   if (!VT.isPow2VectorType())
1238     return false;
1239 
1240   return true;
1241 }
1242 
1243 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1244   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1245 }
1246 
1247 // Return the largest legal scalable vector type that matches VT's element type.
1248 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1249                                             const RISCVSubtarget &Subtarget) {
1250   // This may be called before legal types are setup.
1251   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1252           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1253          "Expected legal fixed length vector!");
1254 
1255   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1256 
1257   MVT EltVT = VT.getVectorElementType();
1258   switch (EltVT.SimpleTy) {
1259   default:
1260     llvm_unreachable("unexpected element type for RVV container");
1261   case MVT::i1:
1262   case MVT::i8:
1263   case MVT::i16:
1264   case MVT::i32:
1265   case MVT::i64:
1266   case MVT::f16:
1267   case MVT::f32:
1268   case MVT::f64: {
1269     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1270     // narrower types, but we can't have a fractional LMUL with demoninator less
1271     // than 64/SEW.
1272     unsigned NumElts =
1273         divideCeil(VT.getVectorNumElements(), MinVLen / RISCV::RVVBitsPerBlock);
1274     return MVT::getScalableVectorVT(EltVT, NumElts);
1275   }
1276   }
1277 }
1278 
1279 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1280                                             const RISCVSubtarget &Subtarget) {
1281   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1282                                           Subtarget);
1283 }
1284 
1285 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1286   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1287 }
1288 
1289 // Grow V to consume an entire RVV register.
1290 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1291                                        const RISCVSubtarget &Subtarget) {
1292   assert(VT.isScalableVector() &&
1293          "Expected to convert into a scalable vector!");
1294   assert(V.getValueType().isFixedLengthVector() &&
1295          "Expected a fixed length vector operand!");
1296   SDLoc DL(V);
1297   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1298   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1299 }
1300 
1301 // Shrink V so it's just big enough to maintain a VT's worth of data.
1302 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1303                                          const RISCVSubtarget &Subtarget) {
1304   assert(VT.isFixedLengthVector() &&
1305          "Expected to convert into a fixed length vector!");
1306   assert(V.getValueType().isScalableVector() &&
1307          "Expected a scalable vector operand!");
1308   SDLoc DL(V);
1309   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1310   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1311 }
1312 
1313 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1314 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1315 // the vector type that it is contained in.
1316 static std::pair<SDValue, SDValue>
1317 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1318                 const RISCVSubtarget &Subtarget) {
1319   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1320   MVT XLenVT = Subtarget.getXLenVT();
1321   SDValue VL = VecVT.isFixedLengthVector()
1322                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1323                    : DAG.getRegister(RISCV::X0, XLenVT);
1324   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1325   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1326   return {Mask, VL};
1327 }
1328 
1329 // As above but assuming the given type is a scalable vector type.
1330 static std::pair<SDValue, SDValue>
1331 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1332                         const RISCVSubtarget &Subtarget) {
1333   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1334   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1335 }
1336 
1337 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1338 // of either is (currently) supported. This can get us into an infinite loop
1339 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1340 // as a ..., etc.
1341 // Until either (or both) of these can reliably lower any node, reporting that
1342 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1343 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1344 // which is not desirable.
1345 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1346     EVT VT, unsigned DefinedValues) const {
1347   return false;
1348 }
1349 
1350 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1351   // Only splats are currently supported.
1352   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1353     return true;
1354 
1355   return false;
1356 }
1357 
1358 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1359                                  const RISCVSubtarget &Subtarget) {
1360   MVT VT = Op.getSimpleValueType();
1361   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1362 
1363   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1364 
1365   SDLoc DL(Op);
1366   SDValue Mask, VL;
1367   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1368 
1369   unsigned Opc =
1370       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1371   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1372   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1373 }
1374 
1375 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1376                                  const RISCVSubtarget &Subtarget) {
1377   MVT VT = Op.getSimpleValueType();
1378   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1379 
1380   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1381 
1382   SDLoc DL(Op);
1383   SDValue Mask, VL;
1384   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1385 
1386   MVT XLenVT = Subtarget.getXLenVT();
1387   unsigned NumElts = Op.getNumOperands();
1388 
1389   if (VT.getVectorElementType() == MVT::i1) {
1390     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1391       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1392       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1393     }
1394 
1395     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1396       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1397       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1398     }
1399 
1400     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1401     // scalar integer chunks whose bit-width depends on the number of mask
1402     // bits and XLEN.
1403     // First, determine the most appropriate scalar integer type to use. This
1404     // is at most XLenVT, but may be shrunk to a smaller vector element type
1405     // according to the size of the final vector - use i8 chunks rather than
1406     // XLenVT if we're producing a v8i1. This results in more consistent
1407     // codegen across RV32 and RV64.
1408     unsigned NumViaIntegerBits =
1409         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1410     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1411       // If we have to use more than one INSERT_VECTOR_ELT then this
1412       // optimization is likely to increase code size; avoid peforming it in
1413       // such a case. We can use a load from a constant pool in this case.
1414       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1415         return SDValue();
1416       // Now we can create our integer vector type. Note that it may be larger
1417       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1418       MVT IntegerViaVecVT =
1419           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1420                            divideCeil(NumElts, NumViaIntegerBits));
1421 
1422       uint64_t Bits = 0;
1423       unsigned BitPos = 0, IntegerEltIdx = 0;
1424       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1425 
1426       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1427         // Once we accumulate enough bits to fill our scalar type, insert into
1428         // our vector and clear our accumulated data.
1429         if (I != 0 && I % NumViaIntegerBits == 0) {
1430           if (NumViaIntegerBits <= 32)
1431             Bits = SignExtend64(Bits, 32);
1432           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1433           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1434                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1435           Bits = 0;
1436           BitPos = 0;
1437           IntegerEltIdx++;
1438         }
1439         SDValue V = Op.getOperand(I);
1440         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1441         Bits |= ((uint64_t)BitValue << BitPos);
1442       }
1443 
1444       // Insert the (remaining) scalar value into position in our integer
1445       // vector type.
1446       if (NumViaIntegerBits <= 32)
1447         Bits = SignExtend64(Bits, 32);
1448       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1449       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1450                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1451 
1452       if (NumElts < NumViaIntegerBits) {
1453         // If we're producing a smaller vector than our minimum legal integer
1454         // type, bitcast to the equivalent (known-legal) mask type, and extract
1455         // our final mask.
1456         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1457         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1458         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1459                           DAG.getConstant(0, DL, XLenVT));
1460       } else {
1461         // Else we must have produced an integer type with the same size as the
1462         // mask type; bitcast for the final result.
1463         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1464         Vec = DAG.getBitcast(VT, Vec);
1465       }
1466 
1467       return Vec;
1468     }
1469 
1470     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
1471     // vector type, we have a legal equivalently-sized i8 type, so we can use
1472     // that.
1473     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
1474     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
1475 
1476     SDValue WideVec;
1477     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1478       // For a splat, perform a scalar truncate before creating the wider
1479       // vector.
1480       assert(Splat.getValueType() == XLenVT &&
1481              "Unexpected type for i1 splat value");
1482       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
1483                           DAG.getConstant(1, DL, XLenVT));
1484       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
1485     } else {
1486       SmallVector<SDValue, 8> Ops(Op->op_values());
1487       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
1488       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
1489       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
1490     }
1491 
1492     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
1493   }
1494 
1495   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1496     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1497                                         : RISCVISD::VMV_V_X_VL;
1498     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1499     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1500   }
1501 
1502   // Try and match an index sequence, which we can lower directly to the vid
1503   // instruction. An all-undef vector is matched by getSplatValue, above.
1504   if (VT.isInteger()) {
1505     bool IsVID = true;
1506     for (unsigned I = 0; I < NumElts && IsVID; I++)
1507       IsVID &= Op.getOperand(I).isUndef() ||
1508                (isa<ConstantSDNode>(Op.getOperand(I)) &&
1509                 Op.getConstantOperandVal(I) == I);
1510 
1511     if (IsVID) {
1512       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1513       return convertFromScalableVector(VT, VID, DAG, Subtarget);
1514     }
1515   }
1516 
1517   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1518   // when re-interpreted as a vector with a larger element type. For example,
1519   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
1520   // could be instead splat as
1521   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1522   // TODO: This optimization could also work on non-constant splats, but it
1523   // would require bit-manipulation instructions to construct the splat value.
1524   SmallVector<SDValue> Sequence;
1525   unsigned EltBitSize = VT.getScalarSizeInBits();
1526   const auto *BV = cast<BuildVectorSDNode>(Op);
1527   if (VT.isInteger() && EltBitSize < 64 &&
1528       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1529       BV->getRepeatedSequence(Sequence) &&
1530       (Sequence.size() * EltBitSize) <= 64) {
1531     unsigned SeqLen = Sequence.size();
1532     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1533     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1534     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1535             ViaIntVT == MVT::i64) &&
1536            "Unexpected sequence type");
1537 
1538     unsigned EltIdx = 0;
1539     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1540     uint64_t SplatValue = 0;
1541     // Construct the amalgamated value which can be splatted as this larger
1542     // vector type.
1543     for (const auto &SeqV : Sequence) {
1544       if (!SeqV.isUndef())
1545         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1546                        << (EltIdx * EltBitSize));
1547       EltIdx++;
1548     }
1549 
1550     // On RV64, sign-extend from 32 to 64 bits where possible in order to
1551     // achieve better constant materializion.
1552     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1553       SplatValue = SignExtend64(SplatValue, 32);
1554 
1555     // Since we can't introduce illegal i64 types at this stage, we can only
1556     // perform an i64 splat on RV32 if it is its own sign-extended value. That
1557     // way we can use RVV instructions to splat.
1558     assert((ViaIntVT.bitsLE(XLenVT) ||
1559             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1560            "Unexpected bitcast sequence");
1561     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1562       SDValue ViaVL =
1563           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1564       MVT ViaContainerVT =
1565           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
1566       SDValue Splat =
1567           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1568                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1569       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1570       return DAG.getBitcast(VT, Splat);
1571     }
1572   }
1573 
1574   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1575   // which constitute a large proportion of the elements. In such cases we can
1576   // splat a vector with the dominant element and make up the shortfall with
1577   // INSERT_VECTOR_ELTs.
1578   // Note that this includes vectors of 2 elements by association. The
1579   // upper-most element is the "dominant" one, allowing us to use a splat to
1580   // "insert" the upper element, and an insert of the lower element at position
1581   // 0, which improves codegen.
1582   SDValue DominantValue;
1583   unsigned MostCommonCount = 0;
1584   DenseMap<SDValue, unsigned> ValueCounts;
1585   unsigned NumUndefElts =
1586       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1587 
1588   for (SDValue V : Op->op_values()) {
1589     if (V.isUndef())
1590       continue;
1591 
1592     ValueCounts.insert(std::make_pair(V, 0));
1593     unsigned &Count = ValueCounts[V];
1594 
1595     // Is this value dominant? In case of a tie, prefer the highest element as
1596     // it's cheaper to insert near the beginning of a vector than it is at the
1597     // end.
1598     if (++Count >= MostCommonCount) {
1599       DominantValue = V;
1600       MostCommonCount = Count;
1601     }
1602   }
1603 
1604   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
1605   unsigned NumDefElts = NumElts - NumUndefElts;
1606   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1607 
1608   // Don't perform this optimization when optimizing for size, since
1609   // materializing elements and inserting them tends to cause code bloat.
1610   if (!DAG.shouldOptForSize() &&
1611       ((MostCommonCount > DominantValueCountThreshold) ||
1612        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
1613     // Start by splatting the most common element.
1614     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1615 
1616     DenseSet<SDValue> Processed{DominantValue};
1617     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
1618     for (const auto &OpIdx : enumerate(Op->ops())) {
1619       const SDValue &V = OpIdx.value();
1620       if (V.isUndef() || !Processed.insert(V).second)
1621         continue;
1622       if (ValueCounts[V] == 1) {
1623         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
1624                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
1625       } else {
1626         // Blend in all instances of this value using a VSELECT, using a
1627         // mask where each bit signals whether that element is the one
1628         // we're after.
1629         SmallVector<SDValue> Ops;
1630         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
1631           return DAG.getConstant(V == V1, DL, XLenVT);
1632         });
1633         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
1634                           DAG.getBuildVector(SelMaskTy, DL, Ops),
1635                           DAG.getSplatBuildVector(VT, DL, V), Vec);
1636       }
1637     }
1638 
1639     return Vec;
1640   }
1641 
1642   return SDValue();
1643 }
1644 
1645 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
1646                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
1647   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
1648     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
1649     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
1650     // If Hi constant is all the same sign bit as Lo, lower this as a custom
1651     // node in order to try and match RVV vector/scalar instructions.
1652     if ((LoC >> 31) == HiC)
1653       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
1654   }
1655 
1656   // Fall back to a stack store and stride x0 vector load.
1657   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
1658 }
1659 
1660 // Called by type legalization to handle splat of i64 on RV32.
1661 // FIXME: We can optimize this when the type has sign or zero bits in one
1662 // of the halves.
1663 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
1664                                    SDValue VL, SelectionDAG &DAG) {
1665   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
1666   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1667                            DAG.getConstant(0, DL, MVT::i32));
1668   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1669                            DAG.getConstant(1, DL, MVT::i32));
1670   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
1671 }
1672 
1673 // This function lowers a splat of a scalar operand Splat with the vector
1674 // length VL. It ensures the final sequence is type legal, which is useful when
1675 // lowering a splat after type legalization.
1676 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
1677                                 SelectionDAG &DAG,
1678                                 const RISCVSubtarget &Subtarget) {
1679   if (VT.isFloatingPoint())
1680     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
1681 
1682   MVT XLenVT = Subtarget.getXLenVT();
1683 
1684   // Simplest case is that the operand needs to be promoted to XLenVT.
1685   if (Scalar.getValueType().bitsLE(XLenVT)) {
1686     // If the operand is a constant, sign extend to increase our chances
1687     // of being able to use a .vi instruction. ANY_EXTEND would become a
1688     // a zero extend and the simm5 check in isel would fail.
1689     // FIXME: Should we ignore the upper bits in isel instead?
1690     unsigned ExtOpc =
1691         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
1692     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
1693     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
1694   }
1695 
1696   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
1697          "Unexpected scalar for splat lowering!");
1698 
1699   // Otherwise use the more complicated splatting algorithm.
1700   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
1701 }
1702 
1703 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1704                                    const RISCVSubtarget &Subtarget) {
1705   SDValue V1 = Op.getOperand(0);
1706   SDValue V2 = Op.getOperand(1);
1707   SDLoc DL(Op);
1708   MVT XLenVT = Subtarget.getXLenVT();
1709   MVT VT = Op.getSimpleValueType();
1710   unsigned NumElts = VT.getVectorNumElements();
1711   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1712 
1713   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1714 
1715   SDValue TrueMask, VL;
1716   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1717 
1718   if (SVN->isSplat()) {
1719     const int Lane = SVN->getSplatIndex();
1720     if (Lane >= 0) {
1721       MVT SVT = VT.getVectorElementType();
1722 
1723       // Turn splatted vector load into a strided load with an X0 stride.
1724       SDValue V = V1;
1725       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
1726       // with undef.
1727       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
1728       int Offset = Lane;
1729       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
1730         int OpElements =
1731             V.getOperand(0).getSimpleValueType().getVectorNumElements();
1732         V = V.getOperand(Offset / OpElements);
1733         Offset %= OpElements;
1734       }
1735 
1736       // We need to ensure the load isn't atomic or volatile.
1737       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
1738         auto *Ld = cast<LoadSDNode>(V);
1739         Offset *= SVT.getStoreSize();
1740         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
1741                                                    TypeSize::Fixed(Offset), DL);
1742 
1743         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
1744         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
1745           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
1746           SDValue IntID =
1747               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1748           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
1749                            DAG.getRegister(RISCV::X0, XLenVT), VL};
1750           SDValue NewLoad = DAG.getMemIntrinsicNode(
1751               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
1752               DAG.getMachineFunction().getMachineMemOperand(
1753                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
1754           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
1755           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
1756         }
1757 
1758         // Otherwise use a scalar load and splat. This will give the best
1759         // opportunity to fold a splat into the operation. ISel can turn it into
1760         // the x0 strided load if we aren't able to fold away the select.
1761         if (SVT.isFloatingPoint())
1762           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
1763                           Ld->getPointerInfo().getWithOffset(Offset),
1764                           Ld->getOriginalAlign(),
1765                           Ld->getMemOperand()->getFlags());
1766         else
1767           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
1768                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
1769                              Ld->getOriginalAlign(),
1770                              Ld->getMemOperand()->getFlags());
1771         DAG.makeEquivalentMemoryOrdering(Ld, V);
1772 
1773         unsigned Opc =
1774             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1775         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
1776         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1777       }
1778 
1779       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1780       assert(Lane < (int)NumElts && "Unexpected lane!");
1781       SDValue Gather =
1782           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1783                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
1784       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1785     }
1786   }
1787 
1788   // Detect shuffles which can be re-expressed as vector selects; these are
1789   // shuffles in which each element in the destination is taken from an element
1790   // at the corresponding index in either source vectors.
1791   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
1792     int MaskIndex = MaskIdx.value();
1793     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
1794   });
1795 
1796   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
1797 
1798   SmallVector<SDValue> MaskVals;
1799   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
1800   // merged with a second vrgather.
1801   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
1802 
1803   // By default we preserve the original operand order, and use a mask to
1804   // select LHS as true and RHS as false. However, since RVV vector selects may
1805   // feature splats but only on the LHS, we may choose to invert our mask and
1806   // instead select between RHS and LHS.
1807   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
1808   bool InvertMask = IsSelect == SwapOps;
1809 
1810   // Now construct the mask that will be used by the vselect or blended
1811   // vrgather operation. For vrgathers, construct the appropriate indices into
1812   // each vector.
1813   for (int MaskIndex : SVN->getMask()) {
1814     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
1815     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
1816     if (!IsSelect) {
1817       bool IsLHS = MaskIndex < (int)NumElts;
1818       // For "undef" elements of -1, shuffle in element 0 instead.
1819       GatherIndicesLHS.push_back(
1820           DAG.getConstant(IsLHS ? std::max(MaskIndex, 0) : 0, DL, XLenVT));
1821       // TODO: If we're masking out unused elements anyway, it might produce
1822       // better code if we use the most-common element index instead of 0.
1823       GatherIndicesRHS.push_back(
1824           DAG.getConstant(IsLHS ? 0 : MaskIndex - NumElts, DL, XLenVT));
1825     }
1826   }
1827 
1828   if (SwapOps) {
1829     std::swap(V1, V2);
1830     std::swap(GatherIndicesLHS, GatherIndicesRHS);
1831   }
1832 
1833   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
1834   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
1835   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
1836 
1837   if (IsSelect)
1838     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
1839 
1840   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
1841     // On such a large vector we're unable to use i8 as the index type.
1842     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
1843     // may involve vector splitting if we're already at LMUL=8, or our
1844     // user-supplied maximum fixed-length LMUL.
1845     return SDValue();
1846   }
1847 
1848   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
1849   MVT IndexVT = VT.changeTypeToInteger();
1850   // Since we can't introduce illegal index types at this stage, use i16 and
1851   // vrgatherei16 if the corresponding index type for plain vrgather is greater
1852   // than XLenVT.
1853   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
1854     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
1855     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
1856   }
1857 
1858   MVT IndexContainerVT =
1859       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
1860 
1861   SDValue Gather;
1862   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
1863   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
1864   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
1865     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
1866   } else {
1867     SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
1868     LHSIndices =
1869         convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
1870 
1871     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1872     Gather =
1873         DAG.getNode(GatherOpc, DL, ContainerVT, V1, LHSIndices, TrueMask, VL);
1874   }
1875 
1876   // If a second vector operand is used by this shuffle, blend it in with an
1877   // additional vrgather.
1878   if (!V2.isUndef()) {
1879     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
1880     SelectMask =
1881         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
1882 
1883     SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
1884     RHSIndices =
1885         convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
1886 
1887     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
1888     V2 = DAG.getNode(GatherOpc, DL, ContainerVT, V2, RHSIndices, TrueMask, VL);
1889     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
1890                          Gather, VL);
1891   }
1892 
1893   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1894 }
1895 
1896 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
1897                                      SDLoc DL, SelectionDAG &DAG,
1898                                      const RISCVSubtarget &Subtarget) {
1899   if (VT.isScalableVector())
1900     return DAG.getFPExtendOrRound(Op, DL, VT);
1901   assert(VT.isFixedLengthVector() &&
1902          "Unexpected value type for RVV FP extend/round lowering");
1903   SDValue Mask, VL;
1904   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1905   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
1906                         ? RISCVISD::FP_EXTEND_VL
1907                         : RISCVISD::FP_ROUND_VL;
1908   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
1909 }
1910 
1911 // While RVV has alignment restrictions, we should always be able to load as a
1912 // legal equivalently-sized byte-typed vector instead. This method is
1913 // responsible for re-expressing a ISD::LOAD via a correctly-aligned type. If
1914 // the load is already correctly-aligned, it returns SDValue().
1915 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
1916                                                     SelectionDAG &DAG) const {
1917   auto *Load = cast<LoadSDNode>(Op);
1918   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
1919 
1920   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
1921                                      Load->getMemoryVT(),
1922                                      *Load->getMemOperand()))
1923     return SDValue();
1924 
1925   SDLoc DL(Op);
1926   MVT VT = Op.getSimpleValueType();
1927   unsigned EltSizeBits = VT.getScalarSizeInBits();
1928   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
1929          "Unexpected unaligned RVV load type");
1930   MVT NewVT =
1931       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
1932   assert(NewVT.isValid() &&
1933          "Expecting equally-sized RVV vector types to be legal");
1934   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
1935                           Load->getPointerInfo(), Load->getOriginalAlign(),
1936                           Load->getMemOperand()->getFlags());
1937   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
1938 }
1939 
1940 // While RVV has alignment restrictions, we should always be able to store as a
1941 // legal equivalently-sized byte-typed vector instead. This method is
1942 // responsible for re-expressing a ISD::STORE via a correctly-aligned type. It
1943 // returns SDValue() if the store is already correctly aligned.
1944 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
1945                                                      SelectionDAG &DAG) const {
1946   auto *Store = cast<StoreSDNode>(Op);
1947   assert(Store && Store->getValue().getValueType().isVector() &&
1948          "Expected vector store");
1949 
1950   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
1951                                      Store->getMemoryVT(),
1952                                      *Store->getMemOperand()))
1953     return SDValue();
1954 
1955   SDLoc DL(Op);
1956   SDValue StoredVal = Store->getValue();
1957   MVT VT = StoredVal.getSimpleValueType();
1958   unsigned EltSizeBits = VT.getScalarSizeInBits();
1959   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
1960          "Unexpected unaligned RVV store type");
1961   MVT NewVT =
1962       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
1963   assert(NewVT.isValid() &&
1964          "Expecting equally-sized RVV vector types to be legal");
1965   StoredVal = DAG.getBitcast(NewVT, StoredVal);
1966   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
1967                       Store->getPointerInfo(), Store->getOriginalAlign(),
1968                       Store->getMemOperand()->getFlags());
1969 }
1970 
1971 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
1972                                             SelectionDAG &DAG) const {
1973   switch (Op.getOpcode()) {
1974   default:
1975     report_fatal_error("unimplemented operand");
1976   case ISD::GlobalAddress:
1977     return lowerGlobalAddress(Op, DAG);
1978   case ISD::BlockAddress:
1979     return lowerBlockAddress(Op, DAG);
1980   case ISD::ConstantPool:
1981     return lowerConstantPool(Op, DAG);
1982   case ISD::JumpTable:
1983     return lowerJumpTable(Op, DAG);
1984   case ISD::GlobalTLSAddress:
1985     return lowerGlobalTLSAddress(Op, DAG);
1986   case ISD::SELECT:
1987     return lowerSELECT(Op, DAG);
1988   case ISD::BRCOND:
1989     return lowerBRCOND(Op, DAG);
1990   case ISD::VASTART:
1991     return lowerVASTART(Op, DAG);
1992   case ISD::FRAMEADDR:
1993     return lowerFRAMEADDR(Op, DAG);
1994   case ISD::RETURNADDR:
1995     return lowerRETURNADDR(Op, DAG);
1996   case ISD::SHL_PARTS:
1997     return lowerShiftLeftParts(Op, DAG);
1998   case ISD::SRA_PARTS:
1999     return lowerShiftRightParts(Op, DAG, true);
2000   case ISD::SRL_PARTS:
2001     return lowerShiftRightParts(Op, DAG, false);
2002   case ISD::BITCAST: {
2003     SDLoc DL(Op);
2004     EVT VT = Op.getValueType();
2005     SDValue Op0 = Op.getOperand(0);
2006     EVT Op0VT = Op0.getValueType();
2007     MVT XLenVT = Subtarget.getXLenVT();
2008     if (VT.isFixedLengthVector()) {
2009       // We can handle fixed length vector bitcasts with a simple replacement
2010       // in isel.
2011       if (Op0VT.isFixedLengthVector())
2012         return Op;
2013       // When bitcasting from scalar to fixed-length vector, insert the scalar
2014       // into a one-element vector of the result type, and perform a vector
2015       // bitcast.
2016       if (!Op0VT.isVector()) {
2017         auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2018         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2019                                               DAG.getUNDEF(BVT), Op0,
2020                                               DAG.getConstant(0, DL, XLenVT)));
2021       }
2022       return SDValue();
2023     }
2024     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2025     // thus: bitcast the vector to a one-element vector type whose element type
2026     // is the same as the result type, and extract the first element.
2027     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2028       LLVMContext &Context = *DAG.getContext();
2029       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
2030       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2031                          DAG.getConstant(0, DL, XLenVT));
2032     }
2033     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2034       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2035       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2036       return FPConv;
2037     }
2038     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2039         Subtarget.hasStdExtF()) {
2040       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2041       SDValue FPConv =
2042           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2043       return FPConv;
2044     }
2045     return SDValue();
2046   }
2047   case ISD::INTRINSIC_WO_CHAIN:
2048     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2049   case ISD::INTRINSIC_W_CHAIN:
2050     return LowerINTRINSIC_W_CHAIN(Op, DAG);
2051   case ISD::BSWAP:
2052   case ISD::BITREVERSE: {
2053     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
2054     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
2055     MVT VT = Op.getSimpleValueType();
2056     SDLoc DL(Op);
2057     // Start with the maximum immediate value which is the bitwidth - 1.
2058     unsigned Imm = VT.getSizeInBits() - 1;
2059     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
2060     if (Op.getOpcode() == ISD::BSWAP)
2061       Imm &= ~0x7U;
2062     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
2063                        DAG.getConstant(Imm, DL, VT));
2064   }
2065   case ISD::FSHL:
2066   case ISD::FSHR: {
2067     MVT VT = Op.getSimpleValueType();
2068     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
2069     SDLoc DL(Op);
2070     if (Op.getOperand(2).getOpcode() == ISD::Constant)
2071       return Op;
2072     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
2073     // use log(XLen) bits. Mask the shift amount accordingly.
2074     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2075     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2076                                 DAG.getConstant(ShAmtWidth, DL, VT));
2077     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
2078     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
2079   }
2080   case ISD::TRUNCATE: {
2081     SDLoc DL(Op);
2082     MVT VT = Op.getSimpleValueType();
2083     // Only custom-lower vector truncates
2084     if (!VT.isVector())
2085       return Op;
2086 
2087     // Truncates to mask types are handled differently
2088     if (VT.getVectorElementType() == MVT::i1)
2089       return lowerVectorMaskTrunc(Op, DAG);
2090 
2091     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2092     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2093     // truncate by one power of two at a time.
2094     MVT DstEltVT = VT.getVectorElementType();
2095 
2096     SDValue Src = Op.getOperand(0);
2097     MVT SrcVT = Src.getSimpleValueType();
2098     MVT SrcEltVT = SrcVT.getVectorElementType();
2099 
2100     assert(DstEltVT.bitsLT(SrcEltVT) &&
2101            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2102            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2103            "Unexpected vector truncate lowering");
2104 
2105     MVT ContainerVT = SrcVT;
2106     if (SrcVT.isFixedLengthVector()) {
2107       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2108       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2109     }
2110 
2111     SDValue Result = Src;
2112     SDValue Mask, VL;
2113     std::tie(Mask, VL) =
2114         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2115     LLVMContext &Context = *DAG.getContext();
2116     const ElementCount Count = ContainerVT.getVectorElementCount();
2117     do {
2118       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2119       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2120       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2121                            Mask, VL);
2122     } while (SrcEltVT != DstEltVT);
2123 
2124     if (SrcVT.isFixedLengthVector())
2125       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2126 
2127     return Result;
2128   }
2129   case ISD::ANY_EXTEND:
2130   case ISD::ZERO_EXTEND:
2131     if (Op.getOperand(0).getValueType().isVector() &&
2132         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2133       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2134     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2135   case ISD::SIGN_EXTEND:
2136     if (Op.getOperand(0).getValueType().isVector() &&
2137         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2138       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2139     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2140   case ISD::SPLAT_VECTOR_PARTS:
2141     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2142   case ISD::INSERT_VECTOR_ELT:
2143     return lowerINSERT_VECTOR_ELT(Op, DAG);
2144   case ISD::EXTRACT_VECTOR_ELT:
2145     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2146   case ISD::VSCALE: {
2147     MVT VT = Op.getSimpleValueType();
2148     SDLoc DL(Op);
2149     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
2150     // We define our scalable vector types for lmul=1 to use a 64 bit known
2151     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
2152     // vscale as VLENB / 8.
2153     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
2154     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2155                                  DAG.getConstant(3, DL, VT));
2156     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2157   }
2158   case ISD::FP_EXTEND: {
2159     // RVV can only do fp_extend to types double the size as the source. We
2160     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
2161     // via f32.
2162     SDLoc DL(Op);
2163     MVT VT = Op.getSimpleValueType();
2164     SDValue Src = Op.getOperand(0);
2165     MVT SrcVT = Src.getSimpleValueType();
2166 
2167     // Prepare any fixed-length vector operands.
2168     MVT ContainerVT = VT;
2169     if (SrcVT.isFixedLengthVector()) {
2170       ContainerVT = getContainerForFixedLengthVector(VT);
2171       MVT SrcContainerVT =
2172           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2173       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2174     }
2175 
2176     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2177         SrcVT.getVectorElementType() != MVT::f16) {
2178       // For scalable vectors, we only need to close the gap between
2179       // vXf16->vXf64.
2180       if (!VT.isFixedLengthVector())
2181         return Op;
2182       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2183       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2184       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2185     }
2186 
2187     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2188     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2189     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2190         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2191 
2192     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2193                                            DL, DAG, Subtarget);
2194     if (VT.isFixedLengthVector())
2195       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2196     return Extend;
2197   }
2198   case ISD::FP_ROUND: {
2199     // RVV can only do fp_round to types half the size as the source. We
2200     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2201     // conversion instruction.
2202     SDLoc DL(Op);
2203     MVT VT = Op.getSimpleValueType();
2204     SDValue Src = Op.getOperand(0);
2205     MVT SrcVT = Src.getSimpleValueType();
2206 
2207     // Prepare any fixed-length vector operands.
2208     MVT ContainerVT = VT;
2209     if (VT.isFixedLengthVector()) {
2210       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2211       ContainerVT =
2212           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2213       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2214     }
2215 
2216     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2217         SrcVT.getVectorElementType() != MVT::f64) {
2218       // For scalable vectors, we only need to close the gap between
2219       // vXf64<->vXf16.
2220       if (!VT.isFixedLengthVector())
2221         return Op;
2222       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2223       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2224       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2225     }
2226 
2227     SDValue Mask, VL;
2228     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2229 
2230     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2231     SDValue IntermediateRound =
2232         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2233     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2234                                           DL, DAG, Subtarget);
2235 
2236     if (VT.isFixedLengthVector())
2237       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2238     return Round;
2239   }
2240   case ISD::FP_TO_SINT:
2241   case ISD::FP_TO_UINT:
2242   case ISD::SINT_TO_FP:
2243   case ISD::UINT_TO_FP: {
2244     // RVV can only do fp<->int conversions to types half/double the size as
2245     // the source. We custom-lower any conversions that do two hops into
2246     // sequences.
2247     MVT VT = Op.getSimpleValueType();
2248     if (!VT.isVector())
2249       return Op;
2250     SDLoc DL(Op);
2251     SDValue Src = Op.getOperand(0);
2252     MVT EltVT = VT.getVectorElementType();
2253     MVT SrcVT = Src.getSimpleValueType();
2254     MVT SrcEltVT = SrcVT.getVectorElementType();
2255     unsigned EltSize = EltVT.getSizeInBits();
2256     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2257     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2258            "Unexpected vector element types");
2259 
2260     bool IsInt2FP = SrcEltVT.isInteger();
2261     // Widening conversions
2262     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2263       if (IsInt2FP) {
2264         // Do a regular integer sign/zero extension then convert to float.
2265         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2266                                       VT.getVectorElementCount());
2267         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2268                                  ? ISD::ZERO_EXTEND
2269                                  : ISD::SIGN_EXTEND;
2270         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2271         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2272       }
2273       // FP2Int
2274       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2275       // Do one doubling fp_extend then complete the operation by converting
2276       // to int.
2277       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2278       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2279       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2280     }
2281 
2282     // Narrowing conversions
2283     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2284       if (IsInt2FP) {
2285         // One narrowing int_to_fp, then an fp_round.
2286         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2287         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2288         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2289         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2290       }
2291       // FP2Int
2292       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2293       // representable by the integer, the result is poison.
2294       MVT IVecVT =
2295           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2296                            VT.getVectorElementCount());
2297       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2298       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2299     }
2300 
2301     // Scalable vectors can exit here. Patterns will handle equally-sized
2302     // conversions halving/doubling ones.
2303     if (!VT.isFixedLengthVector())
2304       return Op;
2305 
2306     // For fixed-length vectors we lower to a custom "VL" node.
2307     unsigned RVVOpc = 0;
2308     switch (Op.getOpcode()) {
2309     default:
2310       llvm_unreachable("Impossible opcode");
2311     case ISD::FP_TO_SINT:
2312       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2313       break;
2314     case ISD::FP_TO_UINT:
2315       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2316       break;
2317     case ISD::SINT_TO_FP:
2318       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2319       break;
2320     case ISD::UINT_TO_FP:
2321       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2322       break;
2323     }
2324 
2325     MVT ContainerVT, SrcContainerVT;
2326     // Derive the reference container type from the larger vector type.
2327     if (SrcEltSize > EltSize) {
2328       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2329       ContainerVT =
2330           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2331     } else {
2332       ContainerVT = getContainerForFixedLengthVector(VT);
2333       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2334     }
2335 
2336     SDValue Mask, VL;
2337     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2338 
2339     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2340     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2341     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2342   }
2343   case ISD::VECREDUCE_ADD:
2344   case ISD::VECREDUCE_UMAX:
2345   case ISD::VECREDUCE_SMAX:
2346   case ISD::VECREDUCE_UMIN:
2347   case ISD::VECREDUCE_SMIN:
2348     return lowerVECREDUCE(Op, DAG);
2349   case ISD::VECREDUCE_AND:
2350   case ISD::VECREDUCE_OR:
2351   case ISD::VECREDUCE_XOR:
2352     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2353       return lowerVectorMaskVECREDUCE(Op, DAG);
2354     return lowerVECREDUCE(Op, DAG);
2355   case ISD::VECREDUCE_FADD:
2356   case ISD::VECREDUCE_SEQ_FADD:
2357   case ISD::VECREDUCE_FMIN:
2358   case ISD::VECREDUCE_FMAX:
2359     return lowerFPVECREDUCE(Op, DAG);
2360   case ISD::INSERT_SUBVECTOR:
2361     return lowerINSERT_SUBVECTOR(Op, DAG);
2362   case ISD::EXTRACT_SUBVECTOR:
2363     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2364   case ISD::STEP_VECTOR:
2365     return lowerSTEP_VECTOR(Op, DAG);
2366   case ISD::VECTOR_REVERSE:
2367     return lowerVECTOR_REVERSE(Op, DAG);
2368   case ISD::BUILD_VECTOR:
2369     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2370   case ISD::SPLAT_VECTOR:
2371     if (Op.getValueType().getVectorElementType() == MVT::i1)
2372       return lowerVectorMaskSplat(Op, DAG);
2373     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
2374   case ISD::VECTOR_SHUFFLE:
2375     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2376   case ISD::CONCAT_VECTORS: {
2377     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2378     // better than going through the stack, as the default expansion does.
2379     SDLoc DL(Op);
2380     MVT VT = Op.getSimpleValueType();
2381     unsigned NumOpElts =
2382         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2383     SDValue Vec = DAG.getUNDEF(VT);
2384     for (const auto &OpIdx : enumerate(Op->ops()))
2385       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2386                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2387     return Vec;
2388   }
2389   case ISD::LOAD:
2390     if (auto V = expandUnalignedRVVLoad(Op, DAG))
2391       return V;
2392     if (Op.getValueType().isFixedLengthVector())
2393       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2394     return Op;
2395   case ISD::STORE:
2396     if (auto V = expandUnalignedRVVStore(Op, DAG))
2397       return V;
2398     if (Op.getOperand(1).getValueType().isFixedLengthVector())
2399       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2400     return Op;
2401   case ISD::MLOAD:
2402     return lowerMLOAD(Op, DAG);
2403   case ISD::MSTORE:
2404     return lowerMSTORE(Op, DAG);
2405   case ISD::SETCC:
2406     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2407   case ISD::ADD:
2408     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2409   case ISD::SUB:
2410     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2411   case ISD::MUL:
2412     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2413   case ISD::MULHS:
2414     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2415   case ISD::MULHU:
2416     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2417   case ISD::AND:
2418     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2419                                               RISCVISD::AND_VL);
2420   case ISD::OR:
2421     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2422                                               RISCVISD::OR_VL);
2423   case ISD::XOR:
2424     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2425                                               RISCVISD::XOR_VL);
2426   case ISD::SDIV:
2427     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2428   case ISD::SREM:
2429     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2430   case ISD::UDIV:
2431     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2432   case ISD::UREM:
2433     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2434   case ISD::SHL:
2435     return lowerToScalableOp(Op, DAG, RISCVISD::SHL_VL);
2436   case ISD::SRA:
2437     return lowerToScalableOp(Op, DAG, RISCVISD::SRA_VL);
2438   case ISD::SRL:
2439     return lowerToScalableOp(Op, DAG, RISCVISD::SRL_VL);
2440   case ISD::FADD:
2441     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2442   case ISD::FSUB:
2443     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2444   case ISD::FMUL:
2445     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2446   case ISD::FDIV:
2447     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2448   case ISD::FNEG:
2449     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2450   case ISD::FABS:
2451     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2452   case ISD::FSQRT:
2453     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2454   case ISD::FMA:
2455     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2456   case ISD::SMIN:
2457     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2458   case ISD::SMAX:
2459     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2460   case ISD::UMIN:
2461     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2462   case ISD::UMAX:
2463     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2464   case ISD::FMINNUM:
2465     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
2466   case ISD::FMAXNUM:
2467     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
2468   case ISD::ABS:
2469     return lowerABS(Op, DAG);
2470   case ISD::VSELECT:
2471     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2472   case ISD::FCOPYSIGN:
2473     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2474   case ISD::MGATHER:
2475     return lowerMGATHER(Op, DAG);
2476   case ISD::MSCATTER:
2477     return lowerMSCATTER(Op, DAG);
2478   case ISD::FLT_ROUNDS_:
2479     return lowerGET_ROUNDING(Op, DAG);
2480   case ISD::SET_ROUNDING:
2481     return lowerSET_ROUNDING(Op, DAG);
2482   case ISD::VP_ADD:
2483     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
2484   case ISD::VP_SUB:
2485     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
2486   case ISD::VP_MUL:
2487     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
2488   case ISD::VP_SDIV:
2489     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
2490   case ISD::VP_UDIV:
2491     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
2492   case ISD::VP_SREM:
2493     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
2494   case ISD::VP_UREM:
2495     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
2496   case ISD::VP_AND:
2497     return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
2498   case ISD::VP_OR:
2499     return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
2500   case ISD::VP_XOR:
2501     return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
2502   case ISD::VP_ASHR:
2503     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
2504   case ISD::VP_LSHR:
2505     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
2506   case ISD::VP_SHL:
2507     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
2508   case ISD::VP_FADD:
2509     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
2510   case ISD::VP_FSUB:
2511     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
2512   case ISD::VP_FMUL:
2513     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
2514   case ISD::VP_FDIV:
2515     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
2516   }
2517 }
2518 
2519 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2520                              SelectionDAG &DAG, unsigned Flags) {
2521   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2522 }
2523 
2524 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2525                              SelectionDAG &DAG, unsigned Flags) {
2526   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2527                                    Flags);
2528 }
2529 
2530 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2531                              SelectionDAG &DAG, unsigned Flags) {
2532   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2533                                    N->getOffset(), Flags);
2534 }
2535 
2536 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2537                              SelectionDAG &DAG, unsigned Flags) {
2538   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2539 }
2540 
2541 template <class NodeTy>
2542 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2543                                      bool IsLocal) const {
2544   SDLoc DL(N);
2545   EVT Ty = getPointerTy(DAG.getDataLayout());
2546 
2547   if (isPositionIndependent()) {
2548     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2549     if (IsLocal)
2550       // Use PC-relative addressing to access the symbol. This generates the
2551       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
2552       // %pcrel_lo(auipc)).
2553       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2554 
2555     // Use PC-relative addressing to access the GOT for this symbol, then load
2556     // the address from the GOT. This generates the pattern (PseudoLA sym),
2557     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
2558     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
2559   }
2560 
2561   switch (getTargetMachine().getCodeModel()) {
2562   default:
2563     report_fatal_error("Unsupported code model for lowering");
2564   case CodeModel::Small: {
2565     // Generate a sequence for accessing addresses within the first 2 GiB of
2566     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
2567     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
2568     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
2569     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2570     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
2571   }
2572   case CodeModel::Medium: {
2573     // Generate a sequence for accessing addresses within any 2GiB range within
2574     // the address space. This generates the pattern (PseudoLLA sym), which
2575     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
2576     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2577     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2578   }
2579   }
2580 }
2581 
2582 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
2583                                                 SelectionDAG &DAG) const {
2584   SDLoc DL(Op);
2585   EVT Ty = Op.getValueType();
2586   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2587   int64_t Offset = N->getOffset();
2588   MVT XLenVT = Subtarget.getXLenVT();
2589 
2590   const GlobalValue *GV = N->getGlobal();
2591   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
2592   SDValue Addr = getAddr(N, DAG, IsLocal);
2593 
2594   // In order to maximise the opportunity for common subexpression elimination,
2595   // emit a separate ADD node for the global address offset instead of folding
2596   // it in the global address node. Later peephole optimisations may choose to
2597   // fold it back in when profitable.
2598   if (Offset != 0)
2599     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2600                        DAG.getConstant(Offset, DL, XLenVT));
2601   return Addr;
2602 }
2603 
2604 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
2605                                                SelectionDAG &DAG) const {
2606   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2607 
2608   return getAddr(N, DAG);
2609 }
2610 
2611 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
2612                                                SelectionDAG &DAG) const {
2613   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2614 
2615   return getAddr(N, DAG);
2616 }
2617 
2618 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
2619                                             SelectionDAG &DAG) const {
2620   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2621 
2622   return getAddr(N, DAG);
2623 }
2624 
2625 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
2626                                               SelectionDAG &DAG,
2627                                               bool UseGOT) const {
2628   SDLoc DL(N);
2629   EVT Ty = getPointerTy(DAG.getDataLayout());
2630   const GlobalValue *GV = N->getGlobal();
2631   MVT XLenVT = Subtarget.getXLenVT();
2632 
2633   if (UseGOT) {
2634     // Use PC-relative addressing to access the GOT for this TLS symbol, then
2635     // load the address from the GOT and add the thread pointer. This generates
2636     // the pattern (PseudoLA_TLS_IE sym), which expands to
2637     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
2638     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2639     SDValue Load =
2640         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
2641 
2642     // Add the thread pointer.
2643     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2644     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
2645   }
2646 
2647   // Generate a sequence for accessing the address relative to the thread
2648   // pointer, with the appropriate adjustment for the thread pointer offset.
2649   // This generates the pattern
2650   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
2651   SDValue AddrHi =
2652       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
2653   SDValue AddrAdd =
2654       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
2655   SDValue AddrLo =
2656       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
2657 
2658   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2659   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2660   SDValue MNAdd = SDValue(
2661       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
2662       0);
2663   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
2664 }
2665 
2666 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
2667                                                SelectionDAG &DAG) const {
2668   SDLoc DL(N);
2669   EVT Ty = getPointerTy(DAG.getDataLayout());
2670   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
2671   const GlobalValue *GV = N->getGlobal();
2672 
2673   // Use a PC-relative addressing mode to access the global dynamic GOT address.
2674   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
2675   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
2676   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2677   SDValue Load =
2678       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
2679 
2680   // Prepare argument list to generate call.
2681   ArgListTy Args;
2682   ArgListEntry Entry;
2683   Entry.Node = Load;
2684   Entry.Ty = CallTy;
2685   Args.push_back(Entry);
2686 
2687   // Setup call to __tls_get_addr.
2688   TargetLowering::CallLoweringInfo CLI(DAG);
2689   CLI.setDebugLoc(DL)
2690       .setChain(DAG.getEntryNode())
2691       .setLibCallee(CallingConv::C, CallTy,
2692                     DAG.getExternalSymbol("__tls_get_addr", Ty),
2693                     std::move(Args));
2694 
2695   return LowerCallTo(CLI).first;
2696 }
2697 
2698 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2699                                                    SelectionDAG &DAG) const {
2700   SDLoc DL(Op);
2701   EVT Ty = Op.getValueType();
2702   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2703   int64_t Offset = N->getOffset();
2704   MVT XLenVT = Subtarget.getXLenVT();
2705 
2706   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2707 
2708   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2709       CallingConv::GHC)
2710     report_fatal_error("In GHC calling convention TLS is not supported");
2711 
2712   SDValue Addr;
2713   switch (Model) {
2714   case TLSModel::LocalExec:
2715     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2716     break;
2717   case TLSModel::InitialExec:
2718     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2719     break;
2720   case TLSModel::LocalDynamic:
2721   case TLSModel::GeneralDynamic:
2722     Addr = getDynamicTLSAddr(N, DAG);
2723     break;
2724   }
2725 
2726   // In order to maximise the opportunity for common subexpression elimination,
2727   // emit a separate ADD node for the global address offset instead of folding
2728   // it in the global address node. Later peephole optimisations may choose to
2729   // fold it back in when profitable.
2730   if (Offset != 0)
2731     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2732                        DAG.getConstant(Offset, DL, XLenVT));
2733   return Addr;
2734 }
2735 
2736 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2737   SDValue CondV = Op.getOperand(0);
2738   SDValue TrueV = Op.getOperand(1);
2739   SDValue FalseV = Op.getOperand(2);
2740   SDLoc DL(Op);
2741   MVT VT = Op.getSimpleValueType();
2742   MVT XLenVT = Subtarget.getXLenVT();
2743 
2744   // Lower vector SELECTs to VSELECTs by splatting the condition.
2745   if (VT.isVector()) {
2746     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
2747     SDValue CondSplat = VT.isScalableVector()
2748                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
2749                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
2750     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
2751   }
2752 
2753   // If the result type is XLenVT and CondV is the output of a SETCC node
2754   // which also operated on XLenVT inputs, then merge the SETCC node into the
2755   // lowered RISCVISD::SELECT_CC to take advantage of the integer
2756   // compare+branch instructions. i.e.:
2757   // (select (setcc lhs, rhs, cc), truev, falsev)
2758   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
2759   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
2760       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
2761     SDValue LHS = CondV.getOperand(0);
2762     SDValue RHS = CondV.getOperand(1);
2763     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
2764     ISD::CondCode CCVal = CC->get();
2765 
2766     // Special case for a select of 2 constants that have a diffence of 1.
2767     // Normally this is done by DAGCombine, but if the select is introduced by
2768     // type legalization or op legalization, we miss it. Restricting to SETLT
2769     // case for now because that is what signed saturating add/sub need.
2770     // FIXME: We don't need the condition to be SETLT or even a SETCC,
2771     // but we would probably want to swap the true/false values if the condition
2772     // is SETGE/SETLE to avoid an XORI.
2773     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
2774         CCVal == ISD::SETLT) {
2775       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
2776       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
2777       if (TrueVal - 1 == FalseVal)
2778         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
2779       if (TrueVal + 1 == FalseVal)
2780         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
2781     }
2782 
2783     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2784 
2785     SDValue TargetCC = DAG.getTargetConstant(CCVal, DL, XLenVT);
2786     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
2787     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2788   }
2789 
2790   // Otherwise:
2791   // (select condv, truev, falsev)
2792   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
2793   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
2794   SDValue SetNE = DAG.getTargetConstant(ISD::SETNE, DL, XLenVT);
2795 
2796   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
2797 
2798   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2799 }
2800 
2801 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2802   SDValue CondV = Op.getOperand(1);
2803   SDLoc DL(Op);
2804   MVT XLenVT = Subtarget.getXLenVT();
2805 
2806   if (CondV.getOpcode() == ISD::SETCC &&
2807       CondV.getOperand(0).getValueType() == XLenVT) {
2808     SDValue LHS = CondV.getOperand(0);
2809     SDValue RHS = CondV.getOperand(1);
2810     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
2811 
2812     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2813 
2814     SDValue TargetCC = DAG.getCondCode(CCVal);
2815     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2816                        LHS, RHS, TargetCC, Op.getOperand(2));
2817   }
2818 
2819   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2820                      CondV, DAG.getConstant(0, DL, XLenVT),
2821                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
2822 }
2823 
2824 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2825   MachineFunction &MF = DAG.getMachineFunction();
2826   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
2827 
2828   SDLoc DL(Op);
2829   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2830                                  getPointerTy(MF.getDataLayout()));
2831 
2832   // vastart just stores the address of the VarArgsFrameIndex slot into the
2833   // memory location argument.
2834   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2835   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2836                       MachinePointerInfo(SV));
2837 }
2838 
2839 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
2840                                             SelectionDAG &DAG) const {
2841   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2842   MachineFunction &MF = DAG.getMachineFunction();
2843   MachineFrameInfo &MFI = MF.getFrameInfo();
2844   MFI.setFrameAddressIsTaken(true);
2845   Register FrameReg = RI.getFrameRegister(MF);
2846   int XLenInBytes = Subtarget.getXLen() / 8;
2847 
2848   EVT VT = Op.getValueType();
2849   SDLoc DL(Op);
2850   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
2851   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2852   while (Depth--) {
2853     int Offset = -(XLenInBytes * 2);
2854     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
2855                               DAG.getIntPtrConstant(Offset, DL));
2856     FrameAddr =
2857         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2858   }
2859   return FrameAddr;
2860 }
2861 
2862 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
2863                                              SelectionDAG &DAG) const {
2864   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2865   MachineFunction &MF = DAG.getMachineFunction();
2866   MachineFrameInfo &MFI = MF.getFrameInfo();
2867   MFI.setReturnAddressIsTaken(true);
2868   MVT XLenVT = Subtarget.getXLenVT();
2869   int XLenInBytes = Subtarget.getXLen() / 8;
2870 
2871   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2872     return SDValue();
2873 
2874   EVT VT = Op.getValueType();
2875   SDLoc DL(Op);
2876   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2877   if (Depth) {
2878     int Off = -XLenInBytes;
2879     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
2880     SDValue Offset = DAG.getConstant(Off, DL, VT);
2881     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
2882                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
2883                        MachinePointerInfo());
2884   }
2885 
2886   // Return the value of the return address register, marking it an implicit
2887   // live-in.
2888   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
2889   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
2890 }
2891 
2892 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
2893                                                  SelectionDAG &DAG) const {
2894   SDLoc DL(Op);
2895   SDValue Lo = Op.getOperand(0);
2896   SDValue Hi = Op.getOperand(1);
2897   SDValue Shamt = Op.getOperand(2);
2898   EVT VT = Lo.getValueType();
2899 
2900   // if Shamt-XLEN < 0: // Shamt < XLEN
2901   //   Lo = Lo << Shamt
2902   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
2903   // else:
2904   //   Lo = 0
2905   //   Hi = Lo << (Shamt-XLEN)
2906 
2907   SDValue Zero = DAG.getConstant(0, DL, VT);
2908   SDValue One = DAG.getConstant(1, DL, VT);
2909   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2910   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2911   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2912   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2913 
2914   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2915   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
2916   SDValue ShiftRightLo =
2917       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
2918   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2919   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2920   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
2921 
2922   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2923 
2924   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
2925   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2926 
2927   SDValue Parts[2] = {Lo, Hi};
2928   return DAG.getMergeValues(Parts, DL);
2929 }
2930 
2931 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2932                                                   bool IsSRA) const {
2933   SDLoc DL(Op);
2934   SDValue Lo = Op.getOperand(0);
2935   SDValue Hi = Op.getOperand(1);
2936   SDValue Shamt = Op.getOperand(2);
2937   EVT VT = Lo.getValueType();
2938 
2939   // SRA expansion:
2940   //   if Shamt-XLEN < 0: // Shamt < XLEN
2941   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2942   //     Hi = Hi >>s Shamt
2943   //   else:
2944   //     Lo = Hi >>s (Shamt-XLEN);
2945   //     Hi = Hi >>s (XLEN-1)
2946   //
2947   // SRL expansion:
2948   //   if Shamt-XLEN < 0: // Shamt < XLEN
2949   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2950   //     Hi = Hi >>u Shamt
2951   //   else:
2952   //     Lo = Hi >>u (Shamt-XLEN);
2953   //     Hi = 0;
2954 
2955   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
2956 
2957   SDValue Zero = DAG.getConstant(0, DL, VT);
2958   SDValue One = DAG.getConstant(1, DL, VT);
2959   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2960   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2961   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2962   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2963 
2964   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2965   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
2966   SDValue ShiftLeftHi =
2967       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
2968   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
2969   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
2970   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
2971   SDValue HiFalse =
2972       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
2973 
2974   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2975 
2976   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
2977   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2978 
2979   SDValue Parts[2] = {Lo, Hi};
2980   return DAG.getMergeValues(Parts, DL);
2981 }
2982 
2983 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
2984 // legal equivalently-sized i8 type, so we can use that as a go-between.
2985 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
2986                                                   SelectionDAG &DAG) const {
2987   SDLoc DL(Op);
2988   MVT VT = Op.getSimpleValueType();
2989   SDValue SplatVal = Op.getOperand(0);
2990   // All-zeros or all-ones splats are handled specially.
2991   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
2992     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
2993     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
2994   }
2995   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
2996     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
2997     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
2998   }
2999   MVT XLenVT = Subtarget.getXLenVT();
3000   assert(SplatVal.getValueType() == XLenVT &&
3001          "Unexpected type for i1 splat value");
3002   MVT InterVT = VT.changeVectorElementType(MVT::i8);
3003   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
3004                          DAG.getConstant(1, DL, XLenVT));
3005   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
3006   SDValue Zero = DAG.getConstant(0, DL, InterVT);
3007   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
3008 }
3009 
3010 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
3011 // illegal (currently only vXi64 RV32).
3012 // FIXME: We could also catch non-constant sign-extended i32 values and lower
3013 // them to SPLAT_VECTOR_I64
3014 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
3015                                                      SelectionDAG &DAG) const {
3016   SDLoc DL(Op);
3017   MVT VecVT = Op.getSimpleValueType();
3018   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
3019          "Unexpected SPLAT_VECTOR_PARTS lowering");
3020 
3021   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
3022   SDValue Lo = Op.getOperand(0);
3023   SDValue Hi = Op.getOperand(1);
3024 
3025   if (VecVT.isFixedLengthVector()) {
3026     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3027     SDLoc DL(Op);
3028     SDValue Mask, VL;
3029     std::tie(Mask, VL) =
3030         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3031 
3032     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
3033     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
3034   }
3035 
3036   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
3037     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
3038     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
3039     // If Hi constant is all the same sign bit as Lo, lower this as a custom
3040     // node in order to try and match RVV vector/scalar instructions.
3041     if ((LoC >> 31) == HiC)
3042       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3043   }
3044 
3045   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
3046   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
3047       isa<ConstantSDNode>(Hi.getOperand(1)) &&
3048       Hi.getConstantOperandVal(1) == 31)
3049     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3050 
3051   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
3052   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
3053                      DAG.getRegister(RISCV::X0, MVT::i64));
3054 }
3055 
3056 // Custom-lower extensions from mask vectors by using a vselect either with 1
3057 // for zero/any-extension or -1 for sign-extension:
3058 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
3059 // Note that any-extension is lowered identically to zero-extension.
3060 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
3061                                                 int64_t ExtTrueVal) const {
3062   SDLoc DL(Op);
3063   MVT VecVT = Op.getSimpleValueType();
3064   SDValue Src = Op.getOperand(0);
3065   // Only custom-lower extensions from mask types
3066   assert(Src.getValueType().isVector() &&
3067          Src.getValueType().getVectorElementType() == MVT::i1);
3068 
3069   MVT XLenVT = Subtarget.getXLenVT();
3070   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
3071   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
3072 
3073   if (VecVT.isScalableVector()) {
3074     // Be careful not to introduce illegal scalar types at this stage, and be
3075     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
3076     // illegal and must be expanded. Since we know that the constants are
3077     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
3078     bool IsRV32E64 =
3079         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
3080 
3081     if (!IsRV32E64) {
3082       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
3083       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
3084     } else {
3085       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
3086       SplatTrueVal =
3087           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
3088     }
3089 
3090     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
3091   }
3092 
3093   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3094   MVT I1ContainerVT =
3095       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3096 
3097   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
3098 
3099   SDValue Mask, VL;
3100   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3101 
3102   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3103   SplatTrueVal =
3104       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3105   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3106                                SplatTrueVal, SplatZero, VL);
3107 
3108   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3109 }
3110 
3111 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3112     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3113   MVT ExtVT = Op.getSimpleValueType();
3114   // Only custom-lower extensions from fixed-length vector types.
3115   if (!ExtVT.isFixedLengthVector())
3116     return Op;
3117   MVT VT = Op.getOperand(0).getSimpleValueType();
3118   // Grab the canonical container type for the extended type. Infer the smaller
3119   // type from that to ensure the same number of vector elements, as we know
3120   // the LMUL will be sufficient to hold the smaller type.
3121   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
3122   // Get the extended container type manually to ensure the same number of
3123   // vector elements between source and dest.
3124   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3125                                      ContainerExtVT.getVectorElementCount());
3126 
3127   SDValue Op1 =
3128       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3129 
3130   SDLoc DL(Op);
3131   SDValue Mask, VL;
3132   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3133 
3134   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3135 
3136   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3137 }
3138 
3139 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3140 // setcc operation:
3141 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3142 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3143                                                   SelectionDAG &DAG) const {
3144   SDLoc DL(Op);
3145   EVT MaskVT = Op.getValueType();
3146   // Only expect to custom-lower truncations to mask types
3147   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3148          "Unexpected type for vector mask lowering");
3149   SDValue Src = Op.getOperand(0);
3150   MVT VecVT = Src.getSimpleValueType();
3151 
3152   // If this is a fixed vector, we need to convert it to a scalable vector.
3153   MVT ContainerVT = VecVT;
3154   if (VecVT.isFixedLengthVector()) {
3155     ContainerVT = getContainerForFixedLengthVector(VecVT);
3156     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3157   }
3158 
3159   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
3160   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
3161 
3162   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
3163   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
3164 
3165   if (VecVT.isScalableVector()) {
3166     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
3167     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
3168   }
3169 
3170   SDValue Mask, VL;
3171   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3172 
3173   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3174   SDValue Trunc =
3175       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3176   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3177                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3178   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3179 }
3180 
3181 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3182 // first position of a vector, and that vector is slid up to the insert index.
3183 // By limiting the active vector length to index+1 and merging with the
3184 // original vector (with an undisturbed tail policy for elements >= VL), we
3185 // achieve the desired result of leaving all elements untouched except the one
3186 // at VL-1, which is replaced with the desired value.
3187 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3188                                                     SelectionDAG &DAG) const {
3189   SDLoc DL(Op);
3190   MVT VecVT = Op.getSimpleValueType();
3191   SDValue Vec = Op.getOperand(0);
3192   SDValue Val = Op.getOperand(1);
3193   SDValue Idx = Op.getOperand(2);
3194 
3195   if (VecVT.getVectorElementType() == MVT::i1) {
3196     // FIXME: For now we just promote to an i8 vector and insert into that,
3197     // but this is probably not optimal.
3198     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3199     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3200     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
3201     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
3202   }
3203 
3204   MVT ContainerVT = VecVT;
3205   // If the operand is a fixed-length vector, convert to a scalable one.
3206   if (VecVT.isFixedLengthVector()) {
3207     ContainerVT = getContainerForFixedLengthVector(VecVT);
3208     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3209   }
3210 
3211   MVT XLenVT = Subtarget.getXLenVT();
3212 
3213   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3214   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
3215   // Even i64-element vectors on RV32 can be lowered without scalar
3216   // legalization if the most-significant 32 bits of the value are not affected
3217   // by the sign-extension of the lower 32 bits.
3218   // TODO: We could also catch sign extensions of a 32-bit value.
3219   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3220     const auto *CVal = cast<ConstantSDNode>(Val);
3221     if (isInt<32>(CVal->getSExtValue())) {
3222       IsLegalInsert = true;
3223       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3224     }
3225   }
3226 
3227   SDValue Mask, VL;
3228   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3229 
3230   SDValue ValInVec;
3231 
3232   if (IsLegalInsert) {
3233     unsigned Opc =
3234         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3235     if (isNullConstant(Idx)) {
3236       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3237       if (!VecVT.isFixedLengthVector())
3238         return Vec;
3239       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3240     }
3241     ValInVec =
3242         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3243   } else {
3244     // On RV32, i64-element vectors must be specially handled to place the
3245     // value at element 0, by using two vslide1up instructions in sequence on
3246     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3247     // this.
3248     SDValue One = DAG.getConstant(1, DL, XLenVT);
3249     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3250     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3251     MVT I32ContainerVT =
3252         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3253     SDValue I32Mask =
3254         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3255     // Limit the active VL to two.
3256     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
3257     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
3258     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3259     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3260                            InsertI64VL);
3261     // First slide in the hi value, then the lo in underneath it.
3262     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3263                            ValHi, I32Mask, InsertI64VL);
3264     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3265                            ValLo, I32Mask, InsertI64VL);
3266     // Bitcast back to the right container type.
3267     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3268   }
3269 
3270   // Now that the value is in a vector, slide it into position.
3271   SDValue InsertVL =
3272       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3273   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3274                                 ValInVec, Idx, Mask, InsertVL);
3275   if (!VecVT.isFixedLengthVector())
3276     return Slideup;
3277   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3278 }
3279 
3280 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3281 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3282 // types this is done using VMV_X_S to allow us to glean information about the
3283 // sign bits of the result.
3284 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3285                                                      SelectionDAG &DAG) const {
3286   SDLoc DL(Op);
3287   SDValue Idx = Op.getOperand(1);
3288   SDValue Vec = Op.getOperand(0);
3289   EVT EltVT = Op.getValueType();
3290   MVT VecVT = Vec.getSimpleValueType();
3291   MVT XLenVT = Subtarget.getXLenVT();
3292 
3293   if (VecVT.getVectorElementType() == MVT::i1) {
3294     // FIXME: For now we just promote to an i8 vector and extract from that,
3295     // but this is probably not optimal.
3296     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3297     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3298     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3299   }
3300 
3301   // If this is a fixed vector, we need to convert it to a scalable vector.
3302   MVT ContainerVT = VecVT;
3303   if (VecVT.isFixedLengthVector()) {
3304     ContainerVT = getContainerForFixedLengthVector(VecVT);
3305     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3306   }
3307 
3308   // If the index is 0, the vector is already in the right position.
3309   if (!isNullConstant(Idx)) {
3310     // Use a VL of 1 to avoid processing more elements than we need.
3311     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3312     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3313     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3314     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3315                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3316   }
3317 
3318   if (!EltVT.isInteger()) {
3319     // Floating-point extracts are handled in TableGen.
3320     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3321                        DAG.getConstant(0, DL, XLenVT));
3322   }
3323 
3324   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3325   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3326 }
3327 
3328 // Some RVV intrinsics may claim that they want an integer operand to be
3329 // promoted or expanded.
3330 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3331                                           const RISCVSubtarget &Subtarget) {
3332   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3333           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3334          "Unexpected opcode");
3335 
3336   if (!Subtarget.hasStdExtV())
3337     return SDValue();
3338 
3339   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3340   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3341   SDLoc DL(Op);
3342 
3343   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3344       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3345   if (!II || !II->SplatOperand)
3346     return SDValue();
3347 
3348   unsigned SplatOp = II->SplatOperand + HasChain;
3349   assert(SplatOp < Op.getNumOperands());
3350 
3351   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3352   SDValue &ScalarOp = Operands[SplatOp];
3353   MVT OpVT = ScalarOp.getSimpleValueType();
3354   MVT XLenVT = Subtarget.getXLenVT();
3355 
3356   // If this isn't a scalar, or its type is XLenVT we're done.
3357   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3358     return SDValue();
3359 
3360   // Simplest case is that the operand needs to be promoted to XLenVT.
3361   if (OpVT.bitsLT(XLenVT)) {
3362     // If the operand is a constant, sign extend to increase our chances
3363     // of being able to use a .vi instruction. ANY_EXTEND would become a
3364     // a zero extend and the simm5 check in isel would fail.
3365     // FIXME: Should we ignore the upper bits in isel instead?
3366     unsigned ExtOpc =
3367         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3368     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3369     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3370   }
3371 
3372   // Use the previous operand to get the vXi64 VT. The result might be a mask
3373   // VT for compares. Using the previous operand assumes that the previous
3374   // operand will never have a smaller element size than a scalar operand and
3375   // that a widening operation never uses SEW=64.
3376   // NOTE: If this fails the below assert, we can probably just find the
3377   // element count from any operand or result and use it to construct the VT.
3378   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3379   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3380 
3381   // The more complex case is when the scalar is larger than XLenVT.
3382   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3383          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3384 
3385   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3386   // on the instruction to sign-extend since SEW>XLEN.
3387   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3388     if (isInt<32>(CVal->getSExtValue())) {
3389       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3390       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3391     }
3392   }
3393 
3394   // We need to convert the scalar to a splat vector.
3395   // FIXME: Can we implicitly truncate the scalar if it is known to
3396   // be sign extended?
3397   // VL should be the last operand.
3398   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3399   assert(VL.getValueType() == XLenVT);
3400   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3401   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3402 }
3403 
3404 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3405                                                      SelectionDAG &DAG) const {
3406   unsigned IntNo = Op.getConstantOperandVal(0);
3407   SDLoc DL(Op);
3408   MVT XLenVT = Subtarget.getXLenVT();
3409 
3410   switch (IntNo) {
3411   default:
3412     break; // Don't custom lower most intrinsics.
3413   case Intrinsic::thread_pointer: {
3414     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3415     return DAG.getRegister(RISCV::X4, PtrVT);
3416   }
3417   case Intrinsic::riscv_orc_b:
3418     // Lower to the GORCI encoding for orc.b.
3419     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3420                        DAG.getConstant(7, DL, XLenVT));
3421   case Intrinsic::riscv_grev:
3422   case Intrinsic::riscv_gorc: {
3423     unsigned Opc =
3424         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3425     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3426   }
3427   case Intrinsic::riscv_shfl:
3428   case Intrinsic::riscv_unshfl: {
3429     unsigned Opc =
3430         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3431     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3432   }
3433   case Intrinsic::riscv_bcompress:
3434   case Intrinsic::riscv_bdecompress: {
3435     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3436                                                        : RISCVISD::BDECOMPRESS;
3437     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3438   }
3439   case Intrinsic::riscv_vmv_x_s:
3440     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3441     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3442                        Op.getOperand(1));
3443   case Intrinsic::riscv_vmv_v_x:
3444     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3445                             Op.getSimpleValueType(), DL, DAG, Subtarget);
3446   case Intrinsic::riscv_vfmv_v_f:
3447     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3448                        Op.getOperand(1), Op.getOperand(2));
3449   case Intrinsic::riscv_vmv_s_x: {
3450     SDValue Scalar = Op.getOperand(2);
3451 
3452     if (Scalar.getValueType().bitsLE(XLenVT)) {
3453       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3454       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3455                          Op.getOperand(1), Scalar, Op.getOperand(3));
3456     }
3457 
3458     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3459 
3460     // This is an i64 value that lives in two scalar registers. We have to
3461     // insert this in a convoluted way. First we build vXi64 splat containing
3462     // the/ two values that we assemble using some bit math. Next we'll use
3463     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
3464     // to merge element 0 from our splat into the source vector.
3465     // FIXME: This is probably not the best way to do this, but it is
3466     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3467     // point.
3468     //   sw lo, (a0)
3469     //   sw hi, 4(a0)
3470     //   vlse vX, (a0)
3471     //
3472     //   vid.v      vVid
3473     //   vmseq.vx   mMask, vVid, 0
3474     //   vmerge.vvm vDest, vSrc, vVal, mMask
3475     MVT VT = Op.getSimpleValueType();
3476     SDValue Vec = Op.getOperand(1);
3477     SDValue VL = Op.getOperand(3);
3478 
3479     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
3480     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3481                                       DAG.getConstant(0, DL, MVT::i32), VL);
3482 
3483     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3484     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3485     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3486     SDValue SelectCond =
3487         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3488                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
3489     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3490                        Vec, VL);
3491   }
3492   case Intrinsic::riscv_vslide1up:
3493   case Intrinsic::riscv_vslide1down:
3494   case Intrinsic::riscv_vslide1up_mask:
3495   case Intrinsic::riscv_vslide1down_mask: {
3496     // We need to special case these when the scalar is larger than XLen.
3497     unsigned NumOps = Op.getNumOperands();
3498     bool IsMasked = NumOps == 6;
3499     unsigned OpOffset = IsMasked ? 1 : 0;
3500     SDValue Scalar = Op.getOperand(2 + OpOffset);
3501     if (Scalar.getValueType().bitsLE(XLenVT))
3502       break;
3503 
3504     // Splatting a sign extended constant is fine.
3505     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3506       if (isInt<32>(CVal->getSExtValue()))
3507         break;
3508 
3509     MVT VT = Op.getSimpleValueType();
3510     assert(VT.getVectorElementType() == MVT::i64 &&
3511            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3512 
3513     // Convert the vector source to the equivalent nxvXi32 vector.
3514     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3515     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3516 
3517     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3518                                    DAG.getConstant(0, DL, XLenVT));
3519     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3520                                    DAG.getConstant(1, DL, XLenVT));
3521 
3522     // Double the VL since we halved SEW.
3523     SDValue VL = Op.getOperand(NumOps - 1);
3524     SDValue I32VL =
3525         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3526 
3527     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
3528     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
3529 
3530     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3531     // instructions.
3532     if (IntNo == Intrinsic::riscv_vslide1up ||
3533         IntNo == Intrinsic::riscv_vslide1up_mask) {
3534       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3535                         I32Mask, I32VL);
3536       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3537                         I32Mask, I32VL);
3538     } else {
3539       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3540                         I32Mask, I32VL);
3541       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3542                         I32Mask, I32VL);
3543     }
3544 
3545     // Convert back to nxvXi64.
3546     Vec = DAG.getBitcast(VT, Vec);
3547 
3548     if (!IsMasked)
3549       return Vec;
3550 
3551     // Apply mask after the operation.
3552     SDValue Mask = Op.getOperand(NumOps - 2);
3553     SDValue MaskedOff = Op.getOperand(1);
3554     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
3555   }
3556   }
3557 
3558   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3559 }
3560 
3561 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
3562                                                     SelectionDAG &DAG) const {
3563   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3564 }
3565 
3566 static MVT getLMUL1VT(MVT VT) {
3567   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
3568          "Unexpected vector MVT");
3569   return MVT::getScalableVectorVT(
3570       VT.getVectorElementType(),
3571       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
3572 }
3573 
3574 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
3575   switch (ISDOpcode) {
3576   default:
3577     llvm_unreachable("Unhandled reduction");
3578   case ISD::VECREDUCE_ADD:
3579     return RISCVISD::VECREDUCE_ADD_VL;
3580   case ISD::VECREDUCE_UMAX:
3581     return RISCVISD::VECREDUCE_UMAX_VL;
3582   case ISD::VECREDUCE_SMAX:
3583     return RISCVISD::VECREDUCE_SMAX_VL;
3584   case ISD::VECREDUCE_UMIN:
3585     return RISCVISD::VECREDUCE_UMIN_VL;
3586   case ISD::VECREDUCE_SMIN:
3587     return RISCVISD::VECREDUCE_SMIN_VL;
3588   case ISD::VECREDUCE_AND:
3589     return RISCVISD::VECREDUCE_AND_VL;
3590   case ISD::VECREDUCE_OR:
3591     return RISCVISD::VECREDUCE_OR_VL;
3592   case ISD::VECREDUCE_XOR:
3593     return RISCVISD::VECREDUCE_XOR_VL;
3594   }
3595 }
3596 
3597 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op,
3598                                                       SelectionDAG &DAG) const {
3599   SDLoc DL(Op);
3600   SDValue Vec = Op.getOperand(0);
3601   MVT VecVT = Vec.getSimpleValueType();
3602   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
3603           Op.getOpcode() == ISD::VECREDUCE_OR ||
3604           Op.getOpcode() == ISD::VECREDUCE_XOR) &&
3605          "Unexpected reduction lowering");
3606 
3607   MVT XLenVT = Subtarget.getXLenVT();
3608   assert(Op.getValueType() == XLenVT &&
3609          "Expected reduction output to be legalized to XLenVT");
3610 
3611   MVT ContainerVT = VecVT;
3612   if (VecVT.isFixedLengthVector()) {
3613     ContainerVT = getContainerForFixedLengthVector(VecVT);
3614     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3615   }
3616 
3617   SDValue Mask, VL;
3618   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3619   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3620 
3621   switch (Op.getOpcode()) {
3622   default:
3623     llvm_unreachable("Unhandled reduction");
3624   case ISD::VECREDUCE_AND:
3625     // vpopc ~x == 0
3626     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL);
3627     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3628     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ);
3629   case ISD::VECREDUCE_OR:
3630     // vpopc x != 0
3631     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3632     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3633   case ISD::VECREDUCE_XOR: {
3634     // ((vpopc x) & 1) != 0
3635     SDValue One = DAG.getConstant(1, DL, XLenVT);
3636     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3637     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
3638     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3639   }
3640   }
3641 }
3642 
3643 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
3644                                             SelectionDAG &DAG) const {
3645   SDLoc DL(Op);
3646   SDValue Vec = Op.getOperand(0);
3647   EVT VecEVT = Vec.getValueType();
3648 
3649   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
3650 
3651   // Due to ordering in legalize types we may have a vector type that needs to
3652   // be split. Do that manually so we can get down to a legal type.
3653   while (getTypeAction(*DAG.getContext(), VecEVT) ==
3654          TargetLowering::TypeSplitVector) {
3655     SDValue Lo, Hi;
3656     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
3657     VecEVT = Lo.getValueType();
3658     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
3659   }
3660 
3661   // TODO: The type may need to be widened rather than split. Or widened before
3662   // it can be split.
3663   if (!isTypeLegal(VecEVT))
3664     return SDValue();
3665 
3666   MVT VecVT = VecEVT.getSimpleVT();
3667   MVT VecEltVT = VecVT.getVectorElementType();
3668   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
3669 
3670   MVT ContainerVT = VecVT;
3671   if (VecVT.isFixedLengthVector()) {
3672     ContainerVT = getContainerForFixedLengthVector(VecVT);
3673     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3674   }
3675 
3676   MVT M1VT = getLMUL1VT(ContainerVT);
3677 
3678   SDValue Mask, VL;
3679   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3680 
3681   // FIXME: This is a VLMAX splat which might be too large and can prevent
3682   // vsetvli removal.
3683   SDValue NeutralElem =
3684       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
3685   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
3686   SDValue Reduction =
3687       DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
3688   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3689                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3690   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
3691 }
3692 
3693 // Given a reduction op, this function returns the matching reduction opcode,
3694 // the vector SDValue and the scalar SDValue required to lower this to a
3695 // RISCVISD node.
3696 static std::tuple<unsigned, SDValue, SDValue>
3697 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
3698   SDLoc DL(Op);
3699   auto Flags = Op->getFlags();
3700   unsigned Opcode = Op.getOpcode();
3701   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
3702   switch (Opcode) {
3703   default:
3704     llvm_unreachable("Unhandled reduction");
3705   case ISD::VECREDUCE_FADD:
3706     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
3707                            DAG.getConstantFP(0.0, DL, EltVT));
3708   case ISD::VECREDUCE_SEQ_FADD:
3709     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
3710                            Op.getOperand(0));
3711   case ISD::VECREDUCE_FMIN:
3712     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
3713                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3714   case ISD::VECREDUCE_FMAX:
3715     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
3716                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3717   }
3718 }
3719 
3720 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
3721                                               SelectionDAG &DAG) const {
3722   SDLoc DL(Op);
3723   MVT VecEltVT = Op.getSimpleValueType();
3724 
3725   unsigned RVVOpcode;
3726   SDValue VectorVal, ScalarVal;
3727   std::tie(RVVOpcode, VectorVal, ScalarVal) =
3728       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
3729   MVT VecVT = VectorVal.getSimpleValueType();
3730 
3731   MVT ContainerVT = VecVT;
3732   if (VecVT.isFixedLengthVector()) {
3733     ContainerVT = getContainerForFixedLengthVector(VecVT);
3734     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
3735   }
3736 
3737   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
3738 
3739   SDValue Mask, VL;
3740   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3741 
3742   // FIXME: This is a VLMAX splat which might be too large and can prevent
3743   // vsetvli removal.
3744   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
3745   SDValue Reduction =
3746       DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
3747   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3748                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3749 }
3750 
3751 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
3752                                                    SelectionDAG &DAG) const {
3753   SDValue Vec = Op.getOperand(0);
3754   SDValue SubVec = Op.getOperand(1);
3755   MVT VecVT = Vec.getSimpleValueType();
3756   MVT SubVecVT = SubVec.getSimpleValueType();
3757 
3758   SDLoc DL(Op);
3759   MVT XLenVT = Subtarget.getXLenVT();
3760   unsigned OrigIdx = Op.getConstantOperandVal(2);
3761   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3762 
3763   // We don't have the ability to slide mask vectors up indexed by their i1
3764   // elements; the smallest we can do is i8. Often we are able to bitcast to
3765   // equivalent i8 vectors. Note that when inserting a fixed-length vector
3766   // into a scalable one, we might not necessarily have enough scalable
3767   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
3768   if (SubVecVT.getVectorElementType() == MVT::i1 &&
3769       (OrigIdx != 0 || !Vec.isUndef())) {
3770     if (VecVT.getVectorMinNumElements() >= 8 &&
3771         SubVecVT.getVectorMinNumElements() >= 8) {
3772       assert(OrigIdx % 8 == 0 && "Invalid index");
3773       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3774              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3775              "Unexpected mask vector lowering");
3776       OrigIdx /= 8;
3777       SubVecVT =
3778           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3779                            SubVecVT.isScalableVector());
3780       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3781                                VecVT.isScalableVector());
3782       Vec = DAG.getBitcast(VecVT, Vec);
3783       SubVec = DAG.getBitcast(SubVecVT, SubVec);
3784     } else {
3785       // We can't slide this mask vector up indexed by its i1 elements.
3786       // This poses a problem when we wish to insert a scalable vector which
3787       // can't be re-expressed as a larger type. Just choose the slow path and
3788       // extend to a larger type, then truncate back down.
3789       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3790       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3791       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3792       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
3793       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
3794                         Op.getOperand(2));
3795       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
3796       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
3797     }
3798   }
3799 
3800   // If the subvector vector is a fixed-length type, we cannot use subregister
3801   // manipulation to simplify the codegen; we don't know which register of a
3802   // LMUL group contains the specific subvector as we only know the minimum
3803   // register size. Therefore we must slide the vector group up the full
3804   // amount.
3805   if (SubVecVT.isFixedLengthVector()) {
3806     if (OrigIdx == 0 && Vec.isUndef())
3807       return Op;
3808     MVT ContainerVT = VecVT;
3809     if (VecVT.isFixedLengthVector()) {
3810       ContainerVT = getContainerForFixedLengthVector(VecVT);
3811       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3812     }
3813     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
3814                          DAG.getUNDEF(ContainerVT), SubVec,
3815                          DAG.getConstant(0, DL, XLenVT));
3816     SDValue Mask =
3817         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3818     // Set the vector length to only the number of elements we care about. Note
3819     // that for slideup this includes the offset.
3820     SDValue VL =
3821         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
3822     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3823     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3824                                   SubVec, SlideupAmt, Mask, VL);
3825     if (VecVT.isFixedLengthVector())
3826       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3827     return DAG.getBitcast(Op.getValueType(), Slideup);
3828   }
3829 
3830   unsigned SubRegIdx, RemIdx;
3831   std::tie(SubRegIdx, RemIdx) =
3832       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3833           VecVT, SubVecVT, OrigIdx, TRI);
3834 
3835   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
3836   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
3837                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
3838                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
3839 
3840   // 1. If the Idx has been completely eliminated and this subvector's size is
3841   // a vector register or a multiple thereof, or the surrounding elements are
3842   // undef, then this is a subvector insert which naturally aligns to a vector
3843   // register. These can easily be handled using subregister manipulation.
3844   // 2. If the subvector is smaller than a vector register, then the insertion
3845   // must preserve the undisturbed elements of the register. We do this by
3846   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
3847   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
3848   // subvector within the vector register, and an INSERT_SUBVECTOR of that
3849   // LMUL=1 type back into the larger vector (resolving to another subregister
3850   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
3851   // to avoid allocating a large register group to hold our subvector.
3852   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
3853     return Op;
3854 
3855   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
3856   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
3857   // (in our case undisturbed). This means we can set up a subvector insertion
3858   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
3859   // size of the subvector.
3860   MVT InterSubVT = VecVT;
3861   SDValue AlignedExtract = Vec;
3862   unsigned AlignedIdx = OrigIdx - RemIdx;
3863   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3864     InterSubVT = getLMUL1VT(VecVT);
3865     // Extract a subvector equal to the nearest full vector register type. This
3866     // should resolve to a EXTRACT_SUBREG instruction.
3867     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3868                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
3869   }
3870 
3871   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3872   // For scalable vectors this must be further multiplied by vscale.
3873   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
3874 
3875   SDValue Mask, VL;
3876   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
3877 
3878   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
3879   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
3880   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
3881   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
3882 
3883   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
3884                        DAG.getUNDEF(InterSubVT), SubVec,
3885                        DAG.getConstant(0, DL, XLenVT));
3886 
3887   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
3888                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
3889 
3890   // If required, insert this subvector back into the correct vector register.
3891   // This should resolve to an INSERT_SUBREG instruction.
3892   if (VecVT.bitsGT(InterSubVT))
3893     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
3894                           DAG.getConstant(AlignedIdx, DL, XLenVT));
3895 
3896   // We might have bitcast from a mask type: cast back to the original type if
3897   // required.
3898   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
3899 }
3900 
3901 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
3902                                                     SelectionDAG &DAG) const {
3903   SDValue Vec = Op.getOperand(0);
3904   MVT SubVecVT = Op.getSimpleValueType();
3905   MVT VecVT = Vec.getSimpleValueType();
3906 
3907   SDLoc DL(Op);
3908   MVT XLenVT = Subtarget.getXLenVT();
3909   unsigned OrigIdx = Op.getConstantOperandVal(1);
3910   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3911 
3912   // We don't have the ability to slide mask vectors down indexed by their i1
3913   // elements; the smallest we can do is i8. Often we are able to bitcast to
3914   // equivalent i8 vectors. Note that when extracting a fixed-length vector
3915   // from a scalable one, we might not necessarily have enough scalable
3916   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
3917   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
3918     if (VecVT.getVectorMinNumElements() >= 8 &&
3919         SubVecVT.getVectorMinNumElements() >= 8) {
3920       assert(OrigIdx % 8 == 0 && "Invalid index");
3921       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3922              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3923              "Unexpected mask vector lowering");
3924       OrigIdx /= 8;
3925       SubVecVT =
3926           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3927                            SubVecVT.isScalableVector());
3928       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3929                                VecVT.isScalableVector());
3930       Vec = DAG.getBitcast(VecVT, Vec);
3931     } else {
3932       // We can't slide this mask vector down, indexed by its i1 elements.
3933       // This poses a problem when we wish to extract a scalable vector which
3934       // can't be re-expressed as a larger type. Just choose the slow path and
3935       // extend to a larger type, then truncate back down.
3936       // TODO: We could probably improve this when extracting certain fixed
3937       // from fixed, where we can extract as i8 and shift the correct element
3938       // right to reach the desired subvector?
3939       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3940       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3941       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3942       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
3943                         Op.getOperand(1));
3944       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
3945       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
3946     }
3947   }
3948 
3949   // If the subvector vector is a fixed-length type, we cannot use subregister
3950   // manipulation to simplify the codegen; we don't know which register of a
3951   // LMUL group contains the specific subvector as we only know the minimum
3952   // register size. Therefore we must slide the vector group down the full
3953   // amount.
3954   if (SubVecVT.isFixedLengthVector()) {
3955     // With an index of 0 this is a cast-like subvector, which can be performed
3956     // with subregister operations.
3957     if (OrigIdx == 0)
3958       return Op;
3959     MVT ContainerVT = VecVT;
3960     if (VecVT.isFixedLengthVector()) {
3961       ContainerVT = getContainerForFixedLengthVector(VecVT);
3962       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3963     }
3964     SDValue Mask =
3965         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3966     // Set the vector length to only the number of elements we care about. This
3967     // avoids sliding down elements we're going to discard straight away.
3968     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
3969     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3970     SDValue Slidedown =
3971         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3972                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
3973     // Now we can use a cast-like subvector extract to get the result.
3974     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3975                             DAG.getConstant(0, DL, XLenVT));
3976     return DAG.getBitcast(Op.getValueType(), Slidedown);
3977   }
3978 
3979   unsigned SubRegIdx, RemIdx;
3980   std::tie(SubRegIdx, RemIdx) =
3981       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3982           VecVT, SubVecVT, OrigIdx, TRI);
3983 
3984   // If the Idx has been completely eliminated then this is a subvector extract
3985   // which naturally aligns to a vector register. These can easily be handled
3986   // using subregister manipulation.
3987   if (RemIdx == 0)
3988     return Op;
3989 
3990   // Else we must shift our vector register directly to extract the subvector.
3991   // Do this using VSLIDEDOWN.
3992 
3993   // If the vector type is an LMUL-group type, extract a subvector equal to the
3994   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
3995   // instruction.
3996   MVT InterSubVT = VecVT;
3997   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3998     InterSubVT = getLMUL1VT(VecVT);
3999     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4000                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
4001   }
4002 
4003   // Slide this vector register down by the desired number of elements in order
4004   // to place the desired subvector starting at element 0.
4005   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4006   // For scalable vectors this must be further multiplied by vscale.
4007   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
4008 
4009   SDValue Mask, VL;
4010   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
4011   SDValue Slidedown =
4012       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
4013                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
4014 
4015   // Now the vector is in the right position, extract our final subvector. This
4016   // should resolve to a COPY.
4017   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4018                           DAG.getConstant(0, DL, XLenVT));
4019 
4020   // We might have bitcast from a mask type: cast back to the original type if
4021   // required.
4022   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
4023 }
4024 
4025 // Lower step_vector to the vid instruction. Any non-identity step value must
4026 // be accounted for my manual expansion.
4027 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
4028                                               SelectionDAG &DAG) const {
4029   SDLoc DL(Op);
4030   MVT VT = Op.getSimpleValueType();
4031   MVT XLenVT = Subtarget.getXLenVT();
4032   SDValue Mask, VL;
4033   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
4034   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4035   uint64_t StepValImm = Op.getConstantOperandVal(0);
4036   if (StepValImm != 1) {
4037     assert(Op.getOperand(0).getValueType() == XLenVT &&
4038            "Unexpected step value type");
4039     if (isPowerOf2_64(StepValImm)) {
4040       SDValue StepVal =
4041           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4042                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
4043       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
4044     } else {
4045       SDValue StepVal =
4046           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Op.getOperand(0));
4047       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
4048     }
4049   }
4050   return StepVec;
4051 }
4052 
4053 // Implement vector_reverse using vrgather.vv with indices determined by
4054 // subtracting the id of each element from (VLMAX-1). This will convert
4055 // the indices like so:
4056 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
4057 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
4058 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
4059                                                  SelectionDAG &DAG) const {
4060   SDLoc DL(Op);
4061   MVT VecVT = Op.getSimpleValueType();
4062   unsigned EltSize = VecVT.getScalarSizeInBits();
4063   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
4064 
4065   unsigned MaxVLMAX = 0;
4066   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
4067   if (VectorBitsMax != 0)
4068     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
4069 
4070   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
4071   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
4072 
4073   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
4074   // to use vrgatherei16.vv.
4075   // TODO: It's also possible to use vrgatherei16.vv for other types to
4076   // decrease register width for the index calculation.
4077   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
4078     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
4079     // Reverse each half, then reassemble them in reverse order.
4080     // NOTE: It's also possible that after splitting that VLMAX no longer
4081     // requires vrgatherei16.vv.
4082     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
4083       SDValue Lo, Hi;
4084       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
4085       EVT LoVT, HiVT;
4086       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
4087       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
4088       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
4089       // Reassemble the low and high pieces reversed.
4090       // FIXME: This is a CONCAT_VECTORS.
4091       SDValue Res =
4092           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
4093                       DAG.getIntPtrConstant(0, DL));
4094       return DAG.getNode(
4095           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
4096           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
4097     }
4098 
4099     // Just promote the int type to i16 which will double the LMUL.
4100     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
4101     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
4102   }
4103 
4104   MVT XLenVT = Subtarget.getXLenVT();
4105   SDValue Mask, VL;
4106   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4107 
4108   // Calculate VLMAX-1 for the desired SEW.
4109   unsigned MinElts = VecVT.getVectorMinNumElements();
4110   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
4111                               DAG.getConstant(MinElts, DL, XLenVT));
4112   SDValue VLMinus1 =
4113       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
4114 
4115   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
4116   bool IsRV32E64 =
4117       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
4118   SDValue SplatVL;
4119   if (!IsRV32E64)
4120     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
4121   else
4122     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
4123 
4124   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
4125   SDValue Indices =
4126       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
4127 
4128   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
4129 }
4130 
4131 SDValue
4132 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
4133                                                      SelectionDAG &DAG) const {
4134   SDLoc DL(Op);
4135   auto *Load = cast<LoadSDNode>(Op);
4136 
4137   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4138                                         Load->getMemoryVT(),
4139                                         *Load->getMemOperand()) &&
4140          "Expecting a correctly-aligned load");
4141 
4142   MVT VT = Op.getSimpleValueType();
4143   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4144 
4145   SDValue VL =
4146       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4147 
4148   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4149   SDValue NewLoad = DAG.getMemIntrinsicNode(
4150       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
4151       Load->getMemoryVT(), Load->getMemOperand());
4152 
4153   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4154   return DAG.getMergeValues({Result, Load->getChain()}, DL);
4155 }
4156 
4157 SDValue
4158 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
4159                                                       SelectionDAG &DAG) const {
4160   SDLoc DL(Op);
4161   auto *Store = cast<StoreSDNode>(Op);
4162 
4163   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4164                                         Store->getMemoryVT(),
4165                                         *Store->getMemOperand()) &&
4166          "Expecting a correctly-aligned store");
4167 
4168   SDValue StoreVal = Store->getValue();
4169   MVT VT = StoreVal.getSimpleValueType();
4170 
4171   // If the size less than a byte, we need to pad with zeros to make a byte.
4172   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
4173     VT = MVT::v8i1;
4174     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
4175                            DAG.getConstant(0, DL, VT), StoreVal,
4176                            DAG.getIntPtrConstant(0, DL));
4177   }
4178 
4179   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4180 
4181   SDValue VL =
4182       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4183 
4184   SDValue NewValue =
4185       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
4186   return DAG.getMemIntrinsicNode(
4187       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
4188       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
4189       Store->getMemoryVT(), Store->getMemOperand());
4190 }
4191 
4192 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
4193   auto *Load = cast<MaskedLoadSDNode>(Op);
4194 
4195   SDLoc DL(Op);
4196   MVT VT = Op.getSimpleValueType();
4197   MVT XLenVT = Subtarget.getXLenVT();
4198 
4199   SDValue Mask = Load->getMask();
4200   SDValue PassThru = Load->getPassThru();
4201   SDValue VL;
4202 
4203   MVT ContainerVT = VT;
4204   if (VT.isFixedLengthVector()) {
4205     ContainerVT = getContainerForFixedLengthVector(VT);
4206     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4207 
4208     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4209     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4210     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4211   } else
4212     VL = DAG.getRegister(RISCV::X0, XLenVT);
4213 
4214   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4215   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
4216   SDValue Ops[] = {Load->getChain(),   IntID, PassThru,
4217                    Load->getBasePtr(), Mask,  VL};
4218   SDValue Result =
4219       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4220                               Load->getMemoryVT(), Load->getMemOperand());
4221   SDValue Chain = Result.getValue(1);
4222 
4223   if (VT.isFixedLengthVector())
4224     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4225 
4226   return DAG.getMergeValues({Result, Chain}, DL);
4227 }
4228 
4229 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const {
4230   auto *Store = cast<MaskedStoreSDNode>(Op);
4231 
4232   SDLoc DL(Op);
4233   SDValue Val = Store->getValue();
4234   SDValue Mask = Store->getMask();
4235   MVT VT = Val.getSimpleValueType();
4236   MVT XLenVT = Subtarget.getXLenVT();
4237   SDValue VL;
4238 
4239   MVT ContainerVT = VT;
4240   if (VT.isFixedLengthVector()) {
4241     ContainerVT = getContainerForFixedLengthVector(VT);
4242     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4243 
4244     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4245     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4246     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4247   } else
4248     VL = DAG.getRegister(RISCV::X0, XLenVT);
4249 
4250   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
4251   return DAG.getMemIntrinsicNode(
4252       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
4253       {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL},
4254       Store->getMemoryVT(), Store->getMemOperand());
4255 }
4256 
4257 SDValue
4258 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
4259                                                       SelectionDAG &DAG) const {
4260   MVT InVT = Op.getOperand(0).getSimpleValueType();
4261   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
4262 
4263   MVT VT = Op.getSimpleValueType();
4264 
4265   SDValue Op1 =
4266       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4267   SDValue Op2 =
4268       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4269 
4270   SDLoc DL(Op);
4271   SDValue VL =
4272       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4273 
4274   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4275   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4276 
4277   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
4278                             Op.getOperand(2), Mask, VL);
4279 
4280   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
4281 }
4282 
4283 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
4284     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
4285   MVT VT = Op.getSimpleValueType();
4286 
4287   if (VT.getVectorElementType() == MVT::i1)
4288     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
4289 
4290   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
4291 }
4292 
4293 // Lower vector ABS to smax(X, sub(0, X)).
4294 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
4295   SDLoc DL(Op);
4296   MVT VT = Op.getSimpleValueType();
4297   SDValue X = Op.getOperand(0);
4298 
4299   assert(VT.isFixedLengthVector() && "Unexpected type");
4300 
4301   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4302   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
4303 
4304   SDValue Mask, VL;
4305   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4306 
4307   SDValue SplatZero =
4308       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4309                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4310   SDValue NegX =
4311       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
4312   SDValue Max =
4313       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
4314 
4315   return convertFromScalableVector(VT, Max, DAG, Subtarget);
4316 }
4317 
4318 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
4319     SDValue Op, SelectionDAG &DAG) const {
4320   SDLoc DL(Op);
4321   MVT VT = Op.getSimpleValueType();
4322   SDValue Mag = Op.getOperand(0);
4323   SDValue Sign = Op.getOperand(1);
4324   assert(Mag.getValueType() == Sign.getValueType() &&
4325          "Can only handle COPYSIGN with matching types.");
4326 
4327   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4328   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
4329   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
4330 
4331   SDValue Mask, VL;
4332   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4333 
4334   SDValue CopySign =
4335       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
4336 
4337   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
4338 }
4339 
4340 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
4341     SDValue Op, SelectionDAG &DAG) const {
4342   MVT VT = Op.getSimpleValueType();
4343   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4344 
4345   MVT I1ContainerVT =
4346       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4347 
4348   SDValue CC =
4349       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
4350   SDValue Op1 =
4351       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4352   SDValue Op2 =
4353       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
4354 
4355   SDLoc DL(Op);
4356   SDValue Mask, VL;
4357   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4358 
4359   SDValue Select =
4360       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
4361 
4362   return convertFromScalableVector(VT, Select, DAG, Subtarget);
4363 }
4364 
4365 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
4366                                                unsigned NewOpc,
4367                                                bool HasMask) const {
4368   MVT VT = Op.getSimpleValueType();
4369   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4370 
4371   // Create list of operands by converting existing ones to scalable types.
4372   SmallVector<SDValue, 6> Ops;
4373   for (const SDValue &V : Op->op_values()) {
4374     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4375 
4376     // Pass through non-vector operands.
4377     if (!V.getValueType().isVector()) {
4378       Ops.push_back(V);
4379       continue;
4380     }
4381 
4382     // "cast" fixed length vector to a scalable vector.
4383     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
4384            "Only fixed length vectors are supported!");
4385     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4386   }
4387 
4388   SDLoc DL(Op);
4389   SDValue Mask, VL;
4390   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4391   if (HasMask)
4392     Ops.push_back(Mask);
4393   Ops.push_back(VL);
4394 
4395   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
4396   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
4397 }
4398 
4399 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
4400 // * Operands of each node are assumed to be in the same order.
4401 // * The EVL operand is promoted from i32 to i64 on RV64.
4402 // * Fixed-length vectors are converted to their scalable-vector container
4403 //   types.
4404 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
4405                                        unsigned RISCVISDOpc) const {
4406   SDLoc DL(Op);
4407   MVT VT = Op.getSimpleValueType();
4408   SmallVector<SDValue, 4> Ops;
4409 
4410   for (const auto &OpIdx : enumerate(Op->ops())) {
4411     SDValue V = OpIdx.value();
4412     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4413     // Pass through operands which aren't fixed-length vectors.
4414     if (!V.getValueType().isFixedLengthVector()) {
4415       Ops.push_back(V);
4416       continue;
4417     }
4418     // "cast" fixed length vector to a scalable vector.
4419     MVT OpVT = V.getSimpleValueType();
4420     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
4421     assert(useRVVForFixedLengthVectorVT(OpVT) &&
4422            "Only fixed length vectors are supported!");
4423     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4424   }
4425 
4426   if (!VT.isFixedLengthVector())
4427     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
4428 
4429   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4430 
4431   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
4432 
4433   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
4434 }
4435 
4436 // Custom lower MGATHER to a legalized form for RVV. It will then be matched to
4437 // a RVV indexed load. The RVV indexed load instructions only support the
4438 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4439 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4440 // indexing is extended to the XLEN value type and scaled accordingly.
4441 SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
4442   auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
4443   SDLoc DL(Op);
4444 
4445   SDValue Index = MGN->getIndex();
4446   SDValue Mask = MGN->getMask();
4447   SDValue PassThru = MGN->getPassThru();
4448 
4449   MVT VT = Op.getSimpleValueType();
4450   MVT IndexVT = Index.getSimpleValueType();
4451   MVT XLenVT = Subtarget.getXLenVT();
4452 
4453   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4454          "Unexpected VTs!");
4455   assert(MGN->getBasePtr().getSimpleValueType() == XLenVT &&
4456          "Unexpected pointer type");
4457   // Targets have to explicitly opt-in for extending vector loads.
4458   assert(MGN->getExtensionType() == ISD::NON_EXTLOAD &&
4459          "Unexpected extending MGATHER");
4460 
4461   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4462   // the selection of the masked intrinsics doesn't do this for us.
4463   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4464 
4465   SDValue VL;
4466   MVT ContainerVT = VT;
4467   if (VT.isFixedLengthVector()) {
4468     // We need to use the larger of the result and index type to determine the
4469     // scalable type to use so we don't increase LMUL for any operand/result.
4470     if (VT.bitsGE(IndexVT)) {
4471       ContainerVT = getContainerForFixedLengthVector(VT);
4472       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4473                                  ContainerVT.getVectorElementCount());
4474     } else {
4475       IndexVT = getContainerForFixedLengthVector(IndexVT);
4476       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
4477                                      IndexVT.getVectorElementCount());
4478     }
4479 
4480     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4481 
4482     if (!IsUnmasked) {
4483       MVT MaskVT =
4484           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4485       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4486       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4487     }
4488 
4489     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4490   } else
4491     VL = DAG.getRegister(RISCV::X0, XLenVT);
4492 
4493   unsigned IntID =
4494       IsUnmasked ? Intrinsic::riscv_vloxei : Intrinsic::riscv_vloxei_mask;
4495   SmallVector<SDValue, 8> Ops{MGN->getChain(),
4496                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4497   if (!IsUnmasked)
4498     Ops.push_back(PassThru);
4499   Ops.push_back(MGN->getBasePtr());
4500   Ops.push_back(Index);
4501   if (!IsUnmasked)
4502     Ops.push_back(Mask);
4503   Ops.push_back(VL);
4504 
4505   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4506   SDValue Result =
4507       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4508                               MGN->getMemoryVT(), MGN->getMemOperand());
4509   SDValue Chain = Result.getValue(1);
4510 
4511   if (VT.isFixedLengthVector())
4512     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4513 
4514   return DAG.getMergeValues({Result, Chain}, DL);
4515 }
4516 
4517 // Custom lower MSCATTER to a legalized form for RVV. It will then be matched to
4518 // a RVV indexed store. The RVV indexed store instructions only support the
4519 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4520 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4521 // indexing is extended to the XLEN value type and scaled accordingly.
4522 SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
4523                                            SelectionDAG &DAG) const {
4524   auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
4525   SDLoc DL(Op);
4526   SDValue Index = MSN->getIndex();
4527   SDValue Mask = MSN->getMask();
4528   SDValue Val = MSN->getValue();
4529 
4530   MVT VT = Val.getSimpleValueType();
4531   MVT IndexVT = Index.getSimpleValueType();
4532   MVT XLenVT = Subtarget.getXLenVT();
4533 
4534   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4535          "Unexpected VTs!");
4536   assert(MSN->getBasePtr().getSimpleValueType() == XLenVT &&
4537          "Unexpected pointer type");
4538   // Targets have to explicitly opt-in for extending vector loads and
4539   // truncating vector stores.
4540   assert(!MSN->isTruncatingStore() && "Unexpected extending MSCATTER");
4541 
4542   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4543   // the selection of the masked intrinsics doesn't do this for us.
4544   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4545 
4546   SDValue VL;
4547   if (VT.isFixedLengthVector()) {
4548     // We need to use the larger of the value and index type to determine the
4549     // scalable type to use so we don't increase LMUL for any operand/result.
4550     MVT ContainerVT;
4551     if (VT.bitsGE(IndexVT)) {
4552       ContainerVT = getContainerForFixedLengthVector(VT);
4553       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4554                                  ContainerVT.getVectorElementCount());
4555     } else {
4556       IndexVT = getContainerForFixedLengthVector(IndexVT);
4557       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4558                                      IndexVT.getVectorElementCount());
4559     }
4560 
4561     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4562     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4563 
4564     if (!IsUnmasked) {
4565       MVT MaskVT =
4566           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4567       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4568     }
4569 
4570     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4571   } else
4572     VL = DAG.getRegister(RISCV::X0, XLenVT);
4573 
4574   unsigned IntID =
4575       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
4576   SmallVector<SDValue, 8> Ops{MSN->getChain(),
4577                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4578   Ops.push_back(Val);
4579   Ops.push_back(MSN->getBasePtr());
4580   Ops.push_back(Index);
4581   if (!IsUnmasked)
4582     Ops.push_back(Mask);
4583   Ops.push_back(VL);
4584 
4585   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
4586                                  MSN->getMemoryVT(), MSN->getMemOperand());
4587 }
4588 
4589 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
4590                                                SelectionDAG &DAG) const {
4591   const MVT XLenVT = Subtarget.getXLenVT();
4592   SDLoc DL(Op);
4593   SDValue Chain = Op->getOperand(0);
4594   SDValue SysRegNo = DAG.getConstant(
4595       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4596   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
4597   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
4598 
4599   // Encoding used for rounding mode in RISCV differs from that used in
4600   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
4601   // table, which consists of a sequence of 4-bit fields, each representing
4602   // corresponding FLT_ROUNDS mode.
4603   static const int Table =
4604       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
4605       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
4606       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
4607       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
4608       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
4609 
4610   SDValue Shift =
4611       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
4612   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4613                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4614   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4615                                DAG.getConstant(7, DL, XLenVT));
4616 
4617   return DAG.getMergeValues({Masked, Chain}, DL);
4618 }
4619 
4620 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
4621                                                SelectionDAG &DAG) const {
4622   const MVT XLenVT = Subtarget.getXLenVT();
4623   SDLoc DL(Op);
4624   SDValue Chain = Op->getOperand(0);
4625   SDValue RMValue = Op->getOperand(1);
4626   SDValue SysRegNo = DAG.getConstant(
4627       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4628 
4629   // Encoding used for rounding mode in RISCV differs from that used in
4630   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
4631   // a table, which consists of a sequence of 4-bit fields, each representing
4632   // corresponding RISCV mode.
4633   static const unsigned Table =
4634       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
4635       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
4636       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
4637       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
4638       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
4639 
4640   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
4641                               DAG.getConstant(2, DL, XLenVT));
4642   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4643                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4644   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4645                         DAG.getConstant(0x7, DL, XLenVT));
4646   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
4647                      RMValue);
4648 }
4649 
4650 // Returns the opcode of the target-specific SDNode that implements the 32-bit
4651 // form of the given Opcode.
4652 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
4653   switch (Opcode) {
4654   default:
4655     llvm_unreachable("Unexpected opcode");
4656   case ISD::SHL:
4657     return RISCVISD::SLLW;
4658   case ISD::SRA:
4659     return RISCVISD::SRAW;
4660   case ISD::SRL:
4661     return RISCVISD::SRLW;
4662   case ISD::SDIV:
4663     return RISCVISD::DIVW;
4664   case ISD::UDIV:
4665     return RISCVISD::DIVUW;
4666   case ISD::UREM:
4667     return RISCVISD::REMUW;
4668   case ISD::ROTL:
4669     return RISCVISD::ROLW;
4670   case ISD::ROTR:
4671     return RISCVISD::RORW;
4672   case RISCVISD::GREV:
4673     return RISCVISD::GREVW;
4674   case RISCVISD::GORC:
4675     return RISCVISD::GORCW;
4676   }
4677 }
4678 
4679 // Converts the given 32-bit operation to a target-specific SelectionDAG node.
4680 // Because i32 isn't a legal type for RV64, these operations would otherwise
4681 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
4682 // later one because the fact the operation was originally of type i32 is
4683 // lost.
4684 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
4685                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
4686   SDLoc DL(N);
4687   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4688   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
4689   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
4690   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4691   // ReplaceNodeResults requires we maintain the same type for the return value.
4692   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
4693 }
4694 
4695 // Converts the given 32-bit operation to a i64 operation with signed extension
4696 // semantic to reduce the signed extension instructions.
4697 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
4698   SDLoc DL(N);
4699   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4700   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4701   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
4702   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
4703                                DAG.getValueType(MVT::i32));
4704   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
4705 }
4706 
4707 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
4708                                              SmallVectorImpl<SDValue> &Results,
4709                                              SelectionDAG &DAG) const {
4710   SDLoc DL(N);
4711   switch (N->getOpcode()) {
4712   default:
4713     llvm_unreachable("Don't know how to custom type legalize this operation!");
4714   case ISD::STRICT_FP_TO_SINT:
4715   case ISD::STRICT_FP_TO_UINT:
4716   case ISD::FP_TO_SINT:
4717   case ISD::FP_TO_UINT: {
4718     bool IsStrict = N->isStrictFPOpcode();
4719     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4720            "Unexpected custom legalisation");
4721     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
4722     // If the FP type needs to be softened, emit a library call using the 'si'
4723     // version. If we left it to default legalization we'd end up with 'di'. If
4724     // the FP type doesn't need to be softened just let generic type
4725     // legalization promote the result type.
4726     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
4727         TargetLowering::TypeSoftenFloat)
4728       return;
4729     RTLIB::Libcall LC;
4730     if (N->getOpcode() == ISD::FP_TO_SINT ||
4731         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
4732       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
4733     else
4734       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
4735     MakeLibCallOptions CallOptions;
4736     EVT OpVT = Op0.getValueType();
4737     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
4738     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
4739     SDValue Result;
4740     std::tie(Result, Chain) =
4741         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
4742     Results.push_back(Result);
4743     if (IsStrict)
4744       Results.push_back(Chain);
4745     break;
4746   }
4747   case ISD::READCYCLECOUNTER: {
4748     assert(!Subtarget.is64Bit() &&
4749            "READCYCLECOUNTER only has custom type legalization on riscv32");
4750 
4751     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4752     SDValue RCW =
4753         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
4754 
4755     Results.push_back(
4756         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
4757     Results.push_back(RCW.getValue(2));
4758     break;
4759   }
4760   case ISD::MUL: {
4761     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
4762     unsigned XLen = Subtarget.getXLen();
4763     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
4764     if (Size > XLen) {
4765       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
4766       SDValue LHS = N->getOperand(0);
4767       SDValue RHS = N->getOperand(1);
4768       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
4769 
4770       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
4771       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
4772       // We need exactly one side to be unsigned.
4773       if (LHSIsU == RHSIsU)
4774         return;
4775 
4776       auto MakeMULPair = [&](SDValue S, SDValue U) {
4777         MVT XLenVT = Subtarget.getXLenVT();
4778         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
4779         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
4780         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
4781         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
4782         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
4783       };
4784 
4785       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
4786       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
4787 
4788       // The other operand should be signed, but still prefer MULH when
4789       // possible.
4790       if (RHSIsU && LHSIsS && !RHSIsS)
4791         Results.push_back(MakeMULPair(LHS, RHS));
4792       else if (LHSIsU && RHSIsS && !LHSIsS)
4793         Results.push_back(MakeMULPair(RHS, LHS));
4794 
4795       return;
4796     }
4797     LLVM_FALLTHROUGH;
4798   }
4799   case ISD::ADD:
4800   case ISD::SUB:
4801     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4802            "Unexpected custom legalisation");
4803     if (N->getOperand(1).getOpcode() == ISD::Constant)
4804       return;
4805     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
4806     break;
4807   case ISD::SHL:
4808   case ISD::SRA:
4809   case ISD::SRL:
4810     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4811            "Unexpected custom legalisation");
4812     if (N->getOperand(1).getOpcode() == ISD::Constant)
4813       return;
4814     Results.push_back(customLegalizeToWOp(N, DAG));
4815     break;
4816   case ISD::ROTL:
4817   case ISD::ROTR:
4818     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4819            "Unexpected custom legalisation");
4820     Results.push_back(customLegalizeToWOp(N, DAG));
4821     break;
4822   case ISD::CTTZ:
4823   case ISD::CTTZ_ZERO_UNDEF:
4824   case ISD::CTLZ:
4825   case ISD::CTLZ_ZERO_UNDEF: {
4826     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4827            "Unexpected custom legalisation");
4828 
4829     SDValue NewOp0 =
4830         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4831     bool IsCTZ =
4832         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
4833     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
4834     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
4835     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4836     return;
4837   }
4838   case ISD::SDIV:
4839   case ISD::UDIV:
4840   case ISD::UREM: {
4841     MVT VT = N->getSimpleValueType(0);
4842     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
4843            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
4844            "Unexpected custom legalisation");
4845     if (N->getOperand(0).getOpcode() == ISD::Constant ||
4846         N->getOperand(1).getOpcode() == ISD::Constant)
4847       return;
4848 
4849     // If the input is i32, use ANY_EXTEND since the W instructions don't read
4850     // the upper 32 bits. For other types we need to sign or zero extend
4851     // based on the opcode.
4852     unsigned ExtOpc = ISD::ANY_EXTEND;
4853     if (VT != MVT::i32)
4854       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
4855                                            : ISD::ZERO_EXTEND;
4856 
4857     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
4858     break;
4859   }
4860   case ISD::UADDO:
4861   case ISD::USUBO: {
4862     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4863            "Unexpected custom legalisation");
4864     bool IsAdd = N->getOpcode() == ISD::UADDO;
4865     // Create an ADDW or SUBW.
4866     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4867     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4868     SDValue Res =
4869         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
4870     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
4871                       DAG.getValueType(MVT::i32));
4872 
4873     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
4874     // Since the inputs are sign extended from i32, this is equivalent to
4875     // comparing the lower 32 bits.
4876     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4877     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
4878                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
4879 
4880     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4881     Results.push_back(Overflow);
4882     return;
4883   }
4884   case ISD::UADDSAT:
4885   case ISD::USUBSAT: {
4886     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4887            "Unexpected custom legalisation");
4888     if (Subtarget.hasStdExtZbb()) {
4889       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
4890       // sign extend allows overflow of the lower 32 bits to be detected on
4891       // the promoted size.
4892       SDValue LHS =
4893           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4894       SDValue RHS =
4895           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
4896       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
4897       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4898       return;
4899     }
4900 
4901     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
4902     // promotion for UADDO/USUBO.
4903     Results.push_back(expandAddSubSat(N, DAG));
4904     return;
4905   }
4906   case ISD::BITCAST: {
4907     EVT VT = N->getValueType(0);
4908     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
4909     SDValue Op0 = N->getOperand(0);
4910     EVT Op0VT = Op0.getValueType();
4911     MVT XLenVT = Subtarget.getXLenVT();
4912     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
4913       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
4914       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
4915     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
4916                Subtarget.hasStdExtF()) {
4917       SDValue FPConv =
4918           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
4919       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
4920     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
4921                isTypeLegal(Op0VT)) {
4922       // Custom-legalize bitcasts from fixed-length vector types to illegal
4923       // scalar types in order to improve codegen. Bitcast the vector to a
4924       // one-element vector type whose element type is the same as the result
4925       // type, and extract the first element.
4926       LLVMContext &Context = *DAG.getContext();
4927       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
4928       Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
4929                                     DAG.getConstant(0, DL, XLenVT)));
4930     }
4931     break;
4932   }
4933   case RISCVISD::GREV:
4934   case RISCVISD::GORC: {
4935     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4936            "Unexpected custom legalisation");
4937     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
4938     // This is similar to customLegalizeToWOp, except that we pass the second
4939     // operand (a TargetConstant) straight through: it is already of type
4940     // XLenVT.
4941     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4942     SDValue NewOp0 =
4943         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4944     SDValue NewOp1 =
4945         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4946     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4947     // ReplaceNodeResults requires we maintain the same type for the return
4948     // value.
4949     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4950     break;
4951   }
4952   case RISCVISD::SHFL: {
4953     // There is no SHFLIW instruction, but we can just promote the operation.
4954     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4955            "Unexpected custom legalisation");
4956     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
4957     SDValue NewOp0 =
4958         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4959     SDValue NewOp1 =
4960         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4961     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
4962     // ReplaceNodeResults requires we maintain the same type for the return
4963     // value.
4964     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4965     break;
4966   }
4967   case ISD::BSWAP:
4968   case ISD::BITREVERSE: {
4969     MVT VT = N->getSimpleValueType(0);
4970     MVT XLenVT = Subtarget.getXLenVT();
4971     assert((VT == MVT::i8 || VT == MVT::i16 ||
4972             (VT == MVT::i32 && Subtarget.is64Bit())) &&
4973            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
4974     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
4975     unsigned Imm = VT.getSizeInBits() - 1;
4976     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
4977     if (N->getOpcode() == ISD::BSWAP)
4978       Imm &= ~0x7U;
4979     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
4980     SDValue GREVI =
4981         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
4982     // ReplaceNodeResults requires we maintain the same type for the return
4983     // value.
4984     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
4985     break;
4986   }
4987   case ISD::FSHL:
4988   case ISD::FSHR: {
4989     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4990            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
4991     SDValue NewOp0 =
4992         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4993     SDValue NewOp1 =
4994         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4995     SDValue NewOp2 =
4996         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4997     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
4998     // Mask the shift amount to 5 bits.
4999     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5000                          DAG.getConstant(0x1f, DL, MVT::i64));
5001     unsigned Opc =
5002         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
5003     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
5004     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
5005     break;
5006   }
5007   case ISD::EXTRACT_VECTOR_ELT: {
5008     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
5009     // type is illegal (currently only vXi64 RV32).
5010     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
5011     // transferred to the destination register. We issue two of these from the
5012     // upper- and lower- halves of the SEW-bit vector element, slid down to the
5013     // first element.
5014     SDValue Vec = N->getOperand(0);
5015     SDValue Idx = N->getOperand(1);
5016 
5017     // The vector type hasn't been legalized yet so we can't issue target
5018     // specific nodes if it needs legalization.
5019     // FIXME: We would manually legalize if it's important.
5020     if (!isTypeLegal(Vec.getValueType()))
5021       return;
5022 
5023     MVT VecVT = Vec.getSimpleValueType();
5024 
5025     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
5026            VecVT.getVectorElementType() == MVT::i64 &&
5027            "Unexpected EXTRACT_VECTOR_ELT legalization");
5028 
5029     // If this is a fixed vector, we need to convert it to a scalable vector.
5030     MVT ContainerVT = VecVT;
5031     if (VecVT.isFixedLengthVector()) {
5032       ContainerVT = getContainerForFixedLengthVector(VecVT);
5033       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5034     }
5035 
5036     MVT XLenVT = Subtarget.getXLenVT();
5037 
5038     // Use a VL of 1 to avoid processing more elements than we need.
5039     MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5040     SDValue VL = DAG.getConstant(1, DL, XLenVT);
5041     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5042 
5043     // Unless the index is known to be 0, we must slide the vector down to get
5044     // the desired element into index 0.
5045     if (!isNullConstant(Idx)) {
5046       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5047                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
5048     }
5049 
5050     // Extract the lower XLEN bits of the correct vector element.
5051     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5052 
5053     // To extract the upper XLEN bits of the vector element, shift the first
5054     // element right by 32 bits and re-extract the lower XLEN bits.
5055     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5056                                      DAG.getConstant(32, DL, XLenVT), VL);
5057     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
5058                                  ThirtyTwoV, Mask, VL);
5059 
5060     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5061 
5062     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5063     break;
5064   }
5065   case ISD::INTRINSIC_WO_CHAIN: {
5066     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5067     switch (IntNo) {
5068     default:
5069       llvm_unreachable(
5070           "Don't know how to custom type legalize this intrinsic!");
5071     case Intrinsic::riscv_orc_b: {
5072       // Lower to the GORCI encoding for orc.b with the operand extended.
5073       SDValue NewOp =
5074           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5075       // If Zbp is enabled, use GORCIW which will sign extend the result.
5076       unsigned Opc =
5077           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
5078       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
5079                                 DAG.getConstant(7, DL, MVT::i64));
5080       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5081       return;
5082     }
5083     case Intrinsic::riscv_grev:
5084     case Intrinsic::riscv_gorc: {
5085       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5086              "Unexpected custom legalisation");
5087       SDValue NewOp1 =
5088           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5089       SDValue NewOp2 =
5090           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5091       unsigned Opc =
5092           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
5093       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5094       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5095       break;
5096     }
5097     case Intrinsic::riscv_shfl:
5098     case Intrinsic::riscv_unshfl: {
5099       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5100              "Unexpected custom legalisation");
5101       SDValue NewOp1 =
5102           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5103       SDValue NewOp2 =
5104           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5105       unsigned Opc =
5106           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
5107       if (isa<ConstantSDNode>(N->getOperand(2))) {
5108         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5109                              DAG.getConstant(0xf, DL, MVT::i64));
5110         Opc =
5111             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
5112       }
5113       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5114       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5115       break;
5116     }
5117     case Intrinsic::riscv_bcompress:
5118     case Intrinsic::riscv_bdecompress: {
5119       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5120              "Unexpected custom legalisation");
5121       SDValue NewOp1 =
5122           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5123       SDValue NewOp2 =
5124           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5125       unsigned Opc = IntNo == Intrinsic::riscv_bcompress
5126                          ? RISCVISD::BCOMPRESSW
5127                          : RISCVISD::BDECOMPRESSW;
5128       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5129       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5130       break;
5131     }
5132     case Intrinsic::riscv_vmv_x_s: {
5133       EVT VT = N->getValueType(0);
5134       MVT XLenVT = Subtarget.getXLenVT();
5135       if (VT.bitsLT(XLenVT)) {
5136         // Simple case just extract using vmv.x.s and truncate.
5137         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
5138                                       Subtarget.getXLenVT(), N->getOperand(1));
5139         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
5140         return;
5141       }
5142 
5143       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
5144              "Unexpected custom legalization");
5145 
5146       // We need to do the move in two steps.
5147       SDValue Vec = N->getOperand(1);
5148       MVT VecVT = Vec.getSimpleValueType();
5149 
5150       // First extract the lower XLEN bits of the element.
5151       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5152 
5153       // To extract the upper XLEN bits of the vector element, shift the first
5154       // element right by 32 bits and re-extract the lower XLEN bits.
5155       SDValue VL = DAG.getConstant(1, DL, XLenVT);
5156       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5157       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5158       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
5159                                        DAG.getConstant(32, DL, XLenVT), VL);
5160       SDValue LShr32 =
5161           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
5162       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5163 
5164       Results.push_back(
5165           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5166       break;
5167     }
5168     }
5169     break;
5170   }
5171   case ISD::VECREDUCE_ADD:
5172   case ISD::VECREDUCE_AND:
5173   case ISD::VECREDUCE_OR:
5174   case ISD::VECREDUCE_XOR:
5175   case ISD::VECREDUCE_SMAX:
5176   case ISD::VECREDUCE_UMAX:
5177   case ISD::VECREDUCE_SMIN:
5178   case ISD::VECREDUCE_UMIN:
5179     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
5180       Results.push_back(V);
5181     break;
5182   case ISD::FLT_ROUNDS_: {
5183     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
5184     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
5185     Results.push_back(Res.getValue(0));
5186     Results.push_back(Res.getValue(1));
5187     break;
5188   }
5189   }
5190 }
5191 
5192 // A structure to hold one of the bit-manipulation patterns below. Together, a
5193 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
5194 //   (or (and (shl x, 1), 0xAAAAAAAA),
5195 //       (and (srl x, 1), 0x55555555))
5196 struct RISCVBitmanipPat {
5197   SDValue Op;
5198   unsigned ShAmt;
5199   bool IsSHL;
5200 
5201   bool formsPairWith(const RISCVBitmanipPat &Other) const {
5202     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
5203   }
5204 };
5205 
5206 // Matches patterns of the form
5207 //   (and (shl x, C2), (C1 << C2))
5208 //   (and (srl x, C2), C1)
5209 //   (shl (and x, C1), C2)
5210 //   (srl (and x, (C1 << C2)), C2)
5211 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
5212 // The expected masks for each shift amount are specified in BitmanipMasks where
5213 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
5214 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
5215 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
5216 // XLen is 64.
5217 static Optional<RISCVBitmanipPat>
5218 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
5219   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
5220          "Unexpected number of masks");
5221   Optional<uint64_t> Mask;
5222   // Optionally consume a mask around the shift operation.
5223   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
5224     Mask = Op.getConstantOperandVal(1);
5225     Op = Op.getOperand(0);
5226   }
5227   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
5228     return None;
5229   bool IsSHL = Op.getOpcode() == ISD::SHL;
5230 
5231   if (!isa<ConstantSDNode>(Op.getOperand(1)))
5232     return None;
5233   uint64_t ShAmt = Op.getConstantOperandVal(1);
5234 
5235   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5236   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
5237     return None;
5238   // If we don't have enough masks for 64 bit, then we must be trying to
5239   // match SHFL so we're only allowed to shift 1/4 of the width.
5240   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
5241     return None;
5242 
5243   SDValue Src = Op.getOperand(0);
5244 
5245   // The expected mask is shifted left when the AND is found around SHL
5246   // patterns.
5247   //   ((x >> 1) & 0x55555555)
5248   //   ((x << 1) & 0xAAAAAAAA)
5249   bool SHLExpMask = IsSHL;
5250 
5251   if (!Mask) {
5252     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
5253     // the mask is all ones: consume that now.
5254     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
5255       Mask = Src.getConstantOperandVal(1);
5256       Src = Src.getOperand(0);
5257       // The expected mask is now in fact shifted left for SRL, so reverse the
5258       // decision.
5259       //   ((x & 0xAAAAAAAA) >> 1)
5260       //   ((x & 0x55555555) << 1)
5261       SHLExpMask = !SHLExpMask;
5262     } else {
5263       // Use a default shifted mask of all-ones if there's no AND, truncated
5264       // down to the expected width. This simplifies the logic later on.
5265       Mask = maskTrailingOnes<uint64_t>(Width);
5266       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
5267     }
5268   }
5269 
5270   unsigned MaskIdx = Log2_32(ShAmt);
5271   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5272 
5273   if (SHLExpMask)
5274     ExpMask <<= ShAmt;
5275 
5276   if (Mask != ExpMask)
5277     return None;
5278 
5279   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
5280 }
5281 
5282 // Matches any of the following bit-manipulation patterns:
5283 //   (and (shl x, 1), (0x55555555 << 1))
5284 //   (and (srl x, 1), 0x55555555)
5285 //   (shl (and x, 0x55555555), 1)
5286 //   (srl (and x, (0x55555555 << 1)), 1)
5287 // where the shift amount and mask may vary thus:
5288 //   [1]  = 0x55555555 / 0xAAAAAAAA
5289 //   [2]  = 0x33333333 / 0xCCCCCCCC
5290 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
5291 //   [8]  = 0x00FF00FF / 0xFF00FF00
5292 //   [16] = 0x0000FFFF / 0xFFFFFFFF
5293 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
5294 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
5295   // These are the unshifted masks which we use to match bit-manipulation
5296   // patterns. They may be shifted left in certain circumstances.
5297   static const uint64_t BitmanipMasks[] = {
5298       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
5299       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
5300 
5301   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5302 }
5303 
5304 // Match the following pattern as a GREVI(W) operation
5305 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
5306 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
5307                                const RISCVSubtarget &Subtarget) {
5308   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
5309   EVT VT = Op.getValueType();
5310 
5311   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5312     auto LHS = matchGREVIPat(Op.getOperand(0));
5313     auto RHS = matchGREVIPat(Op.getOperand(1));
5314     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
5315       SDLoc DL(Op);
5316       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
5317                          DAG.getConstant(LHS->ShAmt, DL, VT));
5318     }
5319   }
5320   return SDValue();
5321 }
5322 
5323 // Matches any the following pattern as a GORCI(W) operation
5324 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
5325 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
5326 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
5327 // Note that with the variant of 3.,
5328 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
5329 // the inner pattern will first be matched as GREVI and then the outer
5330 // pattern will be matched to GORC via the first rule above.
5331 // 4.  (or (rotl/rotr x, bitwidth/2), x)
5332 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
5333                                const RISCVSubtarget &Subtarget) {
5334   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
5335   EVT VT = Op.getValueType();
5336 
5337   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5338     SDLoc DL(Op);
5339     SDValue Op0 = Op.getOperand(0);
5340     SDValue Op1 = Op.getOperand(1);
5341 
5342     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
5343       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
5344           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
5345           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
5346         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
5347       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
5348       if ((Reverse.getOpcode() == ISD::ROTL ||
5349            Reverse.getOpcode() == ISD::ROTR) &&
5350           Reverse.getOperand(0) == X &&
5351           isa<ConstantSDNode>(Reverse.getOperand(1))) {
5352         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
5353         if (RotAmt == (VT.getSizeInBits() / 2))
5354           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
5355                              DAG.getConstant(RotAmt, DL, VT));
5356       }
5357       return SDValue();
5358     };
5359 
5360     // Check for either commutable permutation of (or (GREVI x, shamt), x)
5361     if (SDValue V = MatchOROfReverse(Op0, Op1))
5362       return V;
5363     if (SDValue V = MatchOROfReverse(Op1, Op0))
5364       return V;
5365 
5366     // OR is commutable so canonicalize its OR operand to the left
5367     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
5368       std::swap(Op0, Op1);
5369     if (Op0.getOpcode() != ISD::OR)
5370       return SDValue();
5371     SDValue OrOp0 = Op0.getOperand(0);
5372     SDValue OrOp1 = Op0.getOperand(1);
5373     auto LHS = matchGREVIPat(OrOp0);
5374     // OR is commutable so swap the operands and try again: x might have been
5375     // on the left
5376     if (!LHS) {
5377       std::swap(OrOp0, OrOp1);
5378       LHS = matchGREVIPat(OrOp0);
5379     }
5380     auto RHS = matchGREVIPat(Op1);
5381     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
5382       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
5383                          DAG.getConstant(LHS->ShAmt, DL, VT));
5384     }
5385   }
5386   return SDValue();
5387 }
5388 
5389 // Matches any of the following bit-manipulation patterns:
5390 //   (and (shl x, 1), (0x22222222 << 1))
5391 //   (and (srl x, 1), 0x22222222)
5392 //   (shl (and x, 0x22222222), 1)
5393 //   (srl (and x, (0x22222222 << 1)), 1)
5394 // where the shift amount and mask may vary thus:
5395 //   [1]  = 0x22222222 / 0x44444444
5396 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
5397 //   [4]  = 0x00F000F0 / 0x0F000F00
5398 //   [8]  = 0x0000FF00 / 0x00FF0000
5399 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
5400 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
5401   // These are the unshifted masks which we use to match bit-manipulation
5402   // patterns. They may be shifted left in certain circumstances.
5403   static const uint64_t BitmanipMasks[] = {
5404       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
5405       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
5406 
5407   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5408 }
5409 
5410 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
5411 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
5412                                const RISCVSubtarget &Subtarget) {
5413   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
5414   EVT VT = Op.getValueType();
5415 
5416   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
5417     return SDValue();
5418 
5419   SDValue Op0 = Op.getOperand(0);
5420   SDValue Op1 = Op.getOperand(1);
5421 
5422   // Or is commutable so canonicalize the second OR to the LHS.
5423   if (Op0.getOpcode() != ISD::OR)
5424     std::swap(Op0, Op1);
5425   if (Op0.getOpcode() != ISD::OR)
5426     return SDValue();
5427 
5428   // We found an inner OR, so our operands are the operands of the inner OR
5429   // and the other operand of the outer OR.
5430   SDValue A = Op0.getOperand(0);
5431   SDValue B = Op0.getOperand(1);
5432   SDValue C = Op1;
5433 
5434   auto Match1 = matchSHFLPat(A);
5435   auto Match2 = matchSHFLPat(B);
5436 
5437   // If neither matched, we failed.
5438   if (!Match1 && !Match2)
5439     return SDValue();
5440 
5441   // We had at least one match. if one failed, try the remaining C operand.
5442   if (!Match1) {
5443     std::swap(A, C);
5444     Match1 = matchSHFLPat(A);
5445     if (!Match1)
5446       return SDValue();
5447   } else if (!Match2) {
5448     std::swap(B, C);
5449     Match2 = matchSHFLPat(B);
5450     if (!Match2)
5451       return SDValue();
5452   }
5453   assert(Match1 && Match2);
5454 
5455   // Make sure our matches pair up.
5456   if (!Match1->formsPairWith(*Match2))
5457     return SDValue();
5458 
5459   // All the remains is to make sure C is an AND with the same input, that masks
5460   // out the bits that are being shuffled.
5461   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
5462       C.getOperand(0) != Match1->Op)
5463     return SDValue();
5464 
5465   uint64_t Mask = C.getConstantOperandVal(1);
5466 
5467   static const uint64_t BitmanipMasks[] = {
5468       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
5469       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
5470   };
5471 
5472   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5473   unsigned MaskIdx = Log2_32(Match1->ShAmt);
5474   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5475 
5476   if (Mask != ExpMask)
5477     return SDValue();
5478 
5479   SDLoc DL(Op);
5480   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
5481                      DAG.getConstant(Match1->ShAmt, DL, VT));
5482 }
5483 
5484 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
5485 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
5486 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
5487 // not undo itself, but they are redundant.
5488 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
5489   SDValue Src = N->getOperand(0);
5490 
5491   if (Src.getOpcode() != N->getOpcode())
5492     return SDValue();
5493 
5494   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
5495       !isa<ConstantSDNode>(Src.getOperand(1)))
5496     return SDValue();
5497 
5498   unsigned ShAmt1 = N->getConstantOperandVal(1);
5499   unsigned ShAmt2 = Src.getConstantOperandVal(1);
5500   Src = Src.getOperand(0);
5501 
5502   unsigned CombinedShAmt;
5503   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
5504     CombinedShAmt = ShAmt1 | ShAmt2;
5505   else
5506     CombinedShAmt = ShAmt1 ^ ShAmt2;
5507 
5508   if (CombinedShAmt == 0)
5509     return Src;
5510 
5511   SDLoc DL(N);
5512   return DAG.getNode(
5513       N->getOpcode(), DL, N->getValueType(0), Src,
5514       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
5515 }
5516 
5517 // Combine a constant select operand into its use:
5518 //
5519 // (and (select_cc lhs, rhs, cc, -1, c), x)
5520 //   -> (select_cc lhs, rhs, cc, x, (and, x, c))  [AllOnes=1]
5521 // (or  (select_cc lhs, rhs, cc, 0, c), x)
5522 //   -> (select_cc lhs, rhs, cc, x, (or, x, c))  [AllOnes=0]
5523 // (xor (select_cc lhs, rhs, cc, 0, c), x)
5524 //   -> (select_cc lhs, rhs, cc, x, (xor, x, c))  [AllOnes=0]
5525 static SDValue combineSelectCCAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
5526                                      SelectionDAG &DAG, bool AllOnes) {
5527   EVT VT = N->getValueType(0);
5528 
5529   if (Slct.getOpcode() != RISCVISD::SELECT_CC || !Slct.hasOneUse())
5530     return SDValue();
5531 
5532   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
5533     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
5534   };
5535 
5536   bool SwapSelectOps;
5537   SDValue TrueVal = Slct.getOperand(3);
5538   SDValue FalseVal = Slct.getOperand(4);
5539   SDValue NonConstantVal;
5540   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
5541     SwapSelectOps = false;
5542     NonConstantVal = FalseVal;
5543   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
5544     SwapSelectOps = true;
5545     NonConstantVal = TrueVal;
5546   } else
5547     return SDValue();
5548 
5549   // Slct is now know to be the desired identity constant when CC is true.
5550   TrueVal = OtherOp;
5551   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
5552   // Unless SwapSelectOps says CC should be false.
5553   if (SwapSelectOps)
5554     std::swap(TrueVal, FalseVal);
5555 
5556   return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
5557                      {Slct.getOperand(0), Slct.getOperand(1),
5558                       Slct.getOperand(2), TrueVal, FalseVal});
5559 }
5560 
5561 // Attempt combineSelectAndUse on each operand of a commutative operator N.
5562 static SDValue combineSelectCCAndUseCommutative(SDNode *N, SelectionDAG &DAG,
5563                                                 bool AllOnes) {
5564   SDValue N0 = N->getOperand(0);
5565   SDValue N1 = N->getOperand(1);
5566   if (SDValue Result = combineSelectCCAndUse(N, N0, N1, DAG, AllOnes))
5567     return Result;
5568   if (SDValue Result = combineSelectCCAndUse(N, N1, N0, DAG, AllOnes))
5569     return Result;
5570   return SDValue();
5571 }
5572 
5573 static SDValue performANDCombine(SDNode *N,
5574                                  TargetLowering::DAGCombinerInfo &DCI,
5575                                  const RISCVSubtarget &Subtarget) {
5576   SelectionDAG &DAG = DCI.DAG;
5577 
5578   // fold (and (select_cc lhs, rhs, cc, -1, y), x) ->
5579   //      (select lhs, rhs, cc, x, (and x, y))
5580   return combineSelectCCAndUseCommutative(N, DAG, true);
5581 }
5582 
5583 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5584                                 const RISCVSubtarget &Subtarget) {
5585   SelectionDAG &DAG = DCI.DAG;
5586   if (Subtarget.hasStdExtZbp()) {
5587     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
5588       return GREV;
5589     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
5590       return GORC;
5591     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
5592       return SHFL;
5593   }
5594 
5595   // fold (or (select_cc lhs, rhs, cc, 0, y), x) ->
5596   //      (select lhs, rhs, cc, x, (or x, y))
5597   return combineSelectCCAndUseCommutative(N, DAG, false);
5598 }
5599 
5600 static SDValue performXORCombine(SDNode *N,
5601                                  TargetLowering::DAGCombinerInfo &DCI,
5602                                  const RISCVSubtarget &Subtarget) {
5603   SelectionDAG &DAG = DCI.DAG;
5604 
5605   // fold (xor (select_cc lhs, rhs, cc, 0, y), x) ->
5606   //      (select lhs, rhs, cc, x, (xor x, y))
5607   return combineSelectCCAndUseCommutative(N, DAG, false);
5608 }
5609 
5610 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
5611 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
5612 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
5613 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
5614 // ADDW/SUBW/MULW.
5615 static SDValue performANY_EXTENDCombine(SDNode *N,
5616                                         TargetLowering::DAGCombinerInfo &DCI,
5617                                         const RISCVSubtarget &Subtarget) {
5618   if (!Subtarget.is64Bit())
5619     return SDValue();
5620 
5621   SelectionDAG &DAG = DCI.DAG;
5622 
5623   SDValue Src = N->getOperand(0);
5624   EVT VT = N->getValueType(0);
5625   if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
5626     return SDValue();
5627 
5628   // The opcode must be one that can implicitly sign_extend.
5629   // FIXME: Additional opcodes.
5630   switch (Src.getOpcode()) {
5631   default:
5632     return SDValue();
5633   case ISD::MUL:
5634     if (!Subtarget.hasStdExtM())
5635       return SDValue();
5636     LLVM_FALLTHROUGH;
5637   case ISD::ADD:
5638   case ISD::SUB:
5639     break;
5640   }
5641 
5642   SmallVector<SDNode *, 4> SetCCs;
5643   for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
5644                             UE = Src.getNode()->use_end();
5645        UI != UE; ++UI) {
5646     SDNode *User = *UI;
5647     if (User == N)
5648       continue;
5649     if (UI.getUse().getResNo() != Src.getResNo())
5650       continue;
5651     // All i32 setccs are legalized by sign extending operands.
5652     if (User->getOpcode() == ISD::SETCC) {
5653       SetCCs.push_back(User);
5654       continue;
5655     }
5656     // We don't know if we can extend this user.
5657     break;
5658   }
5659 
5660   // If we don't have any SetCCs, this isn't worthwhile.
5661   if (SetCCs.empty())
5662     return SDValue();
5663 
5664   SDLoc DL(N);
5665   SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
5666   DCI.CombineTo(N, SExt);
5667 
5668   // Promote all the setccs.
5669   for (SDNode *SetCC : SetCCs) {
5670     SmallVector<SDValue, 4> Ops;
5671 
5672     for (unsigned j = 0; j != 2; ++j) {
5673       SDValue SOp = SetCC->getOperand(j);
5674       if (SOp == Src)
5675         Ops.push_back(SExt);
5676       else
5677         Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
5678     }
5679 
5680     Ops.push_back(SetCC->getOperand(2));
5681     DCI.CombineTo(SetCC,
5682                   DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
5683   }
5684   return SDValue(N, 0);
5685 }
5686 
5687 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
5688                                                DAGCombinerInfo &DCI) const {
5689   SelectionDAG &DAG = DCI.DAG;
5690 
5691   switch (N->getOpcode()) {
5692   default:
5693     break;
5694   case RISCVISD::SplitF64: {
5695     SDValue Op0 = N->getOperand(0);
5696     // If the input to SplitF64 is just BuildPairF64 then the operation is
5697     // redundant. Instead, use BuildPairF64's operands directly.
5698     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
5699       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
5700 
5701     SDLoc DL(N);
5702 
5703     // It's cheaper to materialise two 32-bit integers than to load a double
5704     // from the constant pool and transfer it to integer registers through the
5705     // stack.
5706     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
5707       APInt V = C->getValueAPF().bitcastToAPInt();
5708       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
5709       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
5710       return DCI.CombineTo(N, Lo, Hi);
5711     }
5712 
5713     // This is a target-specific version of a DAGCombine performed in
5714     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5715     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5716     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5717     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5718         !Op0.getNode()->hasOneUse())
5719       break;
5720     SDValue NewSplitF64 =
5721         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
5722                     Op0.getOperand(0));
5723     SDValue Lo = NewSplitF64.getValue(0);
5724     SDValue Hi = NewSplitF64.getValue(1);
5725     APInt SignBit = APInt::getSignMask(32);
5726     if (Op0.getOpcode() == ISD::FNEG) {
5727       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
5728                                   DAG.getConstant(SignBit, DL, MVT::i32));
5729       return DCI.CombineTo(N, Lo, NewHi);
5730     }
5731     assert(Op0.getOpcode() == ISD::FABS);
5732     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
5733                                 DAG.getConstant(~SignBit, DL, MVT::i32));
5734     return DCI.CombineTo(N, Lo, NewHi);
5735   }
5736   case RISCVISD::SLLW:
5737   case RISCVISD::SRAW:
5738   case RISCVISD::SRLW:
5739   case RISCVISD::ROLW:
5740   case RISCVISD::RORW: {
5741     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5742     SDValue LHS = N->getOperand(0);
5743     SDValue RHS = N->getOperand(1);
5744     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5745     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5746     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
5747         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
5748       if (N->getOpcode() != ISD::DELETED_NODE)
5749         DCI.AddToWorklist(N);
5750       return SDValue(N, 0);
5751     }
5752     break;
5753   }
5754   case RISCVISD::CLZW:
5755   case RISCVISD::CTZW: {
5756     // Only the lower 32 bits of the first operand are read
5757     SDValue Op0 = N->getOperand(0);
5758     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5759     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
5760       if (N->getOpcode() != ISD::DELETED_NODE)
5761         DCI.AddToWorklist(N);
5762       return SDValue(N, 0);
5763     }
5764     break;
5765   }
5766   case RISCVISD::FSL:
5767   case RISCVISD::FSR: {
5768     // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read.
5769     SDValue ShAmt = N->getOperand(2);
5770     unsigned BitWidth = ShAmt.getValueSizeInBits();
5771     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5772     APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
5773     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5774       if (N->getOpcode() != ISD::DELETED_NODE)
5775         DCI.AddToWorklist(N);
5776       return SDValue(N, 0);
5777     }
5778     break;
5779   }
5780   case RISCVISD::FSLW:
5781   case RISCVISD::FSRW: {
5782     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
5783     // read.
5784     SDValue Op0 = N->getOperand(0);
5785     SDValue Op1 = N->getOperand(1);
5786     SDValue ShAmt = N->getOperand(2);
5787     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5788     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
5789     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
5790         SimplifyDemandedBits(Op1, OpMask, DCI) ||
5791         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5792       if (N->getOpcode() != ISD::DELETED_NODE)
5793         DCI.AddToWorklist(N);
5794       return SDValue(N, 0);
5795     }
5796     break;
5797   }
5798   case RISCVISD::GREV:
5799   case RISCVISD::GORC: {
5800     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
5801     SDValue ShAmt = N->getOperand(1);
5802     unsigned BitWidth = ShAmt.getValueSizeInBits();
5803     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5804     APInt ShAmtMask(BitWidth, BitWidth - 1);
5805     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5806       if (N->getOpcode() != ISD::DELETED_NODE)
5807         DCI.AddToWorklist(N);
5808       return SDValue(N, 0);
5809     }
5810 
5811     return combineGREVI_GORCI(N, DCI.DAG);
5812   }
5813   case RISCVISD::GREVW:
5814   case RISCVISD::GORCW: {
5815     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5816     SDValue LHS = N->getOperand(0);
5817     SDValue RHS = N->getOperand(1);
5818     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5819     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5820     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5821         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5822       if (N->getOpcode() != ISD::DELETED_NODE)
5823         DCI.AddToWorklist(N);
5824       return SDValue(N, 0);
5825     }
5826 
5827     return combineGREVI_GORCI(N, DCI.DAG);
5828   }
5829   case RISCVISD::SHFL:
5830   case RISCVISD::UNSHFL: {
5831     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
5832     SDValue ShAmt = N->getOperand(1);
5833     unsigned BitWidth = ShAmt.getValueSizeInBits();
5834     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5835     APInt ShAmtMask(BitWidth, (BitWidth / 2) - 1);
5836     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5837       if (N->getOpcode() != ISD::DELETED_NODE)
5838         DCI.AddToWorklist(N);
5839       return SDValue(N, 0);
5840     }
5841 
5842     break;
5843   }
5844   case RISCVISD::SHFLW:
5845   case RISCVISD::UNSHFLW: {
5846     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5847     SDValue LHS = N->getOperand(0);
5848     SDValue RHS = N->getOperand(1);
5849     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5850     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
5851     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5852         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5853       if (N->getOpcode() != ISD::DELETED_NODE)
5854         DCI.AddToWorklist(N);
5855       return SDValue(N, 0);
5856     }
5857 
5858     break;
5859   }
5860   case RISCVISD::BCOMPRESSW:
5861   case RISCVISD::BDECOMPRESSW: {
5862     // Only the lower 32 bits of LHS and RHS are read.
5863     SDValue LHS = N->getOperand(0);
5864     SDValue RHS = N->getOperand(1);
5865     APInt Mask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5866     if (SimplifyDemandedBits(LHS, Mask, DCI) ||
5867         SimplifyDemandedBits(RHS, Mask, DCI)) {
5868       if (N->getOpcode() != ISD::DELETED_NODE)
5869         DCI.AddToWorklist(N);
5870       return SDValue(N, 0);
5871     }
5872 
5873     break;
5874   }
5875   case RISCVISD::FMV_X_ANYEXTW_RV64: {
5876     SDLoc DL(N);
5877     SDValue Op0 = N->getOperand(0);
5878     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
5879     // conversion is unnecessary and can be replaced with an ANY_EXTEND
5880     // of the FMV_W_X_RV64 operand.
5881     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
5882       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
5883              "Unexpected value type!");
5884       return Op0.getOperand(0);
5885     }
5886 
5887     // This is a target-specific version of a DAGCombine performed in
5888     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5889     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5890     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5891     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5892         !Op0.getNode()->hasOneUse())
5893       break;
5894     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
5895                                  Op0.getOperand(0));
5896     APInt SignBit = APInt::getSignMask(32).sext(64);
5897     if (Op0.getOpcode() == ISD::FNEG)
5898       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
5899                          DAG.getConstant(SignBit, DL, MVT::i64));
5900 
5901     assert(Op0.getOpcode() == ISD::FABS);
5902     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
5903                        DAG.getConstant(~SignBit, DL, MVT::i64));
5904   }
5905   case ISD::AND:
5906     return performANDCombine(N, DCI, Subtarget);
5907   case ISD::OR:
5908     return performORCombine(N, DCI, Subtarget);
5909   case ISD::XOR:
5910     return performXORCombine(N, DCI, Subtarget);
5911   case ISD::ANY_EXTEND:
5912     return performANY_EXTENDCombine(N, DCI, Subtarget);
5913   case RISCVISD::SELECT_CC: {
5914     // Transform
5915     SDValue LHS = N->getOperand(0);
5916     SDValue RHS = N->getOperand(1);
5917     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
5918     if (!ISD::isIntEqualitySetCC(CCVal))
5919       break;
5920 
5921     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
5922     //      (select_cc X, Y, lt, trueV, falseV)
5923     // Sometimes the setcc is introduced after select_cc has been formed.
5924     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5925         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5926       // If we're looking for eq 0 instead of ne 0, we need to invert the
5927       // condition.
5928       bool Invert = CCVal == ISD::SETEQ;
5929       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5930       if (Invert)
5931         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5932 
5933       SDLoc DL(N);
5934       RHS = LHS.getOperand(1);
5935       LHS = LHS.getOperand(0);
5936       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5937 
5938       SDValue TargetCC =
5939           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5940       return DAG.getNode(
5941           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5942           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5943     }
5944 
5945     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
5946     //      (select_cc X, Y, eq/ne, trueV, falseV)
5947     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5948       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
5949                          {LHS.getOperand(0), LHS.getOperand(1),
5950                           N->getOperand(2), N->getOperand(3),
5951                           N->getOperand(4)});
5952     // (select_cc X, 1, setne, trueV, falseV) ->
5953     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
5954     // This can occur when legalizing some floating point comparisons.
5955     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5956     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5957       SDLoc DL(N);
5958       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5959       SDValue TargetCC =
5960           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5961       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5962       return DAG.getNode(
5963           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5964           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5965     }
5966 
5967     break;
5968   }
5969   case RISCVISD::BR_CC: {
5970     SDValue LHS = N->getOperand(1);
5971     SDValue RHS = N->getOperand(2);
5972     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
5973     if (!ISD::isIntEqualitySetCC(CCVal))
5974       break;
5975 
5976     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
5977     //      (br_cc X, Y, lt, dest)
5978     // Sometimes the setcc is introduced after br_cc has been formed.
5979     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5980         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5981       // If we're looking for eq 0 instead of ne 0, we need to invert the
5982       // condition.
5983       bool Invert = CCVal == ISD::SETEQ;
5984       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5985       if (Invert)
5986         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5987 
5988       SDLoc DL(N);
5989       RHS = LHS.getOperand(1);
5990       LHS = LHS.getOperand(0);
5991       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5992 
5993       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5994                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
5995                          N->getOperand(4));
5996     }
5997 
5998     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
5999     //      (br_cc X, Y, eq/ne, trueV, falseV)
6000     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6001       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
6002                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
6003                          N->getOperand(3), N->getOperand(4));
6004 
6005     // (br_cc X, 1, setne, br_cc) ->
6006     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
6007     // This can occur when legalizing some floating point comparisons.
6008     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6009     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6010       SDLoc DL(N);
6011       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6012       SDValue TargetCC = DAG.getCondCode(CCVal);
6013       RHS = DAG.getConstant(0, DL, LHS.getValueType());
6014       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6015                          N->getOperand(0), LHS, RHS, TargetCC,
6016                          N->getOperand(4));
6017     }
6018     break;
6019   }
6020   case ISD::FCOPYSIGN: {
6021     EVT VT = N->getValueType(0);
6022     if (!VT.isVector())
6023       break;
6024     // There is a form of VFSGNJ which injects the negated sign of its second
6025     // operand. Try and bubble any FNEG up after the extend/round to produce
6026     // this optimized pattern. Avoid modifying cases where FP_ROUND and
6027     // TRUNC=1.
6028     SDValue In2 = N->getOperand(1);
6029     // Avoid cases where the extend/round has multiple uses, as duplicating
6030     // those is typically more expensive than removing a fneg.
6031     if (!In2.hasOneUse())
6032       break;
6033     if (In2.getOpcode() != ISD::FP_EXTEND &&
6034         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
6035       break;
6036     In2 = In2.getOperand(0);
6037     if (In2.getOpcode() != ISD::FNEG)
6038       break;
6039     SDLoc DL(N);
6040     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
6041     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
6042                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
6043   }
6044   case ISD::MGATHER:
6045   case ISD::MSCATTER: {
6046     if (!DCI.isBeforeLegalize())
6047       break;
6048     MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
6049     SDValue Index = MGSN->getIndex();
6050     EVT IndexVT = Index.getValueType();
6051     MVT XLenVT = Subtarget.getXLenVT();
6052     // RISCV indexed loads only support the "unsigned unscaled" addressing
6053     // mode, so anything else must be manually legalized.
6054     bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
6055                                 (MGSN->isIndexSigned() &&
6056                                  IndexVT.getVectorElementType().bitsLT(XLenVT));
6057     if (!NeedsIdxLegalization)
6058       break;
6059 
6060     SDLoc DL(N);
6061 
6062     // Any index legalization should first promote to XLenVT, so we don't lose
6063     // bits when scaling. This may create an illegal index type so we let
6064     // LLVM's legalization take care of the splitting.
6065     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
6066       IndexVT = IndexVT.changeVectorElementType(XLenVT);
6067       Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
6068                                                 : ISD::ZERO_EXTEND,
6069                           DL, IndexVT, Index);
6070     }
6071 
6072     unsigned Scale = N->getConstantOperandVal(5);
6073     if (MGSN->isIndexScaled() && Scale != 1) {
6074       // Manually scale the indices by the element size.
6075       // TODO: Sanitize the scale operand here?
6076       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
6077       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
6078       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
6079     }
6080 
6081     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
6082     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
6083       return DAG.getMaskedGather(
6084           N->getVTList(), MGSN->getMemoryVT(), DL,
6085           {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
6086            MGSN->getBasePtr(), Index, MGN->getScale()},
6087           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
6088     }
6089     const auto *MSN = cast<MaskedScatterSDNode>(N);
6090     return DAG.getMaskedScatter(
6091         N->getVTList(), MGSN->getMemoryVT(), DL,
6092         {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
6093          Index, MGSN->getScale()},
6094         MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
6095   }
6096   case RISCVISD::SRA_VL:
6097   case RISCVISD::SRL_VL:
6098   case RISCVISD::SHL_VL: {
6099     SDValue ShAmt = N->getOperand(1);
6100     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6101       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6102       SDLoc DL(N);
6103       SDValue VL = N->getOperand(3);
6104       EVT VT = N->getValueType(0);
6105       ShAmt =
6106           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
6107       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
6108                          N->getOperand(2), N->getOperand(3));
6109     }
6110     break;
6111   }
6112   case ISD::SRA:
6113   case ISD::SRL:
6114   case ISD::SHL: {
6115     SDValue ShAmt = N->getOperand(1);
6116     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6117       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6118       SDLoc DL(N);
6119       EVT VT = N->getValueType(0);
6120       ShAmt =
6121           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
6122       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
6123     }
6124     break;
6125   }
6126   }
6127 
6128   return SDValue();
6129 }
6130 
6131 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
6132     const SDNode *N, CombineLevel Level) const {
6133   // The following folds are only desirable if `(OP _, c1 << c2)` can be
6134   // materialised in fewer instructions than `(OP _, c1)`:
6135   //
6136   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
6137   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
6138   SDValue N0 = N->getOperand(0);
6139   EVT Ty = N0.getValueType();
6140   if (Ty.isScalarInteger() &&
6141       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
6142     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6143     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
6144     if (C1 && C2) {
6145       const APInt &C1Int = C1->getAPIntValue();
6146       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
6147 
6148       // We can materialise `c1 << c2` into an add immediate, so it's "free",
6149       // and the combine should happen, to potentially allow further combines
6150       // later.
6151       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
6152           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
6153         return true;
6154 
6155       // We can materialise `c1` in an add immediate, so it's "free", and the
6156       // combine should be prevented.
6157       if (C1Int.getMinSignedBits() <= 64 &&
6158           isLegalAddImmediate(C1Int.getSExtValue()))
6159         return false;
6160 
6161       // Neither constant will fit into an immediate, so find materialisation
6162       // costs.
6163       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
6164                                               Subtarget.is64Bit());
6165       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
6166           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
6167 
6168       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
6169       // combine should be prevented.
6170       if (C1Cost < ShiftedC1Cost)
6171         return false;
6172     }
6173   }
6174   return true;
6175 }
6176 
6177 bool RISCVTargetLowering::targetShrinkDemandedConstant(
6178     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
6179     TargetLoweringOpt &TLO) const {
6180   // Delay this optimization as late as possible.
6181   if (!TLO.LegalOps)
6182     return false;
6183 
6184   EVT VT = Op.getValueType();
6185   if (VT.isVector())
6186     return false;
6187 
6188   // Only handle AND for now.
6189   if (Op.getOpcode() != ISD::AND)
6190     return false;
6191 
6192   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6193   if (!C)
6194     return false;
6195 
6196   const APInt &Mask = C->getAPIntValue();
6197 
6198   // Clear all non-demanded bits initially.
6199   APInt ShrunkMask = Mask & DemandedBits;
6200 
6201   // Try to make a smaller immediate by setting undemanded bits.
6202 
6203   APInt ExpandedMask = Mask | ~DemandedBits;
6204 
6205   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
6206     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
6207   };
6208   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
6209     if (NewMask == Mask)
6210       return true;
6211     SDLoc DL(Op);
6212     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
6213     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
6214     return TLO.CombineTo(Op, NewOp);
6215   };
6216 
6217   // If the shrunk mask fits in sign extended 12 bits, let the target
6218   // independent code apply it.
6219   if (ShrunkMask.isSignedIntN(12))
6220     return false;
6221 
6222   // Preserve (and X, 0xffff) when zext.h is supported.
6223   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
6224     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
6225     if (IsLegalMask(NewMask))
6226       return UseMask(NewMask);
6227   }
6228 
6229   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
6230   if (VT == MVT::i64) {
6231     APInt NewMask = APInt(64, 0xffffffff);
6232     if (IsLegalMask(NewMask))
6233       return UseMask(NewMask);
6234   }
6235 
6236   // For the remaining optimizations, we need to be able to make a negative
6237   // number through a combination of mask and undemanded bits.
6238   if (!ExpandedMask.isNegative())
6239     return false;
6240 
6241   // What is the fewest number of bits we need to represent the negative number.
6242   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
6243 
6244   // Try to make a 12 bit negative immediate. If that fails try to make a 32
6245   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
6246   APInt NewMask = ShrunkMask;
6247   if (MinSignedBits <= 12)
6248     NewMask.setBitsFrom(11);
6249   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
6250     NewMask.setBitsFrom(31);
6251   else
6252     return false;
6253 
6254   // Sanity check that our new mask is a subset of the demanded mask.
6255   assert(IsLegalMask(NewMask));
6256   return UseMask(NewMask);
6257 }
6258 
6259 static void computeGREV(APInt &Src, unsigned ShAmt) {
6260   ShAmt &= Src.getBitWidth() - 1;
6261   uint64_t x = Src.getZExtValue();
6262   if (ShAmt & 1)
6263     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
6264   if (ShAmt & 2)
6265     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
6266   if (ShAmt & 4)
6267     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
6268   if (ShAmt & 8)
6269     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
6270   if (ShAmt & 16)
6271     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
6272   if (ShAmt & 32)
6273     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
6274   Src = x;
6275 }
6276 
6277 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
6278                                                         KnownBits &Known,
6279                                                         const APInt &DemandedElts,
6280                                                         const SelectionDAG &DAG,
6281                                                         unsigned Depth) const {
6282   unsigned BitWidth = Known.getBitWidth();
6283   unsigned Opc = Op.getOpcode();
6284   assert((Opc >= ISD::BUILTIN_OP_END ||
6285           Opc == ISD::INTRINSIC_WO_CHAIN ||
6286           Opc == ISD::INTRINSIC_W_CHAIN ||
6287           Opc == ISD::INTRINSIC_VOID) &&
6288          "Should use MaskedValueIsZero if you don't know whether Op"
6289          " is a target node!");
6290 
6291   Known.resetAll();
6292   switch (Opc) {
6293   default: break;
6294   case RISCVISD::SELECT_CC: {
6295     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
6296     // If we don't know any bits, early out.
6297     if (Known.isUnknown())
6298       break;
6299     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
6300 
6301     // Only known if known in both the LHS and RHS.
6302     Known = KnownBits::commonBits(Known, Known2);
6303     break;
6304   }
6305   case RISCVISD::REMUW: {
6306     KnownBits Known2;
6307     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6308     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6309     // We only care about the lower 32 bits.
6310     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
6311     // Restore the original width by sign extending.
6312     Known = Known.sext(BitWidth);
6313     break;
6314   }
6315   case RISCVISD::DIVUW: {
6316     KnownBits Known2;
6317     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6318     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6319     // We only care about the lower 32 bits.
6320     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
6321     // Restore the original width by sign extending.
6322     Known = Known.sext(BitWidth);
6323     break;
6324   }
6325   case RISCVISD::CTZW: {
6326     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6327     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
6328     unsigned LowBits = Log2_32(PossibleTZ) + 1;
6329     Known.Zero.setBitsFrom(LowBits);
6330     break;
6331   }
6332   case RISCVISD::CLZW: {
6333     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6334     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
6335     unsigned LowBits = Log2_32(PossibleLZ) + 1;
6336     Known.Zero.setBitsFrom(LowBits);
6337     break;
6338   }
6339   case RISCVISD::GREV:
6340   case RISCVISD::GREVW: {
6341     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
6342       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6343       if (Opc == RISCVISD::GREVW)
6344         Known = Known.trunc(32);
6345       unsigned ShAmt = C->getZExtValue();
6346       computeGREV(Known.Zero, ShAmt);
6347       computeGREV(Known.One, ShAmt);
6348       if (Opc == RISCVISD::GREVW)
6349         Known = Known.sext(BitWidth);
6350     }
6351     break;
6352   }
6353   case RISCVISD::READ_VLENB:
6354     // We assume VLENB is at least 16 bytes.
6355     Known.Zero.setLowBits(4);
6356     break;
6357   case ISD::INTRINSIC_W_CHAIN: {
6358     unsigned IntNo = Op.getConstantOperandVal(1);
6359     switch (IntNo) {
6360     default:
6361       // We can't do anything for most intrinsics.
6362       break;
6363     case Intrinsic::riscv_vsetvli:
6364     case Intrinsic::riscv_vsetvlimax:
6365       // Assume that VL output is positive and would fit in an int32_t.
6366       // TODO: VLEN might be capped at 16 bits in a future V spec update.
6367       if (BitWidth >= 32)
6368         Known.Zero.setBitsFrom(31);
6369       break;
6370     }
6371     break;
6372   }
6373   }
6374 }
6375 
6376 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
6377     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
6378     unsigned Depth) const {
6379   switch (Op.getOpcode()) {
6380   default:
6381     break;
6382   case RISCVISD::SLLW:
6383   case RISCVISD::SRAW:
6384   case RISCVISD::SRLW:
6385   case RISCVISD::DIVW:
6386   case RISCVISD::DIVUW:
6387   case RISCVISD::REMUW:
6388   case RISCVISD::ROLW:
6389   case RISCVISD::RORW:
6390   case RISCVISD::GREVW:
6391   case RISCVISD::GORCW:
6392   case RISCVISD::FSLW:
6393   case RISCVISD::FSRW:
6394   case RISCVISD::SHFLW:
6395   case RISCVISD::UNSHFLW:
6396   case RISCVISD::BCOMPRESSW:
6397   case RISCVISD::BDECOMPRESSW:
6398     // TODO: As the result is sign-extended, this is conservatively correct. A
6399     // more precise answer could be calculated for SRAW depending on known
6400     // bits in the shift amount.
6401     return 33;
6402   case RISCVISD::SHFL:
6403   case RISCVISD::UNSHFL: {
6404     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
6405     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
6406     // will stay within the upper 32 bits. If there were more than 32 sign bits
6407     // before there will be at least 33 sign bits after.
6408     if (Op.getValueType() == MVT::i64 &&
6409         isa<ConstantSDNode>(Op.getOperand(1)) &&
6410         (Op.getConstantOperandVal(1) & 0x10) == 0) {
6411       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
6412       if (Tmp > 32)
6413         return 33;
6414     }
6415     break;
6416   }
6417   case RISCVISD::VMV_X_S:
6418     // The number of sign bits of the scalar result is computed by obtaining the
6419     // element type of the input vector operand, subtracting its width from the
6420     // XLEN, and then adding one (sign bit within the element type). If the
6421     // element type is wider than XLen, the least-significant XLEN bits are
6422     // taken.
6423     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
6424       return 1;
6425     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
6426   }
6427 
6428   return 1;
6429 }
6430 
6431 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
6432                                                   MachineBasicBlock *BB) {
6433   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
6434 
6435   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
6436   // Should the count have wrapped while it was being read, we need to try
6437   // again.
6438   // ...
6439   // read:
6440   // rdcycleh x3 # load high word of cycle
6441   // rdcycle  x2 # load low word of cycle
6442   // rdcycleh x4 # load high word of cycle
6443   // bne x3, x4, read # check if high word reads match, otherwise try again
6444   // ...
6445 
6446   MachineFunction &MF = *BB->getParent();
6447   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6448   MachineFunction::iterator It = ++BB->getIterator();
6449 
6450   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6451   MF.insert(It, LoopMBB);
6452 
6453   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6454   MF.insert(It, DoneMBB);
6455 
6456   // Transfer the remainder of BB and its successor edges to DoneMBB.
6457   DoneMBB->splice(DoneMBB->begin(), BB,
6458                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
6459   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
6460 
6461   BB->addSuccessor(LoopMBB);
6462 
6463   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6464   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6465   Register LoReg = MI.getOperand(0).getReg();
6466   Register HiReg = MI.getOperand(1).getReg();
6467   DebugLoc DL = MI.getDebugLoc();
6468 
6469   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
6470   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
6471       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6472       .addReg(RISCV::X0);
6473   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
6474       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
6475       .addReg(RISCV::X0);
6476   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
6477       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6478       .addReg(RISCV::X0);
6479 
6480   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
6481       .addReg(HiReg)
6482       .addReg(ReadAgainReg)
6483       .addMBB(LoopMBB);
6484 
6485   LoopMBB->addSuccessor(LoopMBB);
6486   LoopMBB->addSuccessor(DoneMBB);
6487 
6488   MI.eraseFromParent();
6489 
6490   return DoneMBB;
6491 }
6492 
6493 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
6494                                              MachineBasicBlock *BB) {
6495   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
6496 
6497   MachineFunction &MF = *BB->getParent();
6498   DebugLoc DL = MI.getDebugLoc();
6499   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6500   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6501   Register LoReg = MI.getOperand(0).getReg();
6502   Register HiReg = MI.getOperand(1).getReg();
6503   Register SrcReg = MI.getOperand(2).getReg();
6504   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
6505   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6506 
6507   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
6508                           RI);
6509   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6510   MachineMemOperand *MMOLo =
6511       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
6512   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6513       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
6514   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
6515       .addFrameIndex(FI)
6516       .addImm(0)
6517       .addMemOperand(MMOLo);
6518   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
6519       .addFrameIndex(FI)
6520       .addImm(4)
6521       .addMemOperand(MMOHi);
6522   MI.eraseFromParent(); // The pseudo instruction is gone now.
6523   return BB;
6524 }
6525 
6526 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
6527                                                  MachineBasicBlock *BB) {
6528   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
6529          "Unexpected instruction");
6530 
6531   MachineFunction &MF = *BB->getParent();
6532   DebugLoc DL = MI.getDebugLoc();
6533   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6534   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6535   Register DstReg = MI.getOperand(0).getReg();
6536   Register LoReg = MI.getOperand(1).getReg();
6537   Register HiReg = MI.getOperand(2).getReg();
6538   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
6539   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6540 
6541   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6542   MachineMemOperand *MMOLo =
6543       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
6544   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6545       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
6546   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6547       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
6548       .addFrameIndex(FI)
6549       .addImm(0)
6550       .addMemOperand(MMOLo);
6551   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6552       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
6553       .addFrameIndex(FI)
6554       .addImm(4)
6555       .addMemOperand(MMOHi);
6556   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
6557   MI.eraseFromParent(); // The pseudo instruction is gone now.
6558   return BB;
6559 }
6560 
6561 static bool isSelectPseudo(MachineInstr &MI) {
6562   switch (MI.getOpcode()) {
6563   default:
6564     return false;
6565   case RISCV::Select_GPR_Using_CC_GPR:
6566   case RISCV::Select_FPR16_Using_CC_GPR:
6567   case RISCV::Select_FPR32_Using_CC_GPR:
6568   case RISCV::Select_FPR64_Using_CC_GPR:
6569     return true;
6570   }
6571 }
6572 
6573 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
6574                                            MachineBasicBlock *BB) {
6575   // To "insert" Select_* instructions, we actually have to insert the triangle
6576   // control-flow pattern.  The incoming instructions know the destination vreg
6577   // to set, the condition code register to branch on, the true/false values to
6578   // select between, and the condcode to use to select the appropriate branch.
6579   //
6580   // We produce the following control flow:
6581   //     HeadMBB
6582   //     |  \
6583   //     |  IfFalseMBB
6584   //     | /
6585   //    TailMBB
6586   //
6587   // When we find a sequence of selects we attempt to optimize their emission
6588   // by sharing the control flow. Currently we only handle cases where we have
6589   // multiple selects with the exact same condition (same LHS, RHS and CC).
6590   // The selects may be interleaved with other instructions if the other
6591   // instructions meet some requirements we deem safe:
6592   // - They are debug instructions. Otherwise,
6593   // - They do not have side-effects, do not access memory and their inputs do
6594   //   not depend on the results of the select pseudo-instructions.
6595   // The TrueV/FalseV operands of the selects cannot depend on the result of
6596   // previous selects in the sequence.
6597   // These conditions could be further relaxed. See the X86 target for a
6598   // related approach and more information.
6599   Register LHS = MI.getOperand(1).getReg();
6600   Register RHS = MI.getOperand(2).getReg();
6601   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
6602 
6603   SmallVector<MachineInstr *, 4> SelectDebugValues;
6604   SmallSet<Register, 4> SelectDests;
6605   SelectDests.insert(MI.getOperand(0).getReg());
6606 
6607   MachineInstr *LastSelectPseudo = &MI;
6608 
6609   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
6610        SequenceMBBI != E; ++SequenceMBBI) {
6611     if (SequenceMBBI->isDebugInstr())
6612       continue;
6613     else if (isSelectPseudo(*SequenceMBBI)) {
6614       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
6615           SequenceMBBI->getOperand(2).getReg() != RHS ||
6616           SequenceMBBI->getOperand(3).getImm() != CC ||
6617           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
6618           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
6619         break;
6620       LastSelectPseudo = &*SequenceMBBI;
6621       SequenceMBBI->collectDebugValues(SelectDebugValues);
6622       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
6623     } else {
6624       if (SequenceMBBI->hasUnmodeledSideEffects() ||
6625           SequenceMBBI->mayLoadOrStore())
6626         break;
6627       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
6628             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
6629           }))
6630         break;
6631     }
6632   }
6633 
6634   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
6635   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6636   DebugLoc DL = MI.getDebugLoc();
6637   MachineFunction::iterator I = ++BB->getIterator();
6638 
6639   MachineBasicBlock *HeadMBB = BB;
6640   MachineFunction *F = BB->getParent();
6641   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
6642   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
6643 
6644   F->insert(I, IfFalseMBB);
6645   F->insert(I, TailMBB);
6646 
6647   // Transfer debug instructions associated with the selects to TailMBB.
6648   for (MachineInstr *DebugInstr : SelectDebugValues) {
6649     TailMBB->push_back(DebugInstr->removeFromParent());
6650   }
6651 
6652   // Move all instructions after the sequence to TailMBB.
6653   TailMBB->splice(TailMBB->end(), HeadMBB,
6654                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
6655   // Update machine-CFG edges by transferring all successors of the current
6656   // block to the new block which will contain the Phi nodes for the selects.
6657   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
6658   // Set the successors for HeadMBB.
6659   HeadMBB->addSuccessor(IfFalseMBB);
6660   HeadMBB->addSuccessor(TailMBB);
6661 
6662   // Insert appropriate branch.
6663   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
6664 
6665   BuildMI(HeadMBB, DL, TII.get(Opcode))
6666     .addReg(LHS)
6667     .addReg(RHS)
6668     .addMBB(TailMBB);
6669 
6670   // IfFalseMBB just falls through to TailMBB.
6671   IfFalseMBB->addSuccessor(TailMBB);
6672 
6673   // Create PHIs for all of the select pseudo-instructions.
6674   auto SelectMBBI = MI.getIterator();
6675   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
6676   auto InsertionPoint = TailMBB->begin();
6677   while (SelectMBBI != SelectEnd) {
6678     auto Next = std::next(SelectMBBI);
6679     if (isSelectPseudo(*SelectMBBI)) {
6680       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
6681       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
6682               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
6683           .addReg(SelectMBBI->getOperand(4).getReg())
6684           .addMBB(HeadMBB)
6685           .addReg(SelectMBBI->getOperand(5).getReg())
6686           .addMBB(IfFalseMBB);
6687       SelectMBBI->eraseFromParent();
6688     }
6689     SelectMBBI = Next;
6690   }
6691 
6692   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
6693   return TailMBB;
6694 }
6695 
6696 MachineBasicBlock *
6697 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
6698                                                  MachineBasicBlock *BB) const {
6699   switch (MI.getOpcode()) {
6700   default:
6701     llvm_unreachable("Unexpected instr type to insert");
6702   case RISCV::ReadCycleWide:
6703     assert(!Subtarget.is64Bit() &&
6704            "ReadCycleWrite is only to be used on riscv32");
6705     return emitReadCycleWidePseudo(MI, BB);
6706   case RISCV::Select_GPR_Using_CC_GPR:
6707   case RISCV::Select_FPR16_Using_CC_GPR:
6708   case RISCV::Select_FPR32_Using_CC_GPR:
6709   case RISCV::Select_FPR64_Using_CC_GPR:
6710     return emitSelectPseudo(MI, BB);
6711   case RISCV::BuildPairF64Pseudo:
6712     return emitBuildPairF64Pseudo(MI, BB);
6713   case RISCV::SplitF64Pseudo:
6714     return emitSplitF64Pseudo(MI, BB);
6715   }
6716 }
6717 
6718 // Calling Convention Implementation.
6719 // The expectations for frontend ABI lowering vary from target to target.
6720 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
6721 // details, but this is a longer term goal. For now, we simply try to keep the
6722 // role of the frontend as simple and well-defined as possible. The rules can
6723 // be summarised as:
6724 // * Never split up large scalar arguments. We handle them here.
6725 // * If a hardfloat calling convention is being used, and the struct may be
6726 // passed in a pair of registers (fp+fp, int+fp), and both registers are
6727 // available, then pass as two separate arguments. If either the GPRs or FPRs
6728 // are exhausted, then pass according to the rule below.
6729 // * If a struct could never be passed in registers or directly in a stack
6730 // slot (as it is larger than 2*XLEN and the floating point rules don't
6731 // apply), then pass it using a pointer with the byval attribute.
6732 // * If a struct is less than 2*XLEN, then coerce to either a two-element
6733 // word-sized array or a 2*XLEN scalar (depending on alignment).
6734 // * The frontend can determine whether a struct is returned by reference or
6735 // not based on its size and fields. If it will be returned by reference, the
6736 // frontend must modify the prototype so a pointer with the sret annotation is
6737 // passed as the first argument. This is not necessary for large scalar
6738 // returns.
6739 // * Struct return values and varargs should be coerced to structs containing
6740 // register-size fields in the same situations they would be for fixed
6741 // arguments.
6742 
6743 static const MCPhysReg ArgGPRs[] = {
6744   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
6745   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
6746 };
6747 static const MCPhysReg ArgFPR16s[] = {
6748   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
6749   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
6750 };
6751 static const MCPhysReg ArgFPR32s[] = {
6752   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
6753   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
6754 };
6755 static const MCPhysReg ArgFPR64s[] = {
6756   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
6757   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
6758 };
6759 // This is an interim calling convention and it may be changed in the future.
6760 static const MCPhysReg ArgVRs[] = {
6761     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
6762     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
6763     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
6764 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
6765                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
6766                                      RISCV::V20M2, RISCV::V22M2};
6767 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
6768                                      RISCV::V20M4};
6769 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
6770 
6771 // Pass a 2*XLEN argument that has been split into two XLEN values through
6772 // registers or the stack as necessary.
6773 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
6774                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
6775                                 MVT ValVT2, MVT LocVT2,
6776                                 ISD::ArgFlagsTy ArgFlags2) {
6777   unsigned XLenInBytes = XLen / 8;
6778   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6779     // At least one half can be passed via register.
6780     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
6781                                      VA1.getLocVT(), CCValAssign::Full));
6782   } else {
6783     // Both halves must be passed on the stack, with proper alignment.
6784     Align StackAlign =
6785         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
6786     State.addLoc(
6787         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
6788                             State.AllocateStack(XLenInBytes, StackAlign),
6789                             VA1.getLocVT(), CCValAssign::Full));
6790     State.addLoc(CCValAssign::getMem(
6791         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6792         LocVT2, CCValAssign::Full));
6793     return false;
6794   }
6795 
6796   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6797     // The second half can also be passed via register.
6798     State.addLoc(
6799         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
6800   } else {
6801     // The second half is passed via the stack, without additional alignment.
6802     State.addLoc(CCValAssign::getMem(
6803         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6804         LocVT2, CCValAssign::Full));
6805   }
6806 
6807   return false;
6808 }
6809 
6810 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
6811                                Optional<unsigned> FirstMaskArgument,
6812                                CCState &State, const RISCVTargetLowering &TLI) {
6813   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
6814   if (RC == &RISCV::VRRegClass) {
6815     // Assign the first mask argument to V0.
6816     // This is an interim calling convention and it may be changed in the
6817     // future.
6818     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
6819       return State.AllocateReg(RISCV::V0);
6820     return State.AllocateReg(ArgVRs);
6821   }
6822   if (RC == &RISCV::VRM2RegClass)
6823     return State.AllocateReg(ArgVRM2s);
6824   if (RC == &RISCV::VRM4RegClass)
6825     return State.AllocateReg(ArgVRM4s);
6826   if (RC == &RISCV::VRM8RegClass)
6827     return State.AllocateReg(ArgVRM8s);
6828   llvm_unreachable("Unhandled register class for ValueType");
6829 }
6830 
6831 // Implements the RISC-V calling convention. Returns true upon failure.
6832 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
6833                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
6834                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
6835                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
6836                      Optional<unsigned> FirstMaskArgument) {
6837   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
6838   assert(XLen == 32 || XLen == 64);
6839   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
6840 
6841   // Any return value split in to more than two values can't be returned
6842   // directly. Vectors are returned via the available vector registers.
6843   if (!LocVT.isVector() && IsRet && ValNo > 1)
6844     return true;
6845 
6846   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
6847   // variadic argument, or if no F16/F32 argument registers are available.
6848   bool UseGPRForF16_F32 = true;
6849   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
6850   // variadic argument, or if no F64 argument registers are available.
6851   bool UseGPRForF64 = true;
6852 
6853   switch (ABI) {
6854   default:
6855     llvm_unreachable("Unexpected ABI");
6856   case RISCVABI::ABI_ILP32:
6857   case RISCVABI::ABI_LP64:
6858     break;
6859   case RISCVABI::ABI_ILP32F:
6860   case RISCVABI::ABI_LP64F:
6861     UseGPRForF16_F32 = !IsFixed;
6862     break;
6863   case RISCVABI::ABI_ILP32D:
6864   case RISCVABI::ABI_LP64D:
6865     UseGPRForF16_F32 = !IsFixed;
6866     UseGPRForF64 = !IsFixed;
6867     break;
6868   }
6869 
6870   // FPR16, FPR32, and FPR64 alias each other.
6871   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
6872     UseGPRForF16_F32 = true;
6873     UseGPRForF64 = true;
6874   }
6875 
6876   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
6877   // similar local variables rather than directly checking against the target
6878   // ABI.
6879 
6880   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
6881     LocVT = XLenVT;
6882     LocInfo = CCValAssign::BCvt;
6883   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
6884     LocVT = MVT::i64;
6885     LocInfo = CCValAssign::BCvt;
6886   }
6887 
6888   // If this is a variadic argument, the RISC-V calling convention requires
6889   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
6890   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
6891   // be used regardless of whether the original argument was split during
6892   // legalisation or not. The argument will not be passed by registers if the
6893   // original type is larger than 2*XLEN, so the register alignment rule does
6894   // not apply.
6895   unsigned TwoXLenInBytes = (2 * XLen) / 8;
6896   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
6897       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
6898     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
6899     // Skip 'odd' register if necessary.
6900     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
6901       State.AllocateReg(ArgGPRs);
6902   }
6903 
6904   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
6905   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
6906       State.getPendingArgFlags();
6907 
6908   assert(PendingLocs.size() == PendingArgFlags.size() &&
6909          "PendingLocs and PendingArgFlags out of sync");
6910 
6911   // Handle passing f64 on RV32D with a soft float ABI or when floating point
6912   // registers are exhausted.
6913   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
6914     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
6915            "Can't lower f64 if it is split");
6916     // Depending on available argument GPRS, f64 may be passed in a pair of
6917     // GPRs, split between a GPR and the stack, or passed completely on the
6918     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
6919     // cases.
6920     Register Reg = State.AllocateReg(ArgGPRs);
6921     LocVT = MVT::i32;
6922     if (!Reg) {
6923       unsigned StackOffset = State.AllocateStack(8, Align(8));
6924       State.addLoc(
6925           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
6926       return false;
6927     }
6928     if (!State.AllocateReg(ArgGPRs))
6929       State.AllocateStack(4, Align(4));
6930     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6931     return false;
6932   }
6933 
6934   // Fixed-length vectors are located in the corresponding scalable-vector
6935   // container types.
6936   if (ValVT.isFixedLengthVector())
6937     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
6938 
6939   // Split arguments might be passed indirectly, so keep track of the pending
6940   // values. Split vectors are passed via a mix of registers and indirectly, so
6941   // treat them as we would any other argument.
6942   if (!LocVT.isVector() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
6943     LocVT = XLenVT;
6944     LocInfo = CCValAssign::Indirect;
6945     PendingLocs.push_back(
6946         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
6947     PendingArgFlags.push_back(ArgFlags);
6948     if (!ArgFlags.isSplitEnd()) {
6949       return false;
6950     }
6951   }
6952 
6953   // If the split argument only had two elements, it should be passed directly
6954   // in registers or on the stack.
6955   if (!LocVT.isVector() && ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
6956     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
6957     // Apply the normal calling convention rules to the first half of the
6958     // split argument.
6959     CCValAssign VA = PendingLocs[0];
6960     ISD::ArgFlagsTy AF = PendingArgFlags[0];
6961     PendingLocs.clear();
6962     PendingArgFlags.clear();
6963     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
6964                                ArgFlags);
6965   }
6966 
6967   // Allocate to a register if possible, or else a stack slot.
6968   Register Reg;
6969   unsigned StoreSizeBytes = XLen / 8;
6970   Align StackAlign = Align(XLen / 8);
6971 
6972   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
6973     Reg = State.AllocateReg(ArgFPR16s);
6974   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
6975     Reg = State.AllocateReg(ArgFPR32s);
6976   else if (ValVT == MVT::f64 && !UseGPRForF64)
6977     Reg = State.AllocateReg(ArgFPR64s);
6978   else if (ValVT.isVector()) {
6979     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
6980     if (!Reg) {
6981       // For return values, the vector must be passed fully via registers or
6982       // via the stack.
6983       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
6984       // but we're using all of them.
6985       if (IsRet)
6986         return true;
6987       // Try using a GPR to pass the address
6988       if ((Reg = State.AllocateReg(ArgGPRs))) {
6989         LocVT = XLenVT;
6990         LocInfo = CCValAssign::Indirect;
6991       } else if (ValVT.isScalableVector()) {
6992         report_fatal_error("Unable to pass scalable vector types on the stack");
6993       } else {
6994         // Pass fixed-length vectors on the stack.
6995         LocVT = ValVT;
6996         StoreSizeBytes = ValVT.getStoreSize();
6997         // Align vectors to their element sizes, being careful for vXi1
6998         // vectors.
6999         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7000       }
7001     }
7002   } else {
7003     Reg = State.AllocateReg(ArgGPRs);
7004   }
7005 
7006   unsigned StackOffset =
7007       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
7008 
7009   // If we reach this point and PendingLocs is non-empty, we must be at the
7010   // end of a split argument that must be passed indirectly.
7011   if (!PendingLocs.empty()) {
7012     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
7013     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
7014 
7015     for (auto &It : PendingLocs) {
7016       if (Reg)
7017         It.convertToReg(Reg);
7018       else
7019         It.convertToMem(StackOffset);
7020       State.addLoc(It);
7021     }
7022     PendingLocs.clear();
7023     PendingArgFlags.clear();
7024     return false;
7025   }
7026 
7027   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
7028           (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
7029          "Expected an XLenVT or vector types at this stage");
7030 
7031   if (Reg) {
7032     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7033     return false;
7034   }
7035 
7036   // When a floating-point value is passed on the stack, no bit-conversion is
7037   // needed.
7038   if (ValVT.isFloatingPoint()) {
7039     LocVT = ValVT;
7040     LocInfo = CCValAssign::Full;
7041   }
7042   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7043   return false;
7044 }
7045 
7046 template <typename ArgTy>
7047 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
7048   for (const auto &ArgIdx : enumerate(Args)) {
7049     MVT ArgVT = ArgIdx.value().VT;
7050     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
7051       return ArgIdx.index();
7052   }
7053   return None;
7054 }
7055 
7056 void RISCVTargetLowering::analyzeInputArgs(
7057     MachineFunction &MF, CCState &CCInfo,
7058     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
7059     RISCVCCAssignFn Fn) const {
7060   unsigned NumArgs = Ins.size();
7061   FunctionType *FType = MF.getFunction().getFunctionType();
7062 
7063   Optional<unsigned> FirstMaskArgument;
7064   if (Subtarget.hasStdExtV())
7065     FirstMaskArgument = preAssignMask(Ins);
7066 
7067   for (unsigned i = 0; i != NumArgs; ++i) {
7068     MVT ArgVT = Ins[i].VT;
7069     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
7070 
7071     Type *ArgTy = nullptr;
7072     if (IsRet)
7073       ArgTy = FType->getReturnType();
7074     else if (Ins[i].isOrigArg())
7075       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
7076 
7077     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7078     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
7079            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
7080            FirstMaskArgument)) {
7081       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
7082                         << EVT(ArgVT).getEVTString() << '\n');
7083       llvm_unreachable(nullptr);
7084     }
7085   }
7086 }
7087 
7088 void RISCVTargetLowering::analyzeOutputArgs(
7089     MachineFunction &MF, CCState &CCInfo,
7090     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
7091     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
7092   unsigned NumArgs = Outs.size();
7093 
7094   Optional<unsigned> FirstMaskArgument;
7095   if (Subtarget.hasStdExtV())
7096     FirstMaskArgument = preAssignMask(Outs);
7097 
7098   for (unsigned i = 0; i != NumArgs; i++) {
7099     MVT ArgVT = Outs[i].VT;
7100     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
7101     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
7102 
7103     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7104     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
7105            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
7106            FirstMaskArgument)) {
7107       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
7108                         << EVT(ArgVT).getEVTString() << "\n");
7109       llvm_unreachable(nullptr);
7110     }
7111   }
7112 }
7113 
7114 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
7115 // values.
7116 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
7117                                    const CCValAssign &VA, const SDLoc &DL,
7118                                    const RISCVSubtarget &Subtarget) {
7119   switch (VA.getLocInfo()) {
7120   default:
7121     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7122   case CCValAssign::Full:
7123     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
7124       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
7125     break;
7126   case CCValAssign::BCvt:
7127     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7128       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
7129     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7130       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
7131     else
7132       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
7133     break;
7134   }
7135   return Val;
7136 }
7137 
7138 // The caller is responsible for loading the full value if the argument is
7139 // passed with CCValAssign::Indirect.
7140 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
7141                                 const CCValAssign &VA, const SDLoc &DL,
7142                                 const RISCVTargetLowering &TLI) {
7143   MachineFunction &MF = DAG.getMachineFunction();
7144   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7145   EVT LocVT = VA.getLocVT();
7146   SDValue Val;
7147   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
7148   Register VReg = RegInfo.createVirtualRegister(RC);
7149   RegInfo.addLiveIn(VA.getLocReg(), VReg);
7150   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
7151 
7152   if (VA.getLocInfo() == CCValAssign::Indirect)
7153     return Val;
7154 
7155   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
7156 }
7157 
7158 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
7159                                    const CCValAssign &VA, const SDLoc &DL,
7160                                    const RISCVSubtarget &Subtarget) {
7161   EVT LocVT = VA.getLocVT();
7162 
7163   switch (VA.getLocInfo()) {
7164   default:
7165     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7166   case CCValAssign::Full:
7167     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
7168       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
7169     break;
7170   case CCValAssign::BCvt:
7171     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7172       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
7173     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7174       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
7175     else
7176       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
7177     break;
7178   }
7179   return Val;
7180 }
7181 
7182 // The caller is responsible for loading the full value if the argument is
7183 // passed with CCValAssign::Indirect.
7184 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
7185                                 const CCValAssign &VA, const SDLoc &DL) {
7186   MachineFunction &MF = DAG.getMachineFunction();
7187   MachineFrameInfo &MFI = MF.getFrameInfo();
7188   EVT LocVT = VA.getLocVT();
7189   EVT ValVT = VA.getValVT();
7190   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
7191   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
7192                                  /*Immutable=*/true);
7193   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7194   SDValue Val;
7195 
7196   ISD::LoadExtType ExtType;
7197   switch (VA.getLocInfo()) {
7198   default:
7199     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7200   case CCValAssign::Full:
7201   case CCValAssign::Indirect:
7202   case CCValAssign::BCvt:
7203     ExtType = ISD::NON_EXTLOAD;
7204     break;
7205   }
7206   Val = DAG.getExtLoad(
7207       ExtType, DL, LocVT, Chain, FIN,
7208       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
7209   return Val;
7210 }
7211 
7212 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
7213                                        const CCValAssign &VA, const SDLoc &DL) {
7214   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
7215          "Unexpected VA");
7216   MachineFunction &MF = DAG.getMachineFunction();
7217   MachineFrameInfo &MFI = MF.getFrameInfo();
7218   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7219 
7220   if (VA.isMemLoc()) {
7221     // f64 is passed on the stack.
7222     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
7223     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7224     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
7225                        MachinePointerInfo::getFixedStack(MF, FI));
7226   }
7227 
7228   assert(VA.isRegLoc() && "Expected register VA assignment");
7229 
7230   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7231   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
7232   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
7233   SDValue Hi;
7234   if (VA.getLocReg() == RISCV::X17) {
7235     // Second half of f64 is passed on the stack.
7236     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
7237     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7238     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
7239                      MachinePointerInfo::getFixedStack(MF, FI));
7240   } else {
7241     // Second half of f64 is passed in another GPR.
7242     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7243     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
7244     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
7245   }
7246   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
7247 }
7248 
7249 // FastCC has less than 1% performance improvement for some particular
7250 // benchmark. But theoretically, it may has benenfit for some cases.
7251 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
7252                             unsigned ValNo, MVT ValVT, MVT LocVT,
7253                             CCValAssign::LocInfo LocInfo,
7254                             ISD::ArgFlagsTy ArgFlags, CCState &State,
7255                             bool IsFixed, bool IsRet, Type *OrigTy,
7256                             const RISCVTargetLowering &TLI,
7257                             Optional<unsigned> FirstMaskArgument) {
7258 
7259   // X5 and X6 might be used for save-restore libcall.
7260   static const MCPhysReg GPRList[] = {
7261       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
7262       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
7263       RISCV::X29, RISCV::X30, RISCV::X31};
7264 
7265   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7266     if (unsigned Reg = State.AllocateReg(GPRList)) {
7267       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7268       return false;
7269     }
7270   }
7271 
7272   if (LocVT == MVT::f16) {
7273     static const MCPhysReg FPR16List[] = {
7274         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
7275         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
7276         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
7277         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
7278     if (unsigned Reg = State.AllocateReg(FPR16List)) {
7279       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7280       return false;
7281     }
7282   }
7283 
7284   if (LocVT == MVT::f32) {
7285     static const MCPhysReg FPR32List[] = {
7286         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
7287         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
7288         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
7289         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
7290     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7291       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7292       return false;
7293     }
7294   }
7295 
7296   if (LocVT == MVT::f64) {
7297     static const MCPhysReg FPR64List[] = {
7298         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
7299         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
7300         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
7301         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
7302     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7303       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7304       return false;
7305     }
7306   }
7307 
7308   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
7309     unsigned Offset4 = State.AllocateStack(4, Align(4));
7310     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
7311     return false;
7312   }
7313 
7314   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
7315     unsigned Offset5 = State.AllocateStack(8, Align(8));
7316     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
7317     return false;
7318   }
7319 
7320   if (LocVT.isVector()) {
7321     if (unsigned Reg =
7322             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
7323       // Fixed-length vectors are located in the corresponding scalable-vector
7324       // container types.
7325       if (ValVT.isFixedLengthVector())
7326         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7327       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7328     } else {
7329       // Try and pass the address via a "fast" GPR.
7330       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
7331         LocInfo = CCValAssign::Indirect;
7332         LocVT = TLI.getSubtarget().getXLenVT();
7333         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
7334       } else if (ValVT.isFixedLengthVector()) {
7335         auto StackAlign =
7336             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7337         unsigned StackOffset =
7338             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
7339         State.addLoc(
7340             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7341       } else {
7342         // Can't pass scalable vectors on the stack.
7343         return true;
7344       }
7345     }
7346 
7347     return false;
7348   }
7349 
7350   return true; // CC didn't match.
7351 }
7352 
7353 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
7354                          CCValAssign::LocInfo LocInfo,
7355                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
7356 
7357   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7358     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
7359     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
7360     static const MCPhysReg GPRList[] = {
7361         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
7362         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
7363     if (unsigned Reg = State.AllocateReg(GPRList)) {
7364       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7365       return false;
7366     }
7367   }
7368 
7369   if (LocVT == MVT::f32) {
7370     // Pass in STG registers: F1, ..., F6
7371     //                        fs0 ... fs5
7372     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
7373                                           RISCV::F18_F, RISCV::F19_F,
7374                                           RISCV::F20_F, RISCV::F21_F};
7375     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7376       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7377       return false;
7378     }
7379   }
7380 
7381   if (LocVT == MVT::f64) {
7382     // Pass in STG registers: D1, ..., D6
7383     //                        fs6 ... fs11
7384     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
7385                                           RISCV::F24_D, RISCV::F25_D,
7386                                           RISCV::F26_D, RISCV::F27_D};
7387     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7388       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7389       return false;
7390     }
7391   }
7392 
7393   report_fatal_error("No registers left in GHC calling convention");
7394   return true;
7395 }
7396 
7397 // Transform physical registers into virtual registers.
7398 SDValue RISCVTargetLowering::LowerFormalArguments(
7399     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
7400     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
7401     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7402 
7403   MachineFunction &MF = DAG.getMachineFunction();
7404 
7405   switch (CallConv) {
7406   default:
7407     report_fatal_error("Unsupported calling convention");
7408   case CallingConv::C:
7409   case CallingConv::Fast:
7410     break;
7411   case CallingConv::GHC:
7412     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
7413         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
7414       report_fatal_error(
7415         "GHC calling convention requires the F and D instruction set extensions");
7416   }
7417 
7418   const Function &Func = MF.getFunction();
7419   if (Func.hasFnAttribute("interrupt")) {
7420     if (!Func.arg_empty())
7421       report_fatal_error(
7422         "Functions with the interrupt attribute cannot have arguments!");
7423 
7424     StringRef Kind =
7425       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7426 
7427     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
7428       report_fatal_error(
7429         "Function interrupt attribute argument not supported!");
7430   }
7431 
7432   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7433   MVT XLenVT = Subtarget.getXLenVT();
7434   unsigned XLenInBytes = Subtarget.getXLen() / 8;
7435   // Used with vargs to acumulate store chains.
7436   std::vector<SDValue> OutChains;
7437 
7438   // Assign locations to all of the incoming arguments.
7439   SmallVector<CCValAssign, 16> ArgLocs;
7440   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7441 
7442   if (CallConv == CallingConv::GHC)
7443     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
7444   else
7445     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
7446                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7447                                                    : CC_RISCV);
7448 
7449   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7450     CCValAssign &VA = ArgLocs[i];
7451     SDValue ArgValue;
7452     // Passing f64 on RV32D with a soft float ABI must be handled as a special
7453     // case.
7454     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
7455       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
7456     else if (VA.isRegLoc())
7457       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
7458     else
7459       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
7460 
7461     if (VA.getLocInfo() == CCValAssign::Indirect) {
7462       // If the original argument was split and passed by reference (e.g. i128
7463       // on RV32), we need to load all parts of it here (using the same
7464       // address). Vectors may be partly split to registers and partly to the
7465       // stack, in which case the base address is partly offset and subsequent
7466       // stores are relative to that.
7467       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
7468                                    MachinePointerInfo()));
7469       unsigned ArgIndex = Ins[i].OrigArgIndex;
7470       unsigned ArgPartOffset = Ins[i].PartOffset;
7471       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7472       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
7473         CCValAssign &PartVA = ArgLocs[i + 1];
7474         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
7475         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7476         if (PartVA.getValVT().isScalableVector())
7477           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7478         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
7479         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
7480                                      MachinePointerInfo()));
7481         ++i;
7482       }
7483       continue;
7484     }
7485     InVals.push_back(ArgValue);
7486   }
7487 
7488   if (IsVarArg) {
7489     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
7490     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
7491     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
7492     MachineFrameInfo &MFI = MF.getFrameInfo();
7493     MachineRegisterInfo &RegInfo = MF.getRegInfo();
7494     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
7495 
7496     // Offset of the first variable argument from stack pointer, and size of
7497     // the vararg save area. For now, the varargs save area is either zero or
7498     // large enough to hold a0-a7.
7499     int VaArgOffset, VarArgsSaveSize;
7500 
7501     // If all registers are allocated, then all varargs must be passed on the
7502     // stack and we don't need to save any argregs.
7503     if (ArgRegs.size() == Idx) {
7504       VaArgOffset = CCInfo.getNextStackOffset();
7505       VarArgsSaveSize = 0;
7506     } else {
7507       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
7508       VaArgOffset = -VarArgsSaveSize;
7509     }
7510 
7511     // Record the frame index of the first variable argument
7512     // which is a value necessary to VASTART.
7513     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7514     RVFI->setVarArgsFrameIndex(FI);
7515 
7516     // If saving an odd number of registers then create an extra stack slot to
7517     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
7518     // offsets to even-numbered registered remain 2*XLEN-aligned.
7519     if (Idx % 2) {
7520       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
7521       VarArgsSaveSize += XLenInBytes;
7522     }
7523 
7524     // Copy the integer registers that may have been used for passing varargs
7525     // to the vararg save area.
7526     for (unsigned I = Idx; I < ArgRegs.size();
7527          ++I, VaArgOffset += XLenInBytes) {
7528       const Register Reg = RegInfo.createVirtualRegister(RC);
7529       RegInfo.addLiveIn(ArgRegs[I], Reg);
7530       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
7531       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7532       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7533       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
7534                                    MachinePointerInfo::getFixedStack(MF, FI));
7535       cast<StoreSDNode>(Store.getNode())
7536           ->getMemOperand()
7537           ->setValue((Value *)nullptr);
7538       OutChains.push_back(Store);
7539     }
7540     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
7541   }
7542 
7543   // All stores are grouped in one node to allow the matching between
7544   // the size of Ins and InVals. This only happens for vararg functions.
7545   if (!OutChains.empty()) {
7546     OutChains.push_back(Chain);
7547     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
7548   }
7549 
7550   return Chain;
7551 }
7552 
7553 /// isEligibleForTailCallOptimization - Check whether the call is eligible
7554 /// for tail call optimization.
7555 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
7556 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
7557     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
7558     const SmallVector<CCValAssign, 16> &ArgLocs) const {
7559 
7560   auto &Callee = CLI.Callee;
7561   auto CalleeCC = CLI.CallConv;
7562   auto &Outs = CLI.Outs;
7563   auto &Caller = MF.getFunction();
7564   auto CallerCC = Caller.getCallingConv();
7565 
7566   // Exception-handling functions need a special set of instructions to
7567   // indicate a return to the hardware. Tail-calling another function would
7568   // probably break this.
7569   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
7570   // should be expanded as new function attributes are introduced.
7571   if (Caller.hasFnAttribute("interrupt"))
7572     return false;
7573 
7574   // Do not tail call opt if the stack is used to pass parameters.
7575   if (CCInfo.getNextStackOffset() != 0)
7576     return false;
7577 
7578   // Do not tail call opt if any parameters need to be passed indirectly.
7579   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
7580   // passed indirectly. So the address of the value will be passed in a
7581   // register, or if not available, then the address is put on the stack. In
7582   // order to pass indirectly, space on the stack often needs to be allocated
7583   // in order to store the value. In this case the CCInfo.getNextStackOffset()
7584   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
7585   // are passed CCValAssign::Indirect.
7586   for (auto &VA : ArgLocs)
7587     if (VA.getLocInfo() == CCValAssign::Indirect)
7588       return false;
7589 
7590   // Do not tail call opt if either caller or callee uses struct return
7591   // semantics.
7592   auto IsCallerStructRet = Caller.hasStructRetAttr();
7593   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
7594   if (IsCallerStructRet || IsCalleeStructRet)
7595     return false;
7596 
7597   // Externally-defined functions with weak linkage should not be
7598   // tail-called. The behaviour of branch instructions in this situation (as
7599   // used for tail calls) is implementation-defined, so we cannot rely on the
7600   // linker replacing the tail call with a return.
7601   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
7602     const GlobalValue *GV = G->getGlobal();
7603     if (GV->hasExternalWeakLinkage())
7604       return false;
7605   }
7606 
7607   // The callee has to preserve all registers the caller needs to preserve.
7608   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
7609   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
7610   if (CalleeCC != CallerCC) {
7611     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
7612     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
7613       return false;
7614   }
7615 
7616   // Byval parameters hand the function a pointer directly into the stack area
7617   // we want to reuse during a tail call. Working around this *is* possible
7618   // but less efficient and uglier in LowerCall.
7619   for (auto &Arg : Outs)
7620     if (Arg.Flags.isByVal())
7621       return false;
7622 
7623   return true;
7624 }
7625 
7626 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
7627   return DAG.getDataLayout().getPrefTypeAlign(
7628       VT.getTypeForEVT(*DAG.getContext()));
7629 }
7630 
7631 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
7632 // and output parameter nodes.
7633 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
7634                                        SmallVectorImpl<SDValue> &InVals) const {
7635   SelectionDAG &DAG = CLI.DAG;
7636   SDLoc &DL = CLI.DL;
7637   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
7638   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
7639   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
7640   SDValue Chain = CLI.Chain;
7641   SDValue Callee = CLI.Callee;
7642   bool &IsTailCall = CLI.IsTailCall;
7643   CallingConv::ID CallConv = CLI.CallConv;
7644   bool IsVarArg = CLI.IsVarArg;
7645   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7646   MVT XLenVT = Subtarget.getXLenVT();
7647 
7648   MachineFunction &MF = DAG.getMachineFunction();
7649 
7650   // Analyze the operands of the call, assigning locations to each operand.
7651   SmallVector<CCValAssign, 16> ArgLocs;
7652   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7653 
7654   if (CallConv == CallingConv::GHC)
7655     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
7656   else
7657     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
7658                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7659                                                     : CC_RISCV);
7660 
7661   // Check if it's really possible to do a tail call.
7662   if (IsTailCall)
7663     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
7664 
7665   if (IsTailCall)
7666     ++NumTailCalls;
7667   else if (CLI.CB && CLI.CB->isMustTailCall())
7668     report_fatal_error("failed to perform tail call elimination on a call "
7669                        "site marked musttail");
7670 
7671   // Get a count of how many bytes are to be pushed on the stack.
7672   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
7673 
7674   // Create local copies for byval args
7675   SmallVector<SDValue, 8> ByValArgs;
7676   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7677     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7678     if (!Flags.isByVal())
7679       continue;
7680 
7681     SDValue Arg = OutVals[i];
7682     unsigned Size = Flags.getByValSize();
7683     Align Alignment = Flags.getNonZeroByValAlign();
7684 
7685     int FI =
7686         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
7687     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7688     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
7689 
7690     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
7691                           /*IsVolatile=*/false,
7692                           /*AlwaysInline=*/false, IsTailCall,
7693                           MachinePointerInfo(), MachinePointerInfo());
7694     ByValArgs.push_back(FIPtr);
7695   }
7696 
7697   if (!IsTailCall)
7698     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
7699 
7700   // Copy argument values to their designated locations.
7701   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
7702   SmallVector<SDValue, 8> MemOpChains;
7703   SDValue StackPtr;
7704   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
7705     CCValAssign &VA = ArgLocs[i];
7706     SDValue ArgValue = OutVals[i];
7707     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7708 
7709     // Handle passing f64 on RV32D with a soft float ABI as a special case.
7710     bool IsF64OnRV32DSoftABI =
7711         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
7712     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
7713       SDValue SplitF64 = DAG.getNode(
7714           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
7715       SDValue Lo = SplitF64.getValue(0);
7716       SDValue Hi = SplitF64.getValue(1);
7717 
7718       Register RegLo = VA.getLocReg();
7719       RegsToPass.push_back(std::make_pair(RegLo, Lo));
7720 
7721       if (RegLo == RISCV::X17) {
7722         // Second half of f64 is passed on the stack.
7723         // Work out the address of the stack slot.
7724         if (!StackPtr.getNode())
7725           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7726         // Emit the store.
7727         MemOpChains.push_back(
7728             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
7729       } else {
7730         // Second half of f64 is passed in another GPR.
7731         assert(RegLo < RISCV::X31 && "Invalid register pair");
7732         Register RegHigh = RegLo + 1;
7733         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
7734       }
7735       continue;
7736     }
7737 
7738     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
7739     // as any other MemLoc.
7740 
7741     // Promote the value if needed.
7742     // For now, only handle fully promoted and indirect arguments.
7743     if (VA.getLocInfo() == CCValAssign::Indirect) {
7744       // Store the argument in a stack slot and pass its address.
7745       Align StackAlign =
7746           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
7747                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
7748       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
7749       // If the original argument was split (e.g. i128), we need
7750       // to store the required parts of it here (and pass just one address).
7751       // Vectors may be partly split to registers and partly to the stack, in
7752       // which case the base address is partly offset and subsequent stores are
7753       // relative to that.
7754       unsigned ArgIndex = Outs[i].OrigArgIndex;
7755       unsigned ArgPartOffset = Outs[i].PartOffset;
7756       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7757       // Calculate the total size to store. We don't have access to what we're
7758       // actually storing other than performing the loop and collecting the
7759       // info.
7760       SmallVector<std::pair<SDValue, SDValue>> Parts;
7761       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
7762         SDValue PartValue = OutVals[i + 1];
7763         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
7764         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7765         EVT PartVT = PartValue.getValueType();
7766         if (PartVT.isScalableVector())
7767           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7768         StoredSize += PartVT.getStoreSize();
7769         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
7770         Parts.push_back(std::make_pair(PartValue, Offset));
7771         ++i;
7772       }
7773       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
7774       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
7775       MemOpChains.push_back(
7776           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
7777                        MachinePointerInfo::getFixedStack(MF, FI)));
7778       for (const auto &Part : Parts) {
7779         SDValue PartValue = Part.first;
7780         SDValue PartOffset = Part.second;
7781         SDValue Address =
7782             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
7783         MemOpChains.push_back(
7784             DAG.getStore(Chain, DL, PartValue, Address,
7785                          MachinePointerInfo::getFixedStack(MF, FI)));
7786       }
7787       ArgValue = SpillSlot;
7788     } else {
7789       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
7790     }
7791 
7792     // Use local copy if it is a byval arg.
7793     if (Flags.isByVal())
7794       ArgValue = ByValArgs[j++];
7795 
7796     if (VA.isRegLoc()) {
7797       // Queue up the argument copies and emit them at the end.
7798       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
7799     } else {
7800       assert(VA.isMemLoc() && "Argument not register or memory");
7801       assert(!IsTailCall && "Tail call not allowed if stack is used "
7802                             "for passing parameters");
7803 
7804       // Work out the address of the stack slot.
7805       if (!StackPtr.getNode())
7806         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7807       SDValue Address =
7808           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
7809                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
7810 
7811       // Emit the store.
7812       MemOpChains.push_back(
7813           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
7814     }
7815   }
7816 
7817   // Join the stores, which are independent of one another.
7818   if (!MemOpChains.empty())
7819     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
7820 
7821   SDValue Glue;
7822 
7823   // Build a sequence of copy-to-reg nodes, chained and glued together.
7824   for (auto &Reg : RegsToPass) {
7825     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
7826     Glue = Chain.getValue(1);
7827   }
7828 
7829   // Validate that none of the argument registers have been marked as
7830   // reserved, if so report an error. Do the same for the return address if this
7831   // is not a tailcall.
7832   validateCCReservedRegs(RegsToPass, MF);
7833   if (!IsTailCall &&
7834       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
7835     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7836         MF.getFunction(),
7837         "Return address register required, but has been reserved."});
7838 
7839   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
7840   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
7841   // split it and then direct call can be matched by PseudoCALL.
7842   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
7843     const GlobalValue *GV = S->getGlobal();
7844 
7845     unsigned OpFlags = RISCVII::MO_CALL;
7846     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
7847       OpFlags = RISCVII::MO_PLT;
7848 
7849     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
7850   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
7851     unsigned OpFlags = RISCVII::MO_CALL;
7852 
7853     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
7854                                                  nullptr))
7855       OpFlags = RISCVII::MO_PLT;
7856 
7857     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
7858   }
7859 
7860   // The first call operand is the chain and the second is the target address.
7861   SmallVector<SDValue, 8> Ops;
7862   Ops.push_back(Chain);
7863   Ops.push_back(Callee);
7864 
7865   // Add argument registers to the end of the list so that they are
7866   // known live into the call.
7867   for (auto &Reg : RegsToPass)
7868     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
7869 
7870   if (!IsTailCall) {
7871     // Add a register mask operand representing the call-preserved registers.
7872     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
7873     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
7874     assert(Mask && "Missing call preserved mask for calling convention");
7875     Ops.push_back(DAG.getRegisterMask(Mask));
7876   }
7877 
7878   // Glue the call to the argument copies, if any.
7879   if (Glue.getNode())
7880     Ops.push_back(Glue);
7881 
7882   // Emit the call.
7883   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7884 
7885   if (IsTailCall) {
7886     MF.getFrameInfo().setHasTailCall();
7887     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
7888   }
7889 
7890   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
7891   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
7892   Glue = Chain.getValue(1);
7893 
7894   // Mark the end of the call, which is glued to the call itself.
7895   Chain = DAG.getCALLSEQ_END(Chain,
7896                              DAG.getConstant(NumBytes, DL, PtrVT, true),
7897                              DAG.getConstant(0, DL, PtrVT, true),
7898                              Glue, DL);
7899   Glue = Chain.getValue(1);
7900 
7901   // Assign locations to each value returned by this call.
7902   SmallVector<CCValAssign, 16> RVLocs;
7903   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
7904   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
7905 
7906   // Copy all of the result registers out of their specified physreg.
7907   for (auto &VA : RVLocs) {
7908     // Copy the value out
7909     SDValue RetValue =
7910         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
7911     // Glue the RetValue to the end of the call sequence
7912     Chain = RetValue.getValue(1);
7913     Glue = RetValue.getValue(2);
7914 
7915     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
7916       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
7917       SDValue RetValue2 =
7918           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
7919       Chain = RetValue2.getValue(1);
7920       Glue = RetValue2.getValue(2);
7921       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
7922                              RetValue2);
7923     }
7924 
7925     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
7926 
7927     InVals.push_back(RetValue);
7928   }
7929 
7930   return Chain;
7931 }
7932 
7933 bool RISCVTargetLowering::CanLowerReturn(
7934     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
7935     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
7936   SmallVector<CCValAssign, 16> RVLocs;
7937   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
7938 
7939   Optional<unsigned> FirstMaskArgument;
7940   if (Subtarget.hasStdExtV())
7941     FirstMaskArgument = preAssignMask(Outs);
7942 
7943   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7944     MVT VT = Outs[i].VT;
7945     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
7946     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7947     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
7948                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
7949                  *this, FirstMaskArgument))
7950       return false;
7951   }
7952   return true;
7953 }
7954 
7955 SDValue
7956 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7957                                  bool IsVarArg,
7958                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
7959                                  const SmallVectorImpl<SDValue> &OutVals,
7960                                  const SDLoc &DL, SelectionDAG &DAG) const {
7961   const MachineFunction &MF = DAG.getMachineFunction();
7962   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
7963 
7964   // Stores the assignment of the return value to a location.
7965   SmallVector<CCValAssign, 16> RVLocs;
7966 
7967   // Info about the registers and stack slot.
7968   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
7969                  *DAG.getContext());
7970 
7971   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
7972                     nullptr, CC_RISCV);
7973 
7974   if (CallConv == CallingConv::GHC && !RVLocs.empty())
7975     report_fatal_error("GHC functions return void only");
7976 
7977   SDValue Glue;
7978   SmallVector<SDValue, 4> RetOps(1, Chain);
7979 
7980   // Copy the result values into the output registers.
7981   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
7982     SDValue Val = OutVals[i];
7983     CCValAssign &VA = RVLocs[i];
7984     assert(VA.isRegLoc() && "Can only return in registers!");
7985 
7986     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
7987       // Handle returning f64 on RV32D with a soft float ABI.
7988       assert(VA.isRegLoc() && "Expected return via registers");
7989       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
7990                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
7991       SDValue Lo = SplitF64.getValue(0);
7992       SDValue Hi = SplitF64.getValue(1);
7993       Register RegLo = VA.getLocReg();
7994       assert(RegLo < RISCV::X31 && "Invalid register pair");
7995       Register RegHi = RegLo + 1;
7996 
7997       if (STI.isRegisterReservedByUser(RegLo) ||
7998           STI.isRegisterReservedByUser(RegHi))
7999         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8000             MF.getFunction(),
8001             "Return value register required, but has been reserved."});
8002 
8003       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
8004       Glue = Chain.getValue(1);
8005       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
8006       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
8007       Glue = Chain.getValue(1);
8008       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
8009     } else {
8010       // Handle a 'normal' return.
8011       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
8012       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
8013 
8014       if (STI.isRegisterReservedByUser(VA.getLocReg()))
8015         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8016             MF.getFunction(),
8017             "Return value register required, but has been reserved."});
8018 
8019       // Guarantee that all emitted copies are stuck together.
8020       Glue = Chain.getValue(1);
8021       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
8022     }
8023   }
8024 
8025   RetOps[0] = Chain; // Update chain.
8026 
8027   // Add the glue node if we have it.
8028   if (Glue.getNode()) {
8029     RetOps.push_back(Glue);
8030   }
8031 
8032   unsigned RetOpc = RISCVISD::RET_FLAG;
8033   // Interrupt service routines use different return instructions.
8034   const Function &Func = DAG.getMachineFunction().getFunction();
8035   if (Func.hasFnAttribute("interrupt")) {
8036     if (!Func.getReturnType()->isVoidTy())
8037       report_fatal_error(
8038           "Functions with the interrupt attribute must have void return type!");
8039 
8040     MachineFunction &MF = DAG.getMachineFunction();
8041     StringRef Kind =
8042       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
8043 
8044     if (Kind == "user")
8045       RetOpc = RISCVISD::URET_FLAG;
8046     else if (Kind == "supervisor")
8047       RetOpc = RISCVISD::SRET_FLAG;
8048     else
8049       RetOpc = RISCVISD::MRET_FLAG;
8050   }
8051 
8052   return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
8053 }
8054 
8055 void RISCVTargetLowering::validateCCReservedRegs(
8056     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
8057     MachineFunction &MF) const {
8058   const Function &F = MF.getFunction();
8059   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
8060 
8061   if (llvm::any_of(Regs, [&STI](auto Reg) {
8062         return STI.isRegisterReservedByUser(Reg.first);
8063       }))
8064     F.getContext().diagnose(DiagnosticInfoUnsupported{
8065         F, "Argument register required, but has been reserved."});
8066 }
8067 
8068 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
8069   return CI->isTailCall();
8070 }
8071 
8072 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
8073 #define NODE_NAME_CASE(NODE)                                                   \
8074   case RISCVISD::NODE:                                                         \
8075     return "RISCVISD::" #NODE;
8076   // clang-format off
8077   switch ((RISCVISD::NodeType)Opcode) {
8078   case RISCVISD::FIRST_NUMBER:
8079     break;
8080   NODE_NAME_CASE(RET_FLAG)
8081   NODE_NAME_CASE(URET_FLAG)
8082   NODE_NAME_CASE(SRET_FLAG)
8083   NODE_NAME_CASE(MRET_FLAG)
8084   NODE_NAME_CASE(CALL)
8085   NODE_NAME_CASE(SELECT_CC)
8086   NODE_NAME_CASE(BR_CC)
8087   NODE_NAME_CASE(BuildPairF64)
8088   NODE_NAME_CASE(SplitF64)
8089   NODE_NAME_CASE(TAIL)
8090   NODE_NAME_CASE(MULHSU)
8091   NODE_NAME_CASE(SLLW)
8092   NODE_NAME_CASE(SRAW)
8093   NODE_NAME_CASE(SRLW)
8094   NODE_NAME_CASE(DIVW)
8095   NODE_NAME_CASE(DIVUW)
8096   NODE_NAME_CASE(REMUW)
8097   NODE_NAME_CASE(ROLW)
8098   NODE_NAME_CASE(RORW)
8099   NODE_NAME_CASE(CLZW)
8100   NODE_NAME_CASE(CTZW)
8101   NODE_NAME_CASE(FSLW)
8102   NODE_NAME_CASE(FSRW)
8103   NODE_NAME_CASE(FSL)
8104   NODE_NAME_CASE(FSR)
8105   NODE_NAME_CASE(FMV_H_X)
8106   NODE_NAME_CASE(FMV_X_ANYEXTH)
8107   NODE_NAME_CASE(FMV_W_X_RV64)
8108   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
8109   NODE_NAME_CASE(READ_CYCLE_WIDE)
8110   NODE_NAME_CASE(GREV)
8111   NODE_NAME_CASE(GREVW)
8112   NODE_NAME_CASE(GORC)
8113   NODE_NAME_CASE(GORCW)
8114   NODE_NAME_CASE(SHFL)
8115   NODE_NAME_CASE(SHFLW)
8116   NODE_NAME_CASE(UNSHFL)
8117   NODE_NAME_CASE(UNSHFLW)
8118   NODE_NAME_CASE(BCOMPRESS)
8119   NODE_NAME_CASE(BCOMPRESSW)
8120   NODE_NAME_CASE(BDECOMPRESS)
8121   NODE_NAME_CASE(BDECOMPRESSW)
8122   NODE_NAME_CASE(VMV_V_X_VL)
8123   NODE_NAME_CASE(VFMV_V_F_VL)
8124   NODE_NAME_CASE(VMV_X_S)
8125   NODE_NAME_CASE(VMV_S_X_VL)
8126   NODE_NAME_CASE(VFMV_S_F_VL)
8127   NODE_NAME_CASE(SPLAT_VECTOR_I64)
8128   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
8129   NODE_NAME_CASE(READ_VLENB)
8130   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
8131   NODE_NAME_CASE(VSLIDEUP_VL)
8132   NODE_NAME_CASE(VSLIDE1UP_VL)
8133   NODE_NAME_CASE(VSLIDEDOWN_VL)
8134   NODE_NAME_CASE(VSLIDE1DOWN_VL)
8135   NODE_NAME_CASE(VID_VL)
8136   NODE_NAME_CASE(VFNCVT_ROD_VL)
8137   NODE_NAME_CASE(VECREDUCE_ADD_VL)
8138   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
8139   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
8140   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
8141   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
8142   NODE_NAME_CASE(VECREDUCE_AND_VL)
8143   NODE_NAME_CASE(VECREDUCE_OR_VL)
8144   NODE_NAME_CASE(VECREDUCE_XOR_VL)
8145   NODE_NAME_CASE(VECREDUCE_FADD_VL)
8146   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
8147   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
8148   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
8149   NODE_NAME_CASE(ADD_VL)
8150   NODE_NAME_CASE(AND_VL)
8151   NODE_NAME_CASE(MUL_VL)
8152   NODE_NAME_CASE(OR_VL)
8153   NODE_NAME_CASE(SDIV_VL)
8154   NODE_NAME_CASE(SHL_VL)
8155   NODE_NAME_CASE(SREM_VL)
8156   NODE_NAME_CASE(SRA_VL)
8157   NODE_NAME_CASE(SRL_VL)
8158   NODE_NAME_CASE(SUB_VL)
8159   NODE_NAME_CASE(UDIV_VL)
8160   NODE_NAME_CASE(UREM_VL)
8161   NODE_NAME_CASE(XOR_VL)
8162   NODE_NAME_CASE(FADD_VL)
8163   NODE_NAME_CASE(FSUB_VL)
8164   NODE_NAME_CASE(FMUL_VL)
8165   NODE_NAME_CASE(FDIV_VL)
8166   NODE_NAME_CASE(FNEG_VL)
8167   NODE_NAME_CASE(FABS_VL)
8168   NODE_NAME_CASE(FSQRT_VL)
8169   NODE_NAME_CASE(FMA_VL)
8170   NODE_NAME_CASE(FCOPYSIGN_VL)
8171   NODE_NAME_CASE(SMIN_VL)
8172   NODE_NAME_CASE(SMAX_VL)
8173   NODE_NAME_CASE(UMIN_VL)
8174   NODE_NAME_CASE(UMAX_VL)
8175   NODE_NAME_CASE(FMINNUM_VL)
8176   NODE_NAME_CASE(FMAXNUM_VL)
8177   NODE_NAME_CASE(MULHS_VL)
8178   NODE_NAME_CASE(MULHU_VL)
8179   NODE_NAME_CASE(FP_TO_SINT_VL)
8180   NODE_NAME_CASE(FP_TO_UINT_VL)
8181   NODE_NAME_CASE(SINT_TO_FP_VL)
8182   NODE_NAME_CASE(UINT_TO_FP_VL)
8183   NODE_NAME_CASE(FP_EXTEND_VL)
8184   NODE_NAME_CASE(FP_ROUND_VL)
8185   NODE_NAME_CASE(SETCC_VL)
8186   NODE_NAME_CASE(VSELECT_VL)
8187   NODE_NAME_CASE(VMAND_VL)
8188   NODE_NAME_CASE(VMOR_VL)
8189   NODE_NAME_CASE(VMXOR_VL)
8190   NODE_NAME_CASE(VMCLR_VL)
8191   NODE_NAME_CASE(VMSET_VL)
8192   NODE_NAME_CASE(VRGATHER_VX_VL)
8193   NODE_NAME_CASE(VRGATHER_VV_VL)
8194   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
8195   NODE_NAME_CASE(VSEXT_VL)
8196   NODE_NAME_CASE(VZEXT_VL)
8197   NODE_NAME_CASE(VPOPC_VL)
8198   NODE_NAME_CASE(VLE_VL)
8199   NODE_NAME_CASE(VSE_VL)
8200   NODE_NAME_CASE(READ_CSR)
8201   NODE_NAME_CASE(WRITE_CSR)
8202   NODE_NAME_CASE(SWAP_CSR)
8203   }
8204   // clang-format on
8205   return nullptr;
8206 #undef NODE_NAME_CASE
8207 }
8208 
8209 /// getConstraintType - Given a constraint letter, return the type of
8210 /// constraint it is for this target.
8211 RISCVTargetLowering::ConstraintType
8212 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
8213   if (Constraint.size() == 1) {
8214     switch (Constraint[0]) {
8215     default:
8216       break;
8217     case 'f':
8218     case 'v':
8219       return C_RegisterClass;
8220     case 'I':
8221     case 'J':
8222     case 'K':
8223       return C_Immediate;
8224     case 'A':
8225       return C_Memory;
8226     }
8227   }
8228   return TargetLowering::getConstraintType(Constraint);
8229 }
8230 
8231 std::pair<unsigned, const TargetRegisterClass *>
8232 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
8233                                                   StringRef Constraint,
8234                                                   MVT VT) const {
8235   // First, see if this is a constraint that directly corresponds to a
8236   // RISCV register class.
8237   if (Constraint.size() == 1) {
8238     switch (Constraint[0]) {
8239     case 'r':
8240       return std::make_pair(0U, &RISCV::GPRRegClass);
8241     case 'f':
8242       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
8243         return std::make_pair(0U, &RISCV::FPR16RegClass);
8244       if (Subtarget.hasStdExtF() && VT == MVT::f32)
8245         return std::make_pair(0U, &RISCV::FPR32RegClass);
8246       if (Subtarget.hasStdExtD() && VT == MVT::f64)
8247         return std::make_pair(0U, &RISCV::FPR64RegClass);
8248       break;
8249     case 'v':
8250       for (const auto *RC :
8251            {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
8252             &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8253         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
8254           return std::make_pair(0U, RC);
8255       }
8256       break;
8257     default:
8258       break;
8259     }
8260   }
8261 
8262   // Clang will correctly decode the usage of register name aliases into their
8263   // official names. However, other frontends like `rustc` do not. This allows
8264   // users of these frontends to use the ABI names for registers in LLVM-style
8265   // register constraints.
8266   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
8267                                .Case("{zero}", RISCV::X0)
8268                                .Case("{ra}", RISCV::X1)
8269                                .Case("{sp}", RISCV::X2)
8270                                .Case("{gp}", RISCV::X3)
8271                                .Case("{tp}", RISCV::X4)
8272                                .Case("{t0}", RISCV::X5)
8273                                .Case("{t1}", RISCV::X6)
8274                                .Case("{t2}", RISCV::X7)
8275                                .Cases("{s0}", "{fp}", RISCV::X8)
8276                                .Case("{s1}", RISCV::X9)
8277                                .Case("{a0}", RISCV::X10)
8278                                .Case("{a1}", RISCV::X11)
8279                                .Case("{a2}", RISCV::X12)
8280                                .Case("{a3}", RISCV::X13)
8281                                .Case("{a4}", RISCV::X14)
8282                                .Case("{a5}", RISCV::X15)
8283                                .Case("{a6}", RISCV::X16)
8284                                .Case("{a7}", RISCV::X17)
8285                                .Case("{s2}", RISCV::X18)
8286                                .Case("{s3}", RISCV::X19)
8287                                .Case("{s4}", RISCV::X20)
8288                                .Case("{s5}", RISCV::X21)
8289                                .Case("{s6}", RISCV::X22)
8290                                .Case("{s7}", RISCV::X23)
8291                                .Case("{s8}", RISCV::X24)
8292                                .Case("{s9}", RISCV::X25)
8293                                .Case("{s10}", RISCV::X26)
8294                                .Case("{s11}", RISCV::X27)
8295                                .Case("{t3}", RISCV::X28)
8296                                .Case("{t4}", RISCV::X29)
8297                                .Case("{t5}", RISCV::X30)
8298                                .Case("{t6}", RISCV::X31)
8299                                .Default(RISCV::NoRegister);
8300   if (XRegFromAlias != RISCV::NoRegister)
8301     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
8302 
8303   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
8304   // TableGen record rather than the AsmName to choose registers for InlineAsm
8305   // constraints, plus we want to match those names to the widest floating point
8306   // register type available, manually select floating point registers here.
8307   //
8308   // The second case is the ABI name of the register, so that frontends can also
8309   // use the ABI names in register constraint lists.
8310   if (Subtarget.hasStdExtF()) {
8311     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
8312                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
8313                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
8314                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
8315                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
8316                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
8317                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
8318                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
8319                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
8320                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
8321                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
8322                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
8323                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
8324                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
8325                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
8326                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
8327                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
8328                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
8329                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
8330                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
8331                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
8332                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
8333                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
8334                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
8335                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
8336                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
8337                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
8338                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
8339                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
8340                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
8341                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
8342                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
8343                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
8344                         .Default(RISCV::NoRegister);
8345     if (FReg != RISCV::NoRegister) {
8346       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
8347       if (Subtarget.hasStdExtD()) {
8348         unsigned RegNo = FReg - RISCV::F0_F;
8349         unsigned DReg = RISCV::F0_D + RegNo;
8350         return std::make_pair(DReg, &RISCV::FPR64RegClass);
8351       }
8352       return std::make_pair(FReg, &RISCV::FPR32RegClass);
8353     }
8354   }
8355 
8356   if (Subtarget.hasStdExtV()) {
8357     Register VReg = StringSwitch<Register>(Constraint.lower())
8358                         .Case("{v0}", RISCV::V0)
8359                         .Case("{v1}", RISCV::V1)
8360                         .Case("{v2}", RISCV::V2)
8361                         .Case("{v3}", RISCV::V3)
8362                         .Case("{v4}", RISCV::V4)
8363                         .Case("{v5}", RISCV::V5)
8364                         .Case("{v6}", RISCV::V6)
8365                         .Case("{v7}", RISCV::V7)
8366                         .Case("{v8}", RISCV::V8)
8367                         .Case("{v9}", RISCV::V9)
8368                         .Case("{v10}", RISCV::V10)
8369                         .Case("{v11}", RISCV::V11)
8370                         .Case("{v12}", RISCV::V12)
8371                         .Case("{v13}", RISCV::V13)
8372                         .Case("{v14}", RISCV::V14)
8373                         .Case("{v15}", RISCV::V15)
8374                         .Case("{v16}", RISCV::V16)
8375                         .Case("{v17}", RISCV::V17)
8376                         .Case("{v18}", RISCV::V18)
8377                         .Case("{v19}", RISCV::V19)
8378                         .Case("{v20}", RISCV::V20)
8379                         .Case("{v21}", RISCV::V21)
8380                         .Case("{v22}", RISCV::V22)
8381                         .Case("{v23}", RISCV::V23)
8382                         .Case("{v24}", RISCV::V24)
8383                         .Case("{v25}", RISCV::V25)
8384                         .Case("{v26}", RISCV::V26)
8385                         .Case("{v27}", RISCV::V27)
8386                         .Case("{v28}", RISCV::V28)
8387                         .Case("{v29}", RISCV::V29)
8388                         .Case("{v30}", RISCV::V30)
8389                         .Case("{v31}", RISCV::V31)
8390                         .Default(RISCV::NoRegister);
8391     if (VReg != RISCV::NoRegister) {
8392       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
8393         return std::make_pair(VReg, &RISCV::VMRegClass);
8394       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
8395         return std::make_pair(VReg, &RISCV::VRRegClass);
8396       for (const auto *RC :
8397            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8398         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
8399           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
8400           return std::make_pair(VReg, RC);
8401         }
8402       }
8403     }
8404   }
8405 
8406   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
8407 }
8408 
8409 unsigned
8410 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
8411   // Currently only support length 1 constraints.
8412   if (ConstraintCode.size() == 1) {
8413     switch (ConstraintCode[0]) {
8414     case 'A':
8415       return InlineAsm::Constraint_A;
8416     default:
8417       break;
8418     }
8419   }
8420 
8421   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
8422 }
8423 
8424 void RISCVTargetLowering::LowerAsmOperandForConstraint(
8425     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
8426     SelectionDAG &DAG) const {
8427   // Currently only support length 1 constraints.
8428   if (Constraint.length() == 1) {
8429     switch (Constraint[0]) {
8430     case 'I':
8431       // Validate & create a 12-bit signed immediate operand.
8432       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8433         uint64_t CVal = C->getSExtValue();
8434         if (isInt<12>(CVal))
8435           Ops.push_back(
8436               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8437       }
8438       return;
8439     case 'J':
8440       // Validate & create an integer zero operand.
8441       if (auto *C = dyn_cast<ConstantSDNode>(Op))
8442         if (C->getZExtValue() == 0)
8443           Ops.push_back(
8444               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
8445       return;
8446     case 'K':
8447       // Validate & create a 5-bit unsigned immediate operand.
8448       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8449         uint64_t CVal = C->getZExtValue();
8450         if (isUInt<5>(CVal))
8451           Ops.push_back(
8452               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8453       }
8454       return;
8455     default:
8456       break;
8457     }
8458   }
8459   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
8460 }
8461 
8462 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
8463                                                    Instruction *Inst,
8464                                                    AtomicOrdering Ord) const {
8465   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
8466     return Builder.CreateFence(Ord);
8467   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
8468     return Builder.CreateFence(AtomicOrdering::Release);
8469   return nullptr;
8470 }
8471 
8472 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
8473                                                     Instruction *Inst,
8474                                                     AtomicOrdering Ord) const {
8475   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
8476     return Builder.CreateFence(AtomicOrdering::Acquire);
8477   return nullptr;
8478 }
8479 
8480 TargetLowering::AtomicExpansionKind
8481 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
8482   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
8483   // point operations can't be used in an lr/sc sequence without breaking the
8484   // forward-progress guarantee.
8485   if (AI->isFloatingPointOperation())
8486     return AtomicExpansionKind::CmpXChg;
8487 
8488   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
8489   if (Size == 8 || Size == 16)
8490     return AtomicExpansionKind::MaskedIntrinsic;
8491   return AtomicExpansionKind::None;
8492 }
8493 
8494 static Intrinsic::ID
8495 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
8496   if (XLen == 32) {
8497     switch (BinOp) {
8498     default:
8499       llvm_unreachable("Unexpected AtomicRMW BinOp");
8500     case AtomicRMWInst::Xchg:
8501       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
8502     case AtomicRMWInst::Add:
8503       return Intrinsic::riscv_masked_atomicrmw_add_i32;
8504     case AtomicRMWInst::Sub:
8505       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
8506     case AtomicRMWInst::Nand:
8507       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
8508     case AtomicRMWInst::Max:
8509       return Intrinsic::riscv_masked_atomicrmw_max_i32;
8510     case AtomicRMWInst::Min:
8511       return Intrinsic::riscv_masked_atomicrmw_min_i32;
8512     case AtomicRMWInst::UMax:
8513       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
8514     case AtomicRMWInst::UMin:
8515       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
8516     }
8517   }
8518 
8519   if (XLen == 64) {
8520     switch (BinOp) {
8521     default:
8522       llvm_unreachable("Unexpected AtomicRMW BinOp");
8523     case AtomicRMWInst::Xchg:
8524       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
8525     case AtomicRMWInst::Add:
8526       return Intrinsic::riscv_masked_atomicrmw_add_i64;
8527     case AtomicRMWInst::Sub:
8528       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
8529     case AtomicRMWInst::Nand:
8530       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
8531     case AtomicRMWInst::Max:
8532       return Intrinsic::riscv_masked_atomicrmw_max_i64;
8533     case AtomicRMWInst::Min:
8534       return Intrinsic::riscv_masked_atomicrmw_min_i64;
8535     case AtomicRMWInst::UMax:
8536       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
8537     case AtomicRMWInst::UMin:
8538       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
8539     }
8540   }
8541 
8542   llvm_unreachable("Unexpected XLen\n");
8543 }
8544 
8545 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
8546     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
8547     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
8548   unsigned XLen = Subtarget.getXLen();
8549   Value *Ordering =
8550       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
8551   Type *Tys[] = {AlignedAddr->getType()};
8552   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
8553       AI->getModule(),
8554       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
8555 
8556   if (XLen == 64) {
8557     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
8558     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8559     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
8560   }
8561 
8562   Value *Result;
8563 
8564   // Must pass the shift amount needed to sign extend the loaded value prior
8565   // to performing a signed comparison for min/max. ShiftAmt is the number of
8566   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
8567   // is the number of bits to left+right shift the value in order to
8568   // sign-extend.
8569   if (AI->getOperation() == AtomicRMWInst::Min ||
8570       AI->getOperation() == AtomicRMWInst::Max) {
8571     const DataLayout &DL = AI->getModule()->getDataLayout();
8572     unsigned ValWidth =
8573         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
8574     Value *SextShamt =
8575         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
8576     Result = Builder.CreateCall(LrwOpScwLoop,
8577                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
8578   } else {
8579     Result =
8580         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
8581   }
8582 
8583   if (XLen == 64)
8584     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8585   return Result;
8586 }
8587 
8588 TargetLowering::AtomicExpansionKind
8589 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
8590     AtomicCmpXchgInst *CI) const {
8591   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
8592   if (Size == 8 || Size == 16)
8593     return AtomicExpansionKind::MaskedIntrinsic;
8594   return AtomicExpansionKind::None;
8595 }
8596 
8597 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
8598     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
8599     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
8600   unsigned XLen = Subtarget.getXLen();
8601   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
8602   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
8603   if (XLen == 64) {
8604     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
8605     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
8606     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8607     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
8608   }
8609   Type *Tys[] = {AlignedAddr->getType()};
8610   Function *MaskedCmpXchg =
8611       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
8612   Value *Result = Builder.CreateCall(
8613       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
8614   if (XLen == 64)
8615     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8616   return Result;
8617 }
8618 
8619 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
8620   return false;
8621 }
8622 
8623 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
8624                                                      EVT VT) const {
8625   VT = VT.getScalarType();
8626 
8627   if (!VT.isSimple())
8628     return false;
8629 
8630   switch (VT.getSimpleVT().SimpleTy) {
8631   case MVT::f16:
8632     return Subtarget.hasStdExtZfh();
8633   case MVT::f32:
8634     return Subtarget.hasStdExtF();
8635   case MVT::f64:
8636     return Subtarget.hasStdExtD();
8637   default:
8638     break;
8639   }
8640 
8641   return false;
8642 }
8643 
8644 Register RISCVTargetLowering::getExceptionPointerRegister(
8645     const Constant *PersonalityFn) const {
8646   return RISCV::X10;
8647 }
8648 
8649 Register RISCVTargetLowering::getExceptionSelectorRegister(
8650     const Constant *PersonalityFn) const {
8651   return RISCV::X11;
8652 }
8653 
8654 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
8655   // Return false to suppress the unnecessary extensions if the LibCall
8656   // arguments or return value is f32 type for LP64 ABI.
8657   RISCVABI::ABI ABI = Subtarget.getTargetABI();
8658   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
8659     return false;
8660 
8661   return true;
8662 }
8663 
8664 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
8665   if (Subtarget.is64Bit() && Type == MVT::i32)
8666     return true;
8667 
8668   return IsSigned;
8669 }
8670 
8671 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
8672                                                  SDValue C) const {
8673   // Check integral scalar types.
8674   if (VT.isScalarInteger()) {
8675     // Omit the optimization if the sub target has the M extension and the data
8676     // size exceeds XLen.
8677     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
8678       return false;
8679     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
8680       // Break the MUL to a SLLI and an ADD/SUB.
8681       const APInt &Imm = ConstNode->getAPIntValue();
8682       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
8683           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
8684         return true;
8685       // Omit the following optimization if the sub target has the M extension
8686       // and the data size >= XLen.
8687       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
8688         return false;
8689       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
8690       // a pair of LUI/ADDI.
8691       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
8692         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
8693         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
8694             (1 - ImmS).isPowerOf2())
8695         return true;
8696       }
8697     }
8698   }
8699 
8700   return false;
8701 }
8702 
8703 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
8704     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
8705     bool *Fast) const {
8706   if (!VT.isVector())
8707     return false;
8708 
8709   EVT ElemVT = VT.getVectorElementType();
8710   if (Alignment >= ElemVT.getStoreSize()) {
8711     if (Fast)
8712       *Fast = true;
8713     return true;
8714   }
8715 
8716   return false;
8717 }
8718 
8719 bool RISCVTargetLowering::splitValueIntoRegisterParts(
8720     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
8721     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
8722   bool IsABIRegCopy = CC.hasValue();
8723   EVT ValueVT = Val.getValueType();
8724   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8725     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
8726     // and cast to f32.
8727     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
8728     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
8729     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
8730                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
8731     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
8732     Parts[0] = Val;
8733     return true;
8734   }
8735 
8736   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8737     LLVMContext &Context = *DAG.getContext();
8738     EVT ValueEltVT = ValueVT.getVectorElementType();
8739     EVT PartEltVT = PartVT.getVectorElementType();
8740     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8741     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8742     if (PartVTBitSize % ValueVTBitSize == 0) {
8743       // If the element types are different, bitcast to the same element type of
8744       // PartVT first.
8745       if (ValueEltVT != PartEltVT) {
8746         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
8747         assert(Count != 0 && "The number of element should not be zero.");
8748         EVT SameEltTypeVT =
8749             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8750         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
8751       }
8752       Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
8753                         Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8754       Parts[0] = Val;
8755       return true;
8756     }
8757   }
8758   return false;
8759 }
8760 
8761 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
8762     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
8763     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
8764   bool IsABIRegCopy = CC.hasValue();
8765   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8766     SDValue Val = Parts[0];
8767 
8768     // Cast the f32 to i32, truncate to i16, and cast back to f16.
8769     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
8770     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
8771     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
8772     return Val;
8773   }
8774 
8775   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8776     LLVMContext &Context = *DAG.getContext();
8777     SDValue Val = Parts[0];
8778     EVT ValueEltVT = ValueVT.getVectorElementType();
8779     EVT PartEltVT = PartVT.getVectorElementType();
8780     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8781     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8782     if (PartVTBitSize % ValueVTBitSize == 0) {
8783       EVT SameEltTypeVT = ValueVT;
8784       // If the element types are different, convert it to the same element type
8785       // of PartVT.
8786       if (ValueEltVT != PartEltVT) {
8787         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
8788         assert(Count != 0 && "The number of element should not be zero.");
8789         SameEltTypeVT =
8790             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8791       }
8792       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
8793                         DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8794       if (ValueEltVT != PartEltVT)
8795         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
8796       return Val;
8797     }
8798   }
8799   return SDValue();
8800 }
8801 
8802 #define GET_REGISTER_MATCHER
8803 #include "RISCVGenAsmMatcher.inc"
8804 
8805 Register
8806 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
8807                                        const MachineFunction &MF) const {
8808   Register Reg = MatchRegisterAltName(RegName);
8809   if (Reg == RISCV::NoRegister)
8810     Reg = MatchRegisterName(RegName);
8811   if (Reg == RISCV::NoRegister)
8812     report_fatal_error(
8813         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
8814   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
8815   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
8816     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
8817                              StringRef(RegName) + "\"."));
8818   return Reg;
8819 }
8820 
8821 namespace llvm {
8822 namespace RISCVVIntrinsicsTable {
8823 
8824 #define GET_RISCVVIntrinsicsTable_IMPL
8825 #include "RISCVGenSearchableTables.inc"
8826 
8827 } // namespace RISCVVIntrinsicsTable
8828 
8829 } // namespace llvm
8830