1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
28 #include "llvm/CodeGen/ValueTypes.h"
29 #include "llvm/IR/DiagnosticInfo.h"
30 #include "llvm/IR/DiagnosticPrinter.h"
31 #include "llvm/IR/IntrinsicsRISCV.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/KnownBits.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "riscv-lower"
42 
43 STATISTIC(NumTailCalls, "Number of tail calls");
44 
45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
46                                          const RISCVSubtarget &STI)
47     : TargetLowering(TM), Subtarget(STI) {
48 
49   if (Subtarget.isRV32E())
50     report_fatal_error("Codegen not yet implemented for RV32E");
51 
52   RISCVABI::ABI ABI = Subtarget.getTargetABI();
53   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
54 
55   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
56       !Subtarget.hasStdExtF()) {
57     errs() << "Hard-float 'f' ABI can't be used for a target that "
58               "doesn't support the F instruction set extension (ignoring "
59               "target-abi)\n";
60     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
61   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
62              !Subtarget.hasStdExtD()) {
63     errs() << "Hard-float 'd' ABI can't be used for a target that "
64               "doesn't support the D instruction set extension (ignoring "
65               "target-abi)\n";
66     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
67   }
68 
69   switch (ABI) {
70   default:
71     report_fatal_error("Don't know how to lower this ABI");
72   case RISCVABI::ABI_ILP32:
73   case RISCVABI::ABI_ILP32F:
74   case RISCVABI::ABI_ILP32D:
75   case RISCVABI::ABI_LP64:
76   case RISCVABI::ABI_LP64F:
77   case RISCVABI::ABI_LP64D:
78     break;
79   }
80 
81   MVT XLenVT = Subtarget.getXLenVT();
82 
83   // Set up the register classes.
84   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
85 
86   if (Subtarget.hasStdExtZfh())
87     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
88   if (Subtarget.hasStdExtF())
89     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
90   if (Subtarget.hasStdExtD())
91     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
92 
93   static const MVT::SimpleValueType BoolVecVTs[] = {
94       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
95       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
96   static const MVT::SimpleValueType IntVecVTs[] = {
97       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
98       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
99       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
100       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
101       MVT::nxv4i64, MVT::nxv8i64};
102   static const MVT::SimpleValueType F16VecVTs[] = {
103       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
104       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
105   static const MVT::SimpleValueType F32VecVTs[] = {
106       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
107   static const MVT::SimpleValueType F64VecVTs[] = {
108       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
109 
110   if (Subtarget.hasStdExtV()) {
111     auto addRegClassForRVV = [this](MVT VT) {
112       unsigned Size = VT.getSizeInBits().getKnownMinValue();
113       assert(Size <= 512 && isPowerOf2_32(Size));
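      // Everything up to one 64-bit block (LMUL <= 1, including the
      // fractional LMULs) fits in a single VR; wider types use the grouped
      // VRM2/VRM4/VRM8 classes below.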
114       const TargetRegisterClass *RC;
115       if (Size <= 64)
116         RC = &RISCV::VRRegClass;
117       else if (Size == 128)
118         RC = &RISCV::VRM2RegClass;
119       else if (Size == 256)
120         RC = &RISCV::VRM4RegClass;
121       else
122         RC = &RISCV::VRM8RegClass;
123 
124       addRegisterClass(VT, RC);
125     };
126 
127     for (MVT VT : BoolVecVTs)
128       addRegClassForRVV(VT);
129     for (MVT VT : IntVecVTs)
130       addRegClassForRVV(VT);
131 
132     if (Subtarget.hasStdExtZfh())
133       for (MVT VT : F16VecVTs)
134         addRegClassForRVV(VT);
135 
136     if (Subtarget.hasStdExtF())
137       for (MVT VT : F32VecVTs)
138         addRegClassForRVV(VT);
139 
140     if (Subtarget.hasStdExtD())
141       for (MVT VT : F64VecVTs)
142         addRegClassForRVV(VT);
143 
144     if (Subtarget.useRVVForFixedLengthVectors()) {
145       auto addRegClassForFixedVectors = [this](MVT VT) {
146         MVT ContainerVT = getContainerForFixedLengthVector(VT);
147         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
148         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
149         addRegisterClass(VT, TRI.getRegClass(RCID));
150       };
151       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
152         if (useRVVForFixedLengthVectorVT(VT))
153           addRegClassForFixedVectors(VT);
154 
155       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
156         if (useRVVForFixedLengthVectorVT(VT))
157           addRegClassForFixedVectors(VT);
158     }
159   }
160 
161   // Compute derived properties from the register classes.
162   computeRegisterProperties(STI.getRegisterInfo());
163 
164   setStackPointerRegisterToSaveRestore(RISCV::X2);
165 
166   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
167     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
168 
169   // TODO: add all necessary setOperationAction calls.
170   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
171 
172   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
173   setOperationAction(ISD::BR_CC, XLenVT, Expand);
174   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
175   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
176 
177   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
178   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
179 
180   setOperationAction(ISD::VASTART, MVT::Other, Custom);
181   setOperationAction(ISD::VAARG, MVT::Other, Expand);
182   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
183   setOperationAction(ISD::VAEND, MVT::Other, Expand);
184 
185   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
186   if (!Subtarget.hasStdExtZbb()) {
187     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
188     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
189   }
190 
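  // On RV64, i32 is not a legal type; these operations are custom-legalized,
  // typically so they can select the sign-extending 32-bit *W instructions
  // (e.g. ADDW, SLLW).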
191   if (Subtarget.is64Bit()) {
192     setOperationAction(ISD::ADD, MVT::i32, Custom);
193     setOperationAction(ISD::SUB, MVT::i32, Custom);
194     setOperationAction(ISD::SHL, MVT::i32, Custom);
195     setOperationAction(ISD::SRA, MVT::i32, Custom);
196     setOperationAction(ISD::SRL, MVT::i32, Custom);
197 
198     setOperationAction(ISD::UADDO, MVT::i32, Custom);
199     setOperationAction(ISD::USUBO, MVT::i32, Custom);
200     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
201     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
202   }
203 
204   if (!Subtarget.hasStdExtM()) {
205     setOperationAction(ISD::MUL, XLenVT, Expand);
206     setOperationAction(ISD::MULHS, XLenVT, Expand);
207     setOperationAction(ISD::MULHU, XLenVT, Expand);
208     setOperationAction(ISD::SDIV, XLenVT, Expand);
209     setOperationAction(ISD::UDIV, XLenVT, Expand);
210     setOperationAction(ISD::SREM, XLenVT, Expand);
211     setOperationAction(ISD::UREM, XLenVT, Expand);
212   } else {
213     if (Subtarget.is64Bit()) {
214       setOperationAction(ISD::MUL, MVT::i32, Custom);
215       setOperationAction(ISD::MUL, MVT::i128, Custom);
216 
217       setOperationAction(ISD::SDIV, MVT::i8, Custom);
218       setOperationAction(ISD::UDIV, MVT::i8, Custom);
219       setOperationAction(ISD::UREM, MVT::i8, Custom);
220       setOperationAction(ISD::SDIV, MVT::i16, Custom);
221       setOperationAction(ISD::UDIV, MVT::i16, Custom);
222       setOperationAction(ISD::UREM, MVT::i16, Custom);
223       setOperationAction(ISD::SDIV, MVT::i32, Custom);
224       setOperationAction(ISD::UDIV, MVT::i32, Custom);
225       setOperationAction(ISD::UREM, MVT::i32, Custom);
226     } else {
227       setOperationAction(ISD::MUL, MVT::i64, Custom);
228     }
229   }
230 
231   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
232   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
233   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
234   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
235 
236   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
237   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
238   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
239 
240   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
241     if (Subtarget.is64Bit()) {
242       setOperationAction(ISD::ROTL, MVT::i32, Custom);
243       setOperationAction(ISD::ROTR, MVT::i32, Custom);
244     }
245   } else {
246     setOperationAction(ISD::ROTL, XLenVT, Expand);
247     setOperationAction(ISD::ROTR, XLenVT, Expand);
248   }
249 
250   if (Subtarget.hasStdExtZbp()) {
251     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
252     // more combining.
253     setOperationAction(ISD::BITREVERSE, XLenVT,   Custom);
254     setOperationAction(ISD::BSWAP,      XLenVT,   Custom);
255     setOperationAction(ISD::BITREVERSE, MVT::i8,  Custom);
256     // BSWAP i8 doesn't exist.
257     setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
258     setOperationAction(ISD::BSWAP,      MVT::i16, Custom);
259 
260     if (Subtarget.is64Bit()) {
261       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
262       setOperationAction(ISD::BSWAP,      MVT::i32, Custom);
263     }
264   } else {
265     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
266     // pattern match it directly in isel.
267     setOperationAction(ISD::BSWAP, XLenVT,
268                        Subtarget.hasStdExtZbb() ? Legal : Expand);
269   }
270 
271   if (Subtarget.hasStdExtZbb()) {
272     setOperationAction(ISD::SMIN, XLenVT, Legal);
273     setOperationAction(ISD::SMAX, XLenVT, Legal);
274     setOperationAction(ISD::UMIN, XLenVT, Legal);
275     setOperationAction(ISD::UMAX, XLenVT, Legal);
276 
277     if (Subtarget.is64Bit()) {
278       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
279       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
280       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
281       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
282     }
283   } else {
284     setOperationAction(ISD::CTTZ, XLenVT, Expand);
285     setOperationAction(ISD::CTLZ, XLenVT, Expand);
286     setOperationAction(ISD::CTPOP, XLenVT, Expand);
287   }
288 
289   if (Subtarget.hasStdExtZbt()) {
290     setOperationAction(ISD::FSHL, XLenVT, Custom);
291     setOperationAction(ISD::FSHR, XLenVT, Custom);
292     setOperationAction(ISD::SELECT, XLenVT, Legal);
293 
294     if (Subtarget.is64Bit()) {
295       setOperationAction(ISD::FSHL, MVT::i32, Custom);
296       setOperationAction(ISD::FSHR, MVT::i32, Custom);
297     }
298   } else {
299     setOperationAction(ISD::SELECT, XLenVT, Custom);
300   }
301 
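  // FP condition codes with no directly selectable compare; they are expanded
  // into combinations (and operand swaps) of the FEQ/FLT/FLE compares provided
  // by the F/D extensions.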
302   ISD::CondCode FPCCToExpand[] = {
303       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
304       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
305       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
306 
307   ISD::NodeType FPOpToExpand[] = {
308       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
309       ISD::FP_TO_FP16};
310 
311   if (Subtarget.hasStdExtZfh())
312     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
313 
314   if (Subtarget.hasStdExtZfh()) {
315     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
316     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
317     for (auto CC : FPCCToExpand)
318       setCondCodeAction(CC, MVT::f16, Expand);
319     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
320     setOperationAction(ISD::SELECT, MVT::f16, Custom);
321     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
322     for (auto Op : FPOpToExpand)
323       setOperationAction(Op, MVT::f16, Expand);
324   }
325 
326   if (Subtarget.hasStdExtF()) {
327     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
328     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
329     for (auto CC : FPCCToExpand)
330       setCondCodeAction(CC, MVT::f32, Expand);
331     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
332     setOperationAction(ISD::SELECT, MVT::f32, Custom);
333     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
334     for (auto Op : FPOpToExpand)
335       setOperationAction(Op, MVT::f32, Expand);
336     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
337     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
338   }
339 
340   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
341     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
342 
343   if (Subtarget.hasStdExtD()) {
344     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
345     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
346     for (auto CC : FPCCToExpand)
347       setCondCodeAction(CC, MVT::f64, Expand);
348     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
349     setOperationAction(ISD::SELECT, MVT::f64, Custom);
350     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
351     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
352     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
353     for (auto Op : FPOpToExpand)
354       setOperationAction(Op, MVT::f64, Expand);
355     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
356     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
357   }
358 
359   if (Subtarget.is64Bit()) {
360     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
361     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
362     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
363     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
364   }
365 
366   if (Subtarget.hasStdExtF()) {
367     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
368     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
369   }
370 
371   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
372   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
373   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
374   setOperationAction(ISD::JumpTable, XLenVT, Custom);
375 
376   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
377 
378   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
379   // Unfortunately this can't be determined just from the ISA naming string.
380   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
381                      Subtarget.is64Bit() ? Legal : Custom);
382 
383   setOperationAction(ISD::TRAP, MVT::Other, Legal);
384   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
385   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
386   if (Subtarget.is64Bit())
387     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
388 
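  // With the A extension, native atomics are available up to XLEN bits.
  // Without it, a maximum supported atomic width of 0 makes the AtomicExpand
  // pass lower all atomic operations to __atomic_* library calls.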
389   if (Subtarget.hasStdExtA()) {
390     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
391     setMinCmpXchgSizeInBits(32);
392   } else {
393     setMaxAtomicSizeInBitsSupported(0);
394   }
395 
396   setBooleanContents(ZeroOrOneBooleanContent);
397 
398   if (Subtarget.hasStdExtV()) {
399     setBooleanVectorContents(ZeroOrOneBooleanContent);
400 
401     setOperationAction(ISD::VSCALE, XLenVT, Custom);
402 
403     // RVV intrinsics may have illegal operands.
404     // We also need to custom legalize vmv.x.s.
405     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
406     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
407     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
408     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
409     if (Subtarget.is64Bit()) {
410       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
411     } else {
412       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
413       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
414     }
415 
416     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
417 
418     static unsigned IntegerVPOps[] = {
419         ISD::VP_ADD,  ISD::VP_SUB,  ISD::VP_MUL, ISD::VP_SDIV, ISD::VP_UDIV,
420         ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,   ISD::VP_XOR,
421         ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL};
422 
423     static unsigned FloatingPointVPOps[] = {ISD::VP_FADD, ISD::VP_FSUB,
424                                             ISD::VP_FMUL, ISD::VP_FDIV};
425 
426     if (!Subtarget.is64Bit()) {
427       // We must custom-lower certain vXi64 operations on RV32 due to the vector
428       // element type being illegal.
429       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
430       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
431 
432       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
433       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
434       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
435       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
436       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
437       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
438       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
439       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
440     }
441 
442     for (MVT VT : BoolVecVTs) {
443       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
444 
445       // Mask VTs are custom-expanded into a series of standard nodes
446       setOperationAction(ISD::TRUNCATE, VT, Custom);
447       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
448       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
449       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
450 
451       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
452       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
453 
454       setOperationAction(ISD::SELECT, VT, Custom);
455       setOperationAction(ISD::SELECT_CC, VT, Expand);
456       setOperationAction(ISD::VSELECT, VT, Expand);
457 
458       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
459       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
460       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
461 
462       // RVV has native int->float & float->int conversions where the
463       // element type sizes are within one power-of-two of each other. Any
464       // wider distances between type sizes have to be lowered as sequences
465       // which progressively narrow the gap in stages.
466       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
467       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
468       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
469       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
470 
471       // Expand all extending loads to types larger than this, and truncating
472       // stores from types larger than this.
473       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
474         setTruncStoreAction(OtherVT, VT, Expand);
475         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
476         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
477         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
478       }
479     }
480 
481     for (MVT VT : IntVecVTs) {
482       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
483       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
484 
485       setOperationAction(ISD::SMIN, VT, Legal);
486       setOperationAction(ISD::SMAX, VT, Legal);
487       setOperationAction(ISD::UMIN, VT, Legal);
488       setOperationAction(ISD::UMAX, VT, Legal);
489 
490       setOperationAction(ISD::ROTL, VT, Expand);
491       setOperationAction(ISD::ROTR, VT, Expand);
492 
493       // Custom-lower extensions and truncations from/to mask types.
494       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
495       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
496       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
497 
498       // RVV has native int->float & float->int conversions where the
499       // element type sizes are within one power-of-two of each other. Any
500       // wider distances between type sizes have to be lowered as sequences
501       // which progressively narrow the gap in stages.
502       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
503       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
504       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
505       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
506 
507       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
508       // nodes which truncate by one power of two at a time.
509       setOperationAction(ISD::TRUNCATE, VT, Custom);
510 
511       // Custom-lower insert/extract operations to simplify patterns.
512       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
513       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
514 
515       // Custom-lower reduction operations to set up the corresponding custom
516       // nodes' operands.
517       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
518       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
519       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
520       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
521       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
522       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
523       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
524       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
525 
526       for (unsigned VPOpc : IntegerVPOps)
527         setOperationAction(VPOpc, VT, Custom);
528 
529       setOperationAction(ISD::LOAD, VT, Custom);
530       setOperationAction(ISD::STORE, VT, Custom);
531 
532       setOperationAction(ISD::MLOAD, VT, Custom);
533       setOperationAction(ISD::MSTORE, VT, Custom);
534       setOperationAction(ISD::MGATHER, VT, Custom);
535       setOperationAction(ISD::MSCATTER, VT, Custom);
536 
537       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
538       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
539       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
540 
541       setOperationAction(ISD::SELECT, VT, Custom);
542       setOperationAction(ISD::SELECT_CC, VT, Expand);
543 
544       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
545       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
546 
547       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
548         setTruncStoreAction(VT, OtherVT, Expand);
549         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
550         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
551         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
552       }
553     }
554 
555     // Expand various CCs to best match the RVV ISA, which natively supports UNE
556     // but no other unordered comparisons, and supports all ordered comparisons
557     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
558     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
559     // and we pattern-match those back to the "original", swapping operands once
560     // more. This way we catch both operations and both "vf" and "fv" forms with
561     // fewer patterns.
562     ISD::CondCode VFPCCToExpand[] = {
563         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
564         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
565         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
566     };
567 
568     // Sets common operation actions on RVV floating-point vector types.
569     const auto SetCommonVFPActions = [&](MVT VT) {
570       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
571       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
572       // sizes are within one power-of-two of each other. Therefore conversions
573       // between vXf16 and vXf64 must be lowered as sequences which convert via
574       // vXf32.
575       setOperationAction(ISD::FP_ROUND, VT, Custom);
576       setOperationAction(ISD::FP_EXTEND, VT, Custom);
577       // Custom-lower insert/extract operations to simplify patterns.
578       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
579       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
580       // Expand various condition codes (explained above).
581       for (auto CC : VFPCCToExpand)
582         setCondCodeAction(CC, VT, Expand);
583 
584       setOperationAction(ISD::FMINNUM, VT, Legal);
585       setOperationAction(ISD::FMAXNUM, VT, Legal);
586 
587       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
588       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
589       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
590       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
591       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
592 
593       setOperationAction(ISD::LOAD, VT, Custom);
594       setOperationAction(ISD::STORE, VT, Custom);
595 
596       setOperationAction(ISD::MLOAD, VT, Custom);
597       setOperationAction(ISD::MSTORE, VT, Custom);
598       setOperationAction(ISD::MGATHER, VT, Custom);
599       setOperationAction(ISD::MSCATTER, VT, Custom);
600 
601       setOperationAction(ISD::SELECT, VT, Custom);
602       setOperationAction(ISD::SELECT_CC, VT, Expand);
603 
604       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
605       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
606       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
607 
608       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
609 
610       for (unsigned VPOpc : FloatingPointVPOps)
611         setOperationAction(VPOpc, VT, Custom);
612     };
613 
614     // Sets common extload/truncstore actions on RVV floating-point vector
615     // types.
616     const auto SetCommonVFPExtLoadTruncStoreActions =
617         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
618           for (auto SmallVT : SmallerVTs) {
619             setTruncStoreAction(VT, SmallVT, Expand);
620             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
621           }
622         };
623 
624     if (Subtarget.hasStdExtZfh())
625       for (MVT VT : F16VecVTs)
626         SetCommonVFPActions(VT);
627 
628     for (MVT VT : F32VecVTs) {
629       if (Subtarget.hasStdExtF())
630         SetCommonVFPActions(VT);
631       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
632     }
633 
634     for (MVT VT : F64VecVTs) {
635       if (Subtarget.hasStdExtD())
636         SetCommonVFPActions(VT);
637       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
638       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
639     }
640 
641     if (Subtarget.useRVVForFixedLengthVectors()) {
642       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
643         if (!useRVVForFixedLengthVectorVT(VT))
644           continue;
645 
646         // By default everything must be expanded.
647         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
648           setOperationAction(Op, VT, Expand);
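        // Operations we can handle are re-marked Custom below and lowered by
        // casting the fixed-length vector into its scalable container type.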
649         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
650           setTruncStoreAction(VT, OtherVT, Expand);
651           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
652           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
653           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
654         }
655 
656         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
657         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
658         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
659 
660         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
661         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
662 
663         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
664         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
665 
666         setOperationAction(ISD::LOAD, VT, Custom);
667         setOperationAction(ISD::STORE, VT, Custom);
668 
669         setOperationAction(ISD::SETCC, VT, Custom);
670 
671         setOperationAction(ISD::SELECT, VT, Custom);
672 
673         setOperationAction(ISD::TRUNCATE, VT, Custom);
674 
675         setOperationAction(ISD::BITCAST, VT, Custom);
676 
677         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
678         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
679         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
680 
681         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
682         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
683         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
684         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
685 
686         // Operations below differ between mask vectors and other vectors.
687         if (VT.getVectorElementType() == MVT::i1) {
688           setOperationAction(ISD::AND, VT, Custom);
689           setOperationAction(ISD::OR, VT, Custom);
690           setOperationAction(ISD::XOR, VT, Custom);
691           continue;
692         }
693 
694         // Use SPLAT_VECTOR to prevent type legalization from destroying the
695         // splats when type legalizing i64 scalar on RV32.
696         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
697         // improvements first.
698         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
699           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
700           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
701         }
702 
703         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
704         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
705 
706         setOperationAction(ISD::MLOAD, VT, Custom);
707         setOperationAction(ISD::MSTORE, VT, Custom);
708         setOperationAction(ISD::MGATHER, VT, Custom);
709         setOperationAction(ISD::MSCATTER, VT, Custom);
710         setOperationAction(ISD::ADD, VT, Custom);
711         setOperationAction(ISD::MUL, VT, Custom);
712         setOperationAction(ISD::SUB, VT, Custom);
713         setOperationAction(ISD::AND, VT, Custom);
714         setOperationAction(ISD::OR, VT, Custom);
715         setOperationAction(ISD::XOR, VT, Custom);
716         setOperationAction(ISD::SDIV, VT, Custom);
717         setOperationAction(ISD::SREM, VT, Custom);
718         setOperationAction(ISD::UDIV, VT, Custom);
719         setOperationAction(ISD::UREM, VT, Custom);
720         setOperationAction(ISD::SHL, VT, Custom);
721         setOperationAction(ISD::SRA, VT, Custom);
722         setOperationAction(ISD::SRL, VT, Custom);
723 
724         setOperationAction(ISD::SMIN, VT, Custom);
725         setOperationAction(ISD::SMAX, VT, Custom);
726         setOperationAction(ISD::UMIN, VT, Custom);
727         setOperationAction(ISD::UMAX, VT, Custom);
728         setOperationAction(ISD::ABS,  VT, Custom);
729 
730         setOperationAction(ISD::MULHS, VT, Custom);
731         setOperationAction(ISD::MULHU, VT, Custom);
732 
733         setOperationAction(ISD::VSELECT, VT, Custom);
734         setOperationAction(ISD::SELECT_CC, VT, Expand);
735 
736         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
737         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
738         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
739 
740         // Custom-lower reduction operations to set up the corresponding custom
741         // nodes' operands.
742         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
743         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
744         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
745         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
746         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
747 
748         for (unsigned VPOpc : IntegerVPOps)
749           setOperationAction(VPOpc, VT, Custom);
750       }
751 
752       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
753         if (!useRVVForFixedLengthVectorVT(VT))
754           continue;
755 
756         // By default everything must be expanded.
757         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
758           setOperationAction(Op, VT, Expand);
759         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
760           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
761           setTruncStoreAction(VT, OtherVT, Expand);
762         }
763 
764         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
765         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
766         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
767 
768         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
769         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
770         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
771         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
772 
773         setOperationAction(ISD::LOAD, VT, Custom);
774         setOperationAction(ISD::STORE, VT, Custom);
775         setOperationAction(ISD::MLOAD, VT, Custom);
776         setOperationAction(ISD::MSTORE, VT, Custom);
777         setOperationAction(ISD::MGATHER, VT, Custom);
778         setOperationAction(ISD::MSCATTER, VT, Custom);
779         setOperationAction(ISD::FADD, VT, Custom);
780         setOperationAction(ISD::FSUB, VT, Custom);
781         setOperationAction(ISD::FMUL, VT, Custom);
782         setOperationAction(ISD::FDIV, VT, Custom);
783         setOperationAction(ISD::FNEG, VT, Custom);
784         setOperationAction(ISD::FABS, VT, Custom);
785         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
786         setOperationAction(ISD::FSQRT, VT, Custom);
787         setOperationAction(ISD::FMA, VT, Custom);
788         setOperationAction(ISD::FMINNUM, VT, Custom);
789         setOperationAction(ISD::FMAXNUM, VT, Custom);
790 
791         setOperationAction(ISD::FP_ROUND, VT, Custom);
792         setOperationAction(ISD::FP_EXTEND, VT, Custom);
793 
794         for (auto CC : VFPCCToExpand)
795           setCondCodeAction(CC, VT, Expand);
796 
797         setOperationAction(ISD::VSELECT, VT, Custom);
798         setOperationAction(ISD::SELECT, VT, Custom);
799         setOperationAction(ISD::SELECT_CC, VT, Expand);
800 
801         setOperationAction(ISD::BITCAST, VT, Custom);
802 
803         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
804         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
805         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
806         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
807 
808         for (unsigned VPOpc : FloatingPointVPOps)
809           setOperationAction(VPOpc, VT, Custom);
810       }
811 
812       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
813       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
814       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
815       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
816       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
817       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
818       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
819       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
820     }
821   }
822 
823   // Function alignments.
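  // Align to 2 bytes when the compressed extension may emit 16-bit
  // instructions, otherwise to the 4-byte instruction size.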
824   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
825   setMinFunctionAlignment(FunctionAlignment);
826   setPrefFunctionAlignment(FunctionAlignment);
827 
828   setMinimumJumpTableEntries(5);
829 
830   // Jumps are expensive, compared to logic
831   setJumpIsExpensive();
832 
833   // We can use any register for comparisons
834   setHasMultipleConditionRegisters();
835 
836   setTargetDAGCombine(ISD::AND);
837   setTargetDAGCombine(ISD::OR);
838   setTargetDAGCombine(ISD::XOR);
839   if (Subtarget.hasStdExtV()) {
840     setTargetDAGCombine(ISD::FCOPYSIGN);
841     setTargetDAGCombine(ISD::MGATHER);
842     setTargetDAGCombine(ISD::MSCATTER);
843     setTargetDAGCombine(ISD::SRA);
844     setTargetDAGCombine(ISD::SRL);
845     setTargetDAGCombine(ISD::SHL);
846   }
847 }
848 
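// Vector comparisons produce an i1 mask vector when RVV is available; scalar
// comparisons produce a pointer-width (XLenVT) value.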
849 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
850                                             LLVMContext &Context,
851                                             EVT VT) const {
852   if (!VT.isVector())
853     return getPointerTy(DL);
854   if (Subtarget.hasStdExtV() &&
855       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
856     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
857   return VT.changeVectorElementTypeToInteger();
858 }
859 
860 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
861   return Subtarget.getXLenVT();
862 }
863 
864 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
865                                              const CallInst &I,
866                                              MachineFunction &MF,
867                                              unsigned Intrinsic) const {
868   switch (Intrinsic) {
869   default:
870     return false;
871   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
872   case Intrinsic::riscv_masked_atomicrmw_add_i32:
873   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
874   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
875   case Intrinsic::riscv_masked_atomicrmw_max_i32:
876   case Intrinsic::riscv_masked_atomicrmw_min_i32:
877   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
878   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
879   case Intrinsic::riscv_masked_cmpxchg_i32:
880     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
881     Info.opc = ISD::INTRINSIC_W_CHAIN;
882     Info.memVT = MVT::getVT(PtrTy->getElementType());
883     Info.ptrVal = I.getArgOperand(0);
884     Info.offset = 0;
885     Info.align = Align(4);
886     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
887                  MachineMemOperand::MOVolatile;
888     return true;
889   }
890 }
891 
892 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
893                                                 const AddrMode &AM, Type *Ty,
894                                                 unsigned AS,
895                                                 Instruction *I) const {
896   // No global is ever allowed as a base.
897   if (AM.BaseGV)
898     return false;
899 
900   // Require a 12-bit signed offset.
901   if (!isInt<12>(AM.BaseOffs))
902     return false;
903 
904   switch (AM.Scale) {
905   case 0: // "r+i" or just "i", depending on HasBaseReg.
906     break;
907   case 1:
908     if (!AM.HasBaseReg) // allow "r+i".
909       break;
910     return false; // disallow "r+r" or "r+r+i".
911   default:
912     return false;
913   }
914 
915   return true;
916 }
917 
918 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
919   return isInt<12>(Imm);
920 }
921 
922 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
923   return isInt<12>(Imm);
924 }
925 
926 // On RV32, 64-bit integers are split into their high and low parts and held
927 // in two different registers, so the trunc is free since the low register can
928 // just be used.
929 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
930   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
931     return false;
932   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
933   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
934   return (SrcBits == 64 && DestBits == 32);
935 }
936 
937 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
938   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
939       !SrcVT.isInteger() || !DstVT.isInteger())
940     return false;
941   unsigned SrcBits = SrcVT.getSizeInBits();
942   unsigned DestBits = DstVT.getSizeInBits();
943   return (SrcBits == 64 && DestBits == 32);
944 }
945 
946 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
947   // Zexts are free if they can be combined with a load.
948   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
949     EVT MemVT = LD->getMemoryVT();
950     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
951          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
952         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
953          LD->getExtensionType() == ISD::ZEXTLOAD))
954       return true;
955   }
956 
957   return TargetLowering::isZExtFree(Val, VT2);
958 }
959 
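// On RV64 sign-extending i32 to i64 is preferred: it is a single instruction
// and the 32-bit *W instructions already produce sign-extended results,
// whereas zero extension generally requires two shifts.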
960 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
961   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
962 }
963 
964 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
965   return Subtarget.hasStdExtZbb();
966 }
967 
968 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
969   return Subtarget.hasStdExtZbb();
970 }
971 
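// Only +0.0 is cheap to materialize (a move from x0); all other FP immediates
// are expected to be built via an integer register or loaded from memory.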
972 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
973                                        bool ForCodeSize) const {
974   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
975     return false;
976   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
977     return false;
978   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
979     return false;
980   if (Imm.isNegZero())
981     return false;
982   return Imm.isZero();
983 }
984 
985 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
986   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
987          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
988          (VT == MVT::f64 && Subtarget.hasStdExtD());
989 }
990 
991 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
992                                                       CallingConv::ID CC,
993                                                       EVT VT) const {
994   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
995   // end up using a GPR but that will be decided based on ABI.
996   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
997     return MVT::f32;
998 
999   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1000 }
1001 
1002 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1003                                                            CallingConv::ID CC,
1004                                                            EVT VT) const {
1005   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
1006   // end up using a GPR but that will be decided based on ABI.
1007   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
1008     return 1;
1009 
1010   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1011 }
1012 
1013 // Changes the condition code and swaps operands if necessary, so the SetCC
1014 // operation matches one of the comparisons supported directly by branches
1015 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
1016 // with 1/-1.
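// For example, (setgt X, -1) becomes (setge X, 0), and (setugt X, Y) becomes
// (setult Y, X) so that it maps onto BLTU.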
1017 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
1018                                     ISD::CondCode &CC, SelectionDAG &DAG) {
1019   // Convert X > -1 to X >= 0.
1020   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
1021     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1022     CC = ISD::SETGE;
1023     return;
1024   }
1025   // Convert X < 1 to 0 >= X.
1026   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1027     RHS = LHS;
1028     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1029     CC = ISD::SETGE;
1030     return;
1031   }
1032 
1033   switch (CC) {
1034   default:
1035     break;
1036   case ISD::SETGT:
1037   case ISD::SETLE:
1038   case ISD::SETUGT:
1039   case ISD::SETULE:
1040     CC = ISD::getSetCCSwappedOperands(CC);
1041     std::swap(LHS, RHS);
1042     break;
1043   }
1044 }
1045 
1046 // Return the RISC-V branch opcode that matches the given DAG integer
1047 // condition code. The CondCode must be one of those supported by the RISC-V
1048 // ISA (see translateSetCCForBranch).
1049 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
1050   switch (CC) {
1051   default:
1052     llvm_unreachable("Unsupported CondCode");
1053   case ISD::SETEQ:
1054     return RISCV::BEQ;
1055   case ISD::SETNE:
1056     return RISCV::BNE;
1057   case ISD::SETLT:
1058     return RISCV::BLT;
1059   case ISD::SETGE:
1060     return RISCV::BGE;
1061   case ISD::SETULT:
1062     return RISCV::BLTU;
1063   case ISD::SETUGE:
1064     return RISCV::BGEU;
1065   }
1066 }
1067 
1068 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1069   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1070   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1071   if (VT.getVectorElementType() == MVT::i1)
1072     KnownSize *= 8;
1073 
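  // For example, nxv4i32 has a known minimum size of 4 * 32 = 128 bits and
  // maps to LMUL_2, while nxv1i8 (8 bits) maps to the fractional LMUL_F8.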
1074   switch (KnownSize) {
1075   default:
1076     llvm_unreachable("Invalid LMUL.");
1077   case 8:
1078     return RISCVII::VLMUL::LMUL_F8;
1079   case 16:
1080     return RISCVII::VLMUL::LMUL_F4;
1081   case 32:
1082     return RISCVII::VLMUL::LMUL_F2;
1083   case 64:
1084     return RISCVII::VLMUL::LMUL_1;
1085   case 128:
1086     return RISCVII::VLMUL::LMUL_2;
1087   case 256:
1088     return RISCVII::VLMUL::LMUL_4;
1089   case 512:
1090     return RISCVII::VLMUL::LMUL_8;
1091   }
1092 }
1093 
1094 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1095   switch (LMul) {
1096   default:
1097     llvm_unreachable("Invalid LMUL.");
1098   case RISCVII::VLMUL::LMUL_F8:
1099   case RISCVII::VLMUL::LMUL_F4:
1100   case RISCVII::VLMUL::LMUL_F2:
1101   case RISCVII::VLMUL::LMUL_1:
1102     return RISCV::VRRegClassID;
1103   case RISCVII::VLMUL::LMUL_2:
1104     return RISCV::VRM2RegClassID;
1105   case RISCVII::VLMUL::LMUL_4:
1106     return RISCV::VRM4RegClassID;
1107   case RISCVII::VLMUL::LMUL_8:
1108     return RISCV::VRM8RegClassID;
1109   }
1110 }
1111 
1112 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1113   RISCVII::VLMUL LMUL = getLMUL(VT);
1114   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1115       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1116       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1117       LMUL == RISCVII::VLMUL::LMUL_1) {
1118     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1119                   "Unexpected subreg numbering");
1120     return RISCV::sub_vrm1_0 + Index;
1121   }
1122   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1123     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1124                   "Unexpected subreg numbering");
1125     return RISCV::sub_vrm2_0 + Index;
1126   }
1127   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1128     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1129                   "Unexpected subreg numbering");
1130     return RISCV::sub_vrm4_0 + Index;
1131   }
1132   llvm_unreachable("Invalid vector type.");
1133 }
1134 
1135 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1136   if (VT.getVectorElementType() == MVT::i1)
1137     return RISCV::VRRegClassID;
1138   return getRegClassIDForLMUL(getLMUL(VT));
1139 }
1140 
1141 // Attempt to decompose a subvector insert/extract between VecVT and
1142 // SubVecVT via subregister indices. Returns the subregister index that
1143 // can perform the subvector insert/extract with the given element index, as
1144 // well as the index corresponding to any leftover subvectors that must be
1145 // further inserted/extracted within the register class for SubVecVT.
1146 std::pair<unsigned, unsigned>
1147 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1148     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1149     const RISCVRegisterInfo *TRI) {
1150   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1151                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1152                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1153                 "Register classes not ordered");
1154   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1155   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1156   // Try to compose a subregister index that takes us from the incoming
1157   // LMUL>1 register class down to the outgoing one. At each step we halve
1158   // the LMUL:
1159   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1160   // Note that this is not guaranteed to find a subregister index, such as
1161   // when we are extracting from one VR type to another.
1162   unsigned SubRegIdx = RISCV::NoSubRegister;
1163   for (const unsigned RCID :
1164        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1165     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1166       VecVT = VecVT.getHalfNumVectorElementsVT();
1167       bool IsHi =
1168           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1169       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1170                                             getSubregIndexByMVT(VecVT, IsHi));
1171       if (IsHi)
1172         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1173     }
1174   return {SubRegIdx, InsertExtractIdx};
1175 }
1176 
1177 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1178 // stores for those types.
1179 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1180   return !Subtarget.useRVVForFixedLengthVectors() ||
1181          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1182 }
1183 
1184 static bool useRVVForFixedLengthVectorVT(MVT VT,
1185                                          const RISCVSubtarget &Subtarget) {
1186   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1187   if (!Subtarget.useRVVForFixedLengthVectors())
1188     return false;
1189 
1190   // We only support a set of vector types with a consistent maximum fixed size
1191   // across all supported vector element types to avoid legalization issues.
1192   // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1193   // fixed-length vector type we support is 1024 bytes.
1194   if (VT.getFixedSizeInBits() > 1024 * 8)
1195     return false;
1196 
1197   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1198 
1199   // Don't use RVV for vectors we cannot scalarize if required.
1200   switch (VT.getVectorElementType().SimpleTy) {
1201   // i1 is supported but has different rules.
1202   default:
1203     return false;
1204   case MVT::i1:
1205     // Masks can only use a single register.
1206     if (VT.getVectorNumElements() > MinVLen)
1207       return false;
1208     MinVLen /= 8;
1209     break;
1210   case MVT::i8:
1211   case MVT::i16:
1212   case MVT::i32:
1213   case MVT::i64:
1214     break;
1215   case MVT::f16:
1216     if (!Subtarget.hasStdExtZfh())
1217       return false;
1218     break;
1219   case MVT::f32:
1220     if (!Subtarget.hasStdExtF())
1221       return false;
1222     break;
1223   case MVT::f64:
1224     if (!Subtarget.hasStdExtD())
1225       return false;
1226     break;
1227   }
1228 
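  // For example, with a 128-bit minimum VLEN, a 512-bit fixed-length vector
  // such as v16i32 needs divideCeil(512, 128) = 4 vector registers (LMUL 4).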
1229   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1230   // Don't use RVV for types that don't fit.
1231   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1232     return false;
1233 
1234   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1235   // the base fixed length RVV support in place.
1236   if (!VT.isPow2VectorType())
1237     return false;
1238 
1239   return true;
1240 }
1241 
1242 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1243   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1244 }
1245 
1246 // Return the smallest legal scalable vector type that can hold all of VT's elements.
1247 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1248                                             const RISCVSubtarget &Subtarget) {
1249   // This may be called before legal types are set up.
1250   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1251           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1252          "Expected legal fixed length vector!");
1253 
1254   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1255 
1256   MVT EltVT = VT.getVectorElementType();
1257   switch (EltVT.SimpleTy) {
1258   default:
1259     llvm_unreachable("unexpected element type for RVV container");
1260   case MVT::i1:
1261   case MVT::i8:
1262   case MVT::i16:
1263   case MVT::i32:
1264   case MVT::i64:
1265   case MVT::f16:
1266   case MVT::f32:
1267   case MVT::f64: {
1268     // We prefer to use LMUL=1 for VLEN sized types. Use fractional LMULs for
1269     // narrower types, but a fractional LMUL's denominator cannot be greater
1270     // than 64/SEW.
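    // For example, with a 128-bit minimum VLEN a fixed v8i32 maps to nxv4i32:
    // divideCeil(8, 128 / RVVBitsPerBlock) = divideCeil(8, 2) = 4 elements.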
1271     unsigned NumElts =
1272         divideCeil(VT.getVectorNumElements(), MinVLen / RISCV::RVVBitsPerBlock);
1273     return MVT::getScalableVectorVT(EltVT, NumElts);
1274   }
1275   }
1276 }
1277 
1278 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1279                                             const RISCVSubtarget &Subtarget) {
1280   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1281                                           Subtarget);
1282 }
1283 
1284 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1285   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1286 }
1287 
1288 // Grow V to consume an entire RVV register.
1289 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1290                                        const RISCVSubtarget &Subtarget) {
1291   assert(VT.isScalableVector() &&
1292          "Expected to convert into a scalable vector!");
1293   assert(V.getValueType().isFixedLengthVector() &&
1294          "Expected a fixed length vector operand!");
1295   SDLoc DL(V);
1296   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1297   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1298 }
1299 
1300 // Shrink V so it's just big enough to maintain a VT's worth of data.
1301 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1302                                          const RISCVSubtarget &Subtarget) {
1303   assert(VT.isFixedLengthVector() &&
1304          "Expected to convert into a fixed length vector!");
1305   assert(V.getValueType().isScalableVector() &&
1306          "Expected a scalable vector operand!");
1307   SDLoc DL(V);
1308   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1309   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1310 }
1311 
1312 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1313 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1314 // the vector type that it is contained in.
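// For scalable vectors the VL operand is the X0 register, which the backend
// (and the vsetvli encoding) interprets as VLMAX.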
1315 static std::pair<SDValue, SDValue>
1316 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1317                 const RISCVSubtarget &Subtarget) {
1318   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1319   MVT XLenVT = Subtarget.getXLenVT();
1320   SDValue VL = VecVT.isFixedLengthVector()
1321                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1322                    : DAG.getRegister(RISCV::X0, XLenVT);
1323   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1324   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1325   return {Mask, VL};
1326 }
1327 
1328 // As above but assuming the given type is a scalable vector type.
1329 static std::pair<SDValue, SDValue>
1330 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1331                         const RISCVSubtarget &Subtarget) {
1332   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1333   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1334 }
1335 
1336 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
// of either are (currently) supported. This can get us into an infinite loop
1338 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1339 // as a ..., etc.
1340 // Until either (or both) of these can reliably lower any node, reporting that
1341 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1342 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1343 // which is not desirable.
1344 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1345     EVT VT, unsigned DefinedValues) const {
1346   return false;
1347 }
1348 
1349 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1350   // Only splats are currently supported.
1351   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1352     return true;
1353 
1354   return false;
1355 }
1356 
1357 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1358                                  const RISCVSubtarget &Subtarget) {
1359   MVT VT = Op.getSimpleValueType();
1360   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1361 
1362   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1363 
1364   SDLoc DL(Op);
1365   SDValue Mask, VL;
1366   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1367 
1368   unsigned Opc =
1369       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1370   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1371   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1372 }
1373 
1374 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1375                                  const RISCVSubtarget &Subtarget) {
1376   MVT VT = Op.getSimpleValueType();
1377   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1378 
1379   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1380 
1381   SDLoc DL(Op);
1382   SDValue Mask, VL;
1383   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1384 
1385   MVT XLenVT = Subtarget.getXLenVT();
1386   unsigned NumElts = Op.getNumOperands();
1387 
1388   if (VT.getVectorElementType() == MVT::i1) {
1389     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1390       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1391       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1392     }
1393 
1394     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1395       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1396       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1397     }
1398 
1399     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1400     // scalar integer chunks whose bit-width depends on the number of mask
1401     // bits and XLEN.
1402     // First, determine the most appropriate scalar integer type to use. This
1403     // is at most XLenVT, but may be shrunk to a smaller vector element type
1404     // according to the size of the final vector - use i8 chunks rather than
1405     // XLenVT if we're producing a v8i1. This results in more consistent
1406     // codegen across RV32 and RV64.
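    // For example, a constant v8i1 is accumulated into a single i8 element and
    // lowered via v1i8, while a constant v64i1 on RV32 uses two 32-bit chunks
    // via v2i32.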
1407     unsigned NumViaIntegerBits =
1408         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1409     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1410       // If we have to use more than one INSERT_VECTOR_ELT then this
      // optimization is likely to increase code size; avoid performing it in
1412       // such a case. We can use a load from a constant pool in this case.
1413       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1414         return SDValue();
1415       // Now we can create our integer vector type. Note that it may be larger
1416       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1417       MVT IntegerViaVecVT =
1418           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1419                            divideCeil(NumElts, NumViaIntegerBits));
1420 
1421       uint64_t Bits = 0;
1422       unsigned BitPos = 0, IntegerEltIdx = 0;
1423       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1424 
1425       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1426         // Once we accumulate enough bits to fill our scalar type, insert into
1427         // our vector and clear our accumulated data.
1428         if (I != 0 && I % NumViaIntegerBits == 0) {
1429           if (NumViaIntegerBits <= 32)
1430             Bits = SignExtend64(Bits, 32);
1431           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1432           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1433                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1434           Bits = 0;
1435           BitPos = 0;
1436           IntegerEltIdx++;
1437         }
1438         SDValue V = Op.getOperand(I);
1439         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1440         Bits |= ((uint64_t)BitValue << BitPos);
1441       }
1442 
1443       // Insert the (remaining) scalar value into position in our integer
1444       // vector type.
1445       if (NumViaIntegerBits <= 32)
1446         Bits = SignExtend64(Bits, 32);
1447       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1448       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1449                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1450 
1451       if (NumElts < NumViaIntegerBits) {
1452         // If we're producing a smaller vector than our minimum legal integer
1453         // type, bitcast to the equivalent (known-legal) mask type, and extract
1454         // our final mask.
1455         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1456         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1457         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1458                           DAG.getConstant(0, DL, XLenVT));
1459       } else {
1460         // Else we must have produced an integer type with the same size as the
1461         // mask type; bitcast for the final result.
1462         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1463         Vec = DAG.getBitcast(VT, Vec);
1464       }
1465 
1466       return Vec;
1467     }
1468 
1469     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
1470     // vector type, we have a legal equivalently-sized i8 type, so we can use
1471     // that.
1472     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
1473     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
1474 
1475     SDValue WideVec;
1476     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1477       // For a splat, perform a scalar truncate before creating the wider
1478       // vector.
1479       assert(Splat.getValueType() == XLenVT &&
1480              "Unexpected type for i1 splat value");
1481       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
1482                           DAG.getConstant(1, DL, XLenVT));
1483       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
1484     } else {
1485       SmallVector<SDValue, 8> Ops(Op->op_values());
1486       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
1487       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
1488       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
1489     }
1490 
1491     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
1492   }
1493 
1494   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1495     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1496                                         : RISCVISD::VMV_V_X_VL;
1497     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1498     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1499   }
1500 
  // Try to match an index sequence, which we can lower directly to the vid
1502   // instruction. An all-undef vector is matched by getSplatValue, above.
1503   if (VT.isInteger()) {
1504     bool IsVID = true;
1505     for (unsigned I = 0; I < NumElts && IsVID; I++)
1506       IsVID &= Op.getOperand(I).isUndef() ||
1507                (isa<ConstantSDNode>(Op.getOperand(I)) &&
1508                 Op.getConstantOperandVal(I) == I);
1509 
1510     if (IsVID) {
1511       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1512       return convertFromScalableVector(VT, VID, DAG, Subtarget);
1513     }
1514   }
1515 
1516   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1517   // when re-interpreted as a vector with a larger element type. For example,
1518   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
  // could instead be splatted as
1520   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1521   // TODO: This optimization could also work on non-constant splats, but it
1522   // would require bit-manipulation instructions to construct the splat value.
1523   SmallVector<SDValue> Sequence;
1524   unsigned EltBitSize = VT.getScalarSizeInBits();
1525   const auto *BV = cast<BuildVectorSDNode>(Op);
1526   if (VT.isInteger() && EltBitSize < 64 &&
1527       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1528       BV->getRepeatedSequence(Sequence) &&
1529       (Sequence.size() * EltBitSize) <= 64) {
1530     unsigned SeqLen = Sequence.size();
1531     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1532     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1533     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1534             ViaIntVT == MVT::i64) &&
1535            "Unexpected sequence type");
1536 
1537     unsigned EltIdx = 0;
1538     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1539     uint64_t SplatValue = 0;
1540     // Construct the amalgamated value which can be splatted as this larger
1541     // vector type.
1542     for (const auto &SeqV : Sequence) {
1543       if (!SeqV.isUndef())
1544         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1545                        << (EltIdx * EltBitSize));
1546       EltIdx++;
1547     }
1548 
1549     // On RV64, sign-extend from 32 to 64 bits where possible in order to
    // achieve better constant materialization.
1551     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1552       SplatValue = SignExtend64(SplatValue, 32);
1553 
1554     // Since we can't introduce illegal i64 types at this stage, we can only
1555     // perform an i64 splat on RV32 if it is its own sign-extended value. That
1556     // way we can use RVV instructions to splat.
1557     assert((ViaIntVT.bitsLE(XLenVT) ||
1558             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1559            "Unexpected bitcast sequence");
1560     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1561       SDValue ViaVL =
1562           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1563       MVT ViaContainerVT =
1564           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
1565       SDValue Splat =
1566           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1567                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1568       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1569       return DAG.getBitcast(VT, Splat);
1570     }
1571   }
1572 
1573   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1574   // which constitute a large proportion of the elements. In such cases we can
1575   // splat a vector with the dominant element and make up the shortfall with
1576   // INSERT_VECTOR_ELTs.
1577   // Note that this includes vectors of 2 elements by association. The
1578   // upper-most element is the "dominant" one, allowing us to use a splat to
1579   // "insert" the upper element, and an insert of the lower element at position
1580   // 0, which improves codegen.
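  // For example, when not optimizing for size, <i32 4, i32 4, i32 4, i32 7>
  // is lowered as a splat of 4 followed by a single INSERT_VECTOR_ELT of 7 at
  // index 3.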
1581   SDValue DominantValue;
1582   unsigned MostCommonCount = 0;
1583   DenseMap<SDValue, unsigned> ValueCounts;
1584   unsigned NumUndefElts =
1585       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1586 
1587   for (SDValue V : Op->op_values()) {
1588     if (V.isUndef())
1589       continue;
1590 
1591     ValueCounts.insert(std::make_pair(V, 0));
1592     unsigned &Count = ValueCounts[V];
1593 
1594     // Is this value dominant? In case of a tie, prefer the highest element as
1595     // it's cheaper to insert near the beginning of a vector than it is at the
1596     // end.
1597     if (++Count >= MostCommonCount) {
1598       DominantValue = V;
1599       MostCommonCount = Count;
1600     }
1601   }
1602 
1603   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
1604   unsigned NumDefElts = NumElts - NumUndefElts;
1605   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1606 
1607   // Don't perform this optimization when optimizing for size, since
1608   // materializing elements and inserting them tends to cause code bloat.
1609   if (!DAG.shouldOptForSize() &&
1610       ((MostCommonCount > DominantValueCountThreshold) ||
1611        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
1612     // Start by splatting the most common element.
1613     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1614 
1615     DenseSet<SDValue> Processed{DominantValue};
1616     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
1617     for (const auto &OpIdx : enumerate(Op->ops())) {
1618       const SDValue &V = OpIdx.value();
1619       if (V.isUndef() || !Processed.insert(V).second)
1620         continue;
1621       if (ValueCounts[V] == 1) {
1622         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
1623                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
1624       } else {
1625         // Blend in all instances of this value using a VSELECT, using a
1626         // mask where each bit signals whether that element is the one
1627         // we're after.
1628         SmallVector<SDValue> Ops;
1629         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
1630           return DAG.getConstant(V == V1, DL, XLenVT);
1631         });
1632         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
1633                           DAG.getBuildVector(SelMaskTy, DL, Ops),
1634                           DAG.getSplatBuildVector(VT, DL, V), Vec);
1635       }
1636     }
1637 
1638     return Vec;
1639   }
1640 
1641   return SDValue();
1642 }
1643 
1644 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
1645                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
1646   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
1647     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
1648     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
1649     // If Hi constant is all the same sign bit as Lo, lower this as a custom
    // node in order to try to match RVV vector/scalar instructions.
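    // For example, an i64 splat value of 0xFFFFFFFF80000000 splits into
    // Lo = 0x80000000 and Hi = 0xFFFFFFFF; Lo's sign bit matches Hi, so
    // splatting Lo alone (which sign-extends to SEW=64) reproduces the value.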
1651     if ((LoC >> 31) == HiC)
1652       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
1653   }
1654 
1655   // Fall back to a stack store and stride x0 vector load.
1656   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
1657 }
1658 
1659 // Called by type legalization to handle splat of i64 on RV32.
1660 // FIXME: We can optimize this when the type has sign or zero bits in one
1661 // of the halves.
1662 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
1663                                    SDValue VL, SelectionDAG &DAG) {
1664   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
1665   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1666                            DAG.getConstant(0, DL, MVT::i32));
1667   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1668                            DAG.getConstant(1, DL, MVT::i32));
1669   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
1670 }
1671 
// This function lowers a splat of a scalar operand Scalar with the vector
1673 // length VL. It ensures the final sequence is type legal, which is useful when
1674 // lowering a splat after type legalization.
1675 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
1676                                 SelectionDAG &DAG,
1677                                 const RISCVSubtarget &Subtarget) {
1678   if (VT.isFloatingPoint())
1679     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
1680 
1681   MVT XLenVT = Subtarget.getXLenVT();
1682 
1683   // Simplest case is that the operand needs to be promoted to XLenVT.
1684   if (Scalar.getValueType().bitsLE(XLenVT)) {
1685     // If the operand is a constant, sign extend to increase our chances
1686     // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
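    // For example, an i8 constant of -1 sign-extends to -1, which fits the
    // simm5 of vmv.v.i, whereas zero-extending it to 255 would not.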
1688     // FIXME: Should we ignore the upper bits in isel instead?
1689     unsigned ExtOpc =
1690         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
1691     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
1692     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
1693   }
1694 
1695   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
1696          "Unexpected scalar for splat lowering!");
1697 
1698   // Otherwise use the more complicated splatting algorithm.
1699   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
1700 }
1701 
1702 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1703                                    const RISCVSubtarget &Subtarget) {
1704   SDValue V1 = Op.getOperand(0);
1705   SDValue V2 = Op.getOperand(1);
1706   SDLoc DL(Op);
1707   MVT XLenVT = Subtarget.getXLenVT();
1708   MVT VT = Op.getSimpleValueType();
1709   unsigned NumElts = VT.getVectorNumElements();
1710   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1711 
1712   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1713 
1714   SDValue TrueMask, VL;
1715   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1716 
1717   if (SVN->isSplat()) {
1718     const int Lane = SVN->getSplatIndex();
1719     if (Lane >= 0) {
1720       MVT SVT = VT.getVectorElementType();
1721 
1722       // Turn splatted vector load into a strided load with an X0 stride.
1723       SDValue V = V1;
1724       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
1725       // with undef.
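      // For example, when splatting element 5 of (concat_vectors v4i32:A,
      // v4i32:B), we instead look at element 1 of B when trying to form a
      // load-based splat.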
1726       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
1727       int Offset = Lane;
1728       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
1729         int OpElements =
1730             V.getOperand(0).getSimpleValueType().getVectorNumElements();
1731         V = V.getOperand(Offset / OpElements);
1732         Offset %= OpElements;
1733       }
1734 
1735       // We need to ensure the load isn't atomic or volatile.
1736       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
1737         auto *Ld = cast<LoadSDNode>(V);
1738         Offset *= SVT.getStoreSize();
1739         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
1740                                                    TypeSize::Fixed(Offset), DL);
1741 
1742         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
1743         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
1744           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
1745           SDValue IntID =
1746               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1747           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
1748                            DAG.getRegister(RISCV::X0, XLenVT), VL};
1749           SDValue NewLoad = DAG.getMemIntrinsicNode(
1750               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
1751               DAG.getMachineFunction().getMachineMemOperand(
1752                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
1753           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
1754           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
1755         }
1756 
1757         // Otherwise use a scalar load and splat. This will give the best
1758         // opportunity to fold a splat into the operation. ISel can turn it into
1759         // the x0 strided load if we aren't able to fold away the select.
1760         if (SVT.isFloatingPoint())
1761           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
1762                           Ld->getPointerInfo().getWithOffset(Offset),
1763                           Ld->getOriginalAlign(),
1764                           Ld->getMemOperand()->getFlags());
1765         else
1766           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
1767                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
1768                              Ld->getOriginalAlign(),
1769                              Ld->getMemOperand()->getFlags());
1770         DAG.makeEquivalentMemoryOrdering(Ld, V);
1771 
1772         unsigned Opc =
1773             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1774         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
1775         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1776       }
1777 
1778       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1779       assert(Lane < (int)NumElts && "Unexpected lane!");
1780       SDValue Gather =
1781           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1782                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
1783       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1784     }
1785   }
1786 
1787   // Detect shuffles which can be re-expressed as vector selects; these are
1788   // shuffles in which each element in the destination is taken from an element
  // at the corresponding index in either source vector.
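  // For example, the mask <0, 5, 2, 7> on two v4i32 sources takes each result
  // element from one of the sources at the same index, so it can be lowered
  // as a vselect.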
1790   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
1791     int MaskIndex = MaskIdx.value();
1792     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
1793   });
1794 
1795   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
1796 
1797   SmallVector<SDValue> MaskVals;
1798   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
1799   // merged with a second vrgather.
1800   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
1801 
1802   // By default we preserve the original operand order, and use a mask to
1803   // select LHS as true and RHS as false. However, since RVV vector selects may
1804   // feature splats but only on the LHS, we may choose to invert our mask and
1805   // instead select between RHS and LHS.
1806   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
1807   bool InvertMask = IsSelect == SwapOps;
1808 
1809   // Now construct the mask that will be used by the vselect or blended
1810   // vrgather operation. For vrgathers, construct the appropriate indices into
1811   // each vector.
1812   for (int MaskIndex : SVN->getMask()) {
1813     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
1814     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
1815     if (!IsSelect) {
1816       bool IsLHS = MaskIndex < (int)NumElts;
1817       // For "undef" elements of -1, shuffle in element 0 instead.
1818       GatherIndicesLHS.push_back(
1819           DAG.getConstant(IsLHS ? std::max(MaskIndex, 0) : 0, DL, XLenVT));
1820       // TODO: If we're masking out unused elements anyway, it might produce
1821       // better code if we use the most-common element index instead of 0.
1822       GatherIndicesRHS.push_back(
1823           DAG.getConstant(IsLHS ? 0 : MaskIndex - NumElts, DL, XLenVT));
1824     }
1825   }
1826 
1827   if (SwapOps) {
1828     std::swap(V1, V2);
1829     std::swap(GatherIndicesLHS, GatherIndicesRHS);
1830   }
1831 
1832   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
1833   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
1834   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
1835 
1836   if (IsSelect)
1837     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
1838 
1839   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
1840     // On such a large vector we're unable to use i8 as the index type.
1841     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
1842     // may involve vector splitting if we're already at LMUL=8, or our
1843     // user-supplied maximum fixed-length LMUL.
1844     return SDValue();
1845   }
1846 
1847   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
1848   MVT IndexVT = VT.changeTypeToInteger();
1849   // Since we can't introduce illegal index types at this stage, use i16 and
1850   // vrgatherei16 if the corresponding index type for plain vrgather is greater
1851   // than XLenVT.
1852   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
1853     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
1854     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
1855   }
1856 
1857   MVT IndexContainerVT =
1858       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
1859 
1860   SDValue Gather;
1861   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
1862   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
1863   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
1864     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
1865   } else {
1866     SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
1867     LHSIndices =
1868         convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
1869 
1870     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1871     Gather =
1872         DAG.getNode(GatherOpc, DL, ContainerVT, V1, LHSIndices, TrueMask, VL);
1873   }
1874 
1875   // If a second vector operand is used by this shuffle, blend it in with an
1876   // additional vrgather.
1877   if (!V2.isUndef()) {
1878     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
1879     SelectMask =
1880         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
1881 
1882     SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
1883     RHSIndices =
1884         convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
1885 
1886     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
1887     V2 = DAG.getNode(GatherOpc, DL, ContainerVT, V2, RHSIndices, TrueMask, VL);
1888     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
1889                          Gather, VL);
1890   }
1891 
1892   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1893 }
1894 
1895 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
1896                                      SDLoc DL, SelectionDAG &DAG,
1897                                      const RISCVSubtarget &Subtarget) {
1898   if (VT.isScalableVector())
1899     return DAG.getFPExtendOrRound(Op, DL, VT);
1900   assert(VT.isFixedLengthVector() &&
1901          "Unexpected value type for RVV FP extend/round lowering");
1902   SDValue Mask, VL;
1903   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1904   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
1905                         ? RISCVISD::FP_EXTEND_VL
1906                         : RISCVISD::FP_ROUND_VL;
1907   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
1908 }
1909 
1910 // While RVV has alignment restrictions, we should always be able to load as a
1911 // legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
1913 // the load is already correctly-aligned, it returns SDValue().
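// For example, an underaligned load of nxv2i32 is re-expressed as a load of
// nxv8i8 followed by a bitcast back to the original type.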
1914 SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
1915                                                     SelectionDAG &DAG) const {
1916   auto *Load = cast<LoadSDNode>(Op);
1917   assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");
1918 
1919   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
1920                                      Load->getMemoryVT(),
1921                                      *Load->getMemOperand()))
1922     return SDValue();
1923 
1924   SDLoc DL(Op);
1925   MVT VT = Op.getSimpleValueType();
1926   unsigned EltSizeBits = VT.getScalarSizeInBits();
1927   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
1928          "Unexpected unaligned RVV load type");
1929   MVT NewVT =
1930       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
1931   assert(NewVT.isValid() &&
1932          "Expecting equally-sized RVV vector types to be legal");
1933   SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
1934                           Load->getPointerInfo(), Load->getOriginalAlign(),
1935                           Load->getMemOperand()->getFlags());
1936   return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
1937 }
1938 
1939 // While RVV has alignment restrictions, we should always be able to store as a
1940 // legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
1942 // returns SDValue() if the store is already correctly aligned.
1943 SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
1944                                                      SelectionDAG &DAG) const {
1945   auto *Store = cast<StoreSDNode>(Op);
1946   assert(Store && Store->getValue().getValueType().isVector() &&
1947          "Expected vector store");
1948 
1949   if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
1950                                      Store->getMemoryVT(),
1951                                      *Store->getMemOperand()))
1952     return SDValue();
1953 
1954   SDLoc DL(Op);
1955   SDValue StoredVal = Store->getValue();
1956   MVT VT = StoredVal.getSimpleValueType();
1957   unsigned EltSizeBits = VT.getScalarSizeInBits();
1958   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
1959          "Unexpected unaligned RVV store type");
1960   MVT NewVT =
1961       MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
1962   assert(NewVT.isValid() &&
1963          "Expecting equally-sized RVV vector types to be legal");
1964   StoredVal = DAG.getBitcast(NewVT, StoredVal);
1965   return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
1966                       Store->getPointerInfo(), Store->getOriginalAlign(),
1967                       Store->getMemOperand()->getFlags());
1968 }
1969 
1970 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
1971                                             SelectionDAG &DAG) const {
1972   switch (Op.getOpcode()) {
1973   default:
1974     report_fatal_error("unimplemented operand");
1975   case ISD::GlobalAddress:
1976     return lowerGlobalAddress(Op, DAG);
1977   case ISD::BlockAddress:
1978     return lowerBlockAddress(Op, DAG);
1979   case ISD::ConstantPool:
1980     return lowerConstantPool(Op, DAG);
1981   case ISD::JumpTable:
1982     return lowerJumpTable(Op, DAG);
1983   case ISD::GlobalTLSAddress:
1984     return lowerGlobalTLSAddress(Op, DAG);
1985   case ISD::SELECT:
1986     return lowerSELECT(Op, DAG);
1987   case ISD::BRCOND:
1988     return lowerBRCOND(Op, DAG);
1989   case ISD::VASTART:
1990     return lowerVASTART(Op, DAG);
1991   case ISD::FRAMEADDR:
1992     return lowerFRAMEADDR(Op, DAG);
1993   case ISD::RETURNADDR:
1994     return lowerRETURNADDR(Op, DAG);
1995   case ISD::SHL_PARTS:
1996     return lowerShiftLeftParts(Op, DAG);
1997   case ISD::SRA_PARTS:
1998     return lowerShiftRightParts(Op, DAG, true);
1999   case ISD::SRL_PARTS:
2000     return lowerShiftRightParts(Op, DAG, false);
2001   case ISD::BITCAST: {
2002     SDLoc DL(Op);
2003     EVT VT = Op.getValueType();
2004     SDValue Op0 = Op.getOperand(0);
2005     EVT Op0VT = Op0.getValueType();
2006     MVT XLenVT = Subtarget.getXLenVT();
2007     if (VT.isFixedLengthVector()) {
2008       // We can handle fixed length vector bitcasts with a simple replacement
2009       // in isel.
2010       if (Op0VT.isFixedLengthVector())
2011         return Op;
2012       // When bitcasting from scalar to fixed-length vector, insert the scalar
2013       // into a one-element vector of the result type, and perform a vector
2014       // bitcast.
2015       if (!Op0VT.isVector()) {
2016         auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
2017         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
2018                                               DAG.getUNDEF(BVT), Op0,
2019                                               DAG.getConstant(0, DL, XLenVT)));
2020       }
2021       return SDValue();
2022     }
2023     // Custom-legalize bitcasts from fixed-length vector types to scalar types
2024     // thus: bitcast the vector to a one-element vector type whose element type
2025     // is the same as the result type, and extract the first element.
2026     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
2027       LLVMContext &Context = *DAG.getContext();
2028       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
2029       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
2030                          DAG.getConstant(0, DL, XLenVT));
2031     }
2032     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
2033       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
2034       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
2035       return FPConv;
2036     }
2037     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
2038         Subtarget.hasStdExtF()) {
2039       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
2040       SDValue FPConv =
2041           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
2042       return FPConv;
2043     }
2044     return SDValue();
2045   }
2046   case ISD::INTRINSIC_WO_CHAIN:
2047     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2048   case ISD::INTRINSIC_W_CHAIN:
2049     return LowerINTRINSIC_W_CHAIN(Op, DAG);
2050   case ISD::BSWAP:
2051   case ISD::BITREVERSE: {
    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
2053     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
2054     MVT VT = Op.getSimpleValueType();
2055     SDLoc DL(Op);
2056     // Start with the maximum immediate value which is the bitwidth - 1.
2057     unsigned Imm = VT.getSizeInBits() - 1;
2058     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
2059     if (Op.getOpcode() == ISD::BSWAP)
2060       Imm &= ~0x7U;
2061     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
2062                        DAG.getConstant(Imm, DL, VT));
2063   }
2064   case ISD::FSHL:
2065   case ISD::FSHR: {
2066     MVT VT = Op.getSimpleValueType();
2067     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
2068     SDLoc DL(Op);
2069     if (Op.getOperand(2).getOpcode() == ISD::Constant)
2070       return Op;
2071     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
    // use log2(XLen) bits. Mask the shift amount accordingly.
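    // For example, on RV64 the shift amount is ANDed with 63, so bit 6 of the
    // 7-bit FSL/FSR shift amount is always clear.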
2073     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2074     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2075                                 DAG.getConstant(ShAmtWidth, DL, VT));
2076     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
2077     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
2078   }
2079   case ISD::TRUNCATE: {
2080     SDLoc DL(Op);
2081     MVT VT = Op.getSimpleValueType();
2082     // Only custom-lower vector truncates
2083     if (!VT.isVector())
2084       return Op;
2085 
2086     // Truncates to mask types are handled differently
2087     if (VT.getVectorElementType() == MVT::i1)
2088       return lowerVectorMaskTrunc(Op, DAG);
2089 
2090     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2091     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2092     // truncate by one power of two at a time.
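    // For example, a truncate from vXi64 to vXi16 is emitted as two narrowing
    // steps: i64 -> i32 -> i16.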
2093     MVT DstEltVT = VT.getVectorElementType();
2094 
2095     SDValue Src = Op.getOperand(0);
2096     MVT SrcVT = Src.getSimpleValueType();
2097     MVT SrcEltVT = SrcVT.getVectorElementType();
2098 
2099     assert(DstEltVT.bitsLT(SrcEltVT) &&
2100            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2101            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2102            "Unexpected vector truncate lowering");
2103 
2104     MVT ContainerVT = SrcVT;
2105     if (SrcVT.isFixedLengthVector()) {
2106       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2107       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2108     }
2109 
2110     SDValue Result = Src;
2111     SDValue Mask, VL;
2112     std::tie(Mask, VL) =
2113         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2114     LLVMContext &Context = *DAG.getContext();
2115     const ElementCount Count = ContainerVT.getVectorElementCount();
2116     do {
2117       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2118       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2119       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2120                            Mask, VL);
2121     } while (SrcEltVT != DstEltVT);
2122 
2123     if (SrcVT.isFixedLengthVector())
2124       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2125 
2126     return Result;
2127   }
2128   case ISD::ANY_EXTEND:
2129   case ISD::ZERO_EXTEND:
2130     if (Op.getOperand(0).getValueType().isVector() &&
2131         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2132       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2133     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2134   case ISD::SIGN_EXTEND:
2135     if (Op.getOperand(0).getValueType().isVector() &&
2136         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2137       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2138     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2139   case ISD::SPLAT_VECTOR_PARTS:
2140     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2141   case ISD::INSERT_VECTOR_ELT:
2142     return lowerINSERT_VECTOR_ELT(Op, DAG);
2143   case ISD::EXTRACT_VECTOR_ELT:
2144     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2145   case ISD::VSCALE: {
2146     MVT VT = Op.getSimpleValueType();
2147     SDLoc DL(Op);
2148     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
    // We define our scalable vector types for LMUL=1 to use a 64-bit known
2150     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
2151     // vscale as VLENB / 8.
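    // For example, with VLEN=128, VLENB is 16 and vscale is 16 >> 3 = 2, so
    // <vscale x 2 x i32> holds 4 elements.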
2152     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
2153     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2154                                  DAG.getConstant(3, DL, VT));
2155     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2156   }
2157   case ISD::FP_EXTEND: {
    // RVV can only do fp_extend to types double the size of the source. We
2159     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
2160     // via f32.
2161     SDLoc DL(Op);
2162     MVT VT = Op.getSimpleValueType();
2163     SDValue Src = Op.getOperand(0);
2164     MVT SrcVT = Src.getSimpleValueType();
2165 
2166     // Prepare any fixed-length vector operands.
2167     MVT ContainerVT = VT;
2168     if (SrcVT.isFixedLengthVector()) {
2169       ContainerVT = getContainerForFixedLengthVector(VT);
2170       MVT SrcContainerVT =
2171           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2172       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2173     }
2174 
2175     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2176         SrcVT.getVectorElementType() != MVT::f16) {
2177       // For scalable vectors, we only need to close the gap between
2178       // vXf16->vXf64.
2179       if (!VT.isFixedLengthVector())
2180         return Op;
2181       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2182       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2183       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2184     }
2185 
2186     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2187     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2188     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2189         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2190 
2191     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2192                                            DL, DAG, Subtarget);
2193     if (VT.isFixedLengthVector())
2194       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2195     return Extend;
2196   }
2197   case ISD::FP_ROUND: {
    // RVV can only do fp_round to types half the size of the source. We
2199     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2200     // conversion instruction.
2201     SDLoc DL(Op);
2202     MVT VT = Op.getSimpleValueType();
2203     SDValue Src = Op.getOperand(0);
2204     MVT SrcVT = Src.getSimpleValueType();
2205 
2206     // Prepare any fixed-length vector operands.
2207     MVT ContainerVT = VT;
2208     if (VT.isFixedLengthVector()) {
2209       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2210       ContainerVT =
2211           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2212       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2213     }
2214 
2215     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2216         SrcVT.getVectorElementType() != MVT::f64) {
2217       // For scalable vectors, we only need to close the gap between
2218       // vXf64<->vXf16.
2219       if (!VT.isFixedLengthVector())
2220         return Op;
2221       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2222       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2223       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2224     }
2225 
2226     SDValue Mask, VL;
2227     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2228 
2229     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2230     SDValue IntermediateRound =
2231         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2232     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2233                                           DL, DAG, Subtarget);
2234 
2235     if (VT.isFixedLengthVector())
2236       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2237     return Round;
2238   }
2239   case ISD::FP_TO_SINT:
2240   case ISD::FP_TO_UINT:
2241   case ISD::SINT_TO_FP:
2242   case ISD::UINT_TO_FP: {
    // RVV can only do fp<->int conversions to types half/double the size of
2244     // the source. We custom-lower any conversions that do two hops into
2245     // sequences.
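    // For example, a vXi8 to vXf32 conversion (a 4x widening) is lowered as a
    // sign or zero extension to vXi32 followed by a regular int-to-fp
    // conversion.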
2246     MVT VT = Op.getSimpleValueType();
2247     if (!VT.isVector())
2248       return Op;
2249     SDLoc DL(Op);
2250     SDValue Src = Op.getOperand(0);
2251     MVT EltVT = VT.getVectorElementType();
2252     MVT SrcVT = Src.getSimpleValueType();
2253     MVT SrcEltVT = SrcVT.getVectorElementType();
2254     unsigned EltSize = EltVT.getSizeInBits();
2255     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2256     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2257            "Unexpected vector element types");
2258 
2259     bool IsInt2FP = SrcEltVT.isInteger();
2260     // Widening conversions
2261     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2262       if (IsInt2FP) {
2263         // Do a regular integer sign/zero extension then convert to float.
2264         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2265                                       VT.getVectorElementCount());
2266         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2267                                  ? ISD::ZERO_EXTEND
2268                                  : ISD::SIGN_EXTEND;
2269         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2270         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2271       }
2272       // FP2Int
2273       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2274       // Do one doubling fp_extend then complete the operation by converting
2275       // to int.
2276       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2277       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2278       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2279     }
2280 
2281     // Narrowing conversions
2282     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2283       if (IsInt2FP) {
2284         // One narrowing int_to_fp, then an fp_round.
2285         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2286         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2287         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2288         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2289       }
2290       // FP2Int
2291       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2292       // representable by the integer, the result is poison.
2293       MVT IVecVT =
2294           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2295                            VT.getVectorElementCount());
2296       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2297       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2298     }
2299 
2300     // Scalable vectors can exit here. Patterns will handle equally-sized
    // conversions as well as halving/doubling ones.
2302     if (!VT.isFixedLengthVector())
2303       return Op;
2304 
2305     // For fixed-length vectors we lower to a custom "VL" node.
2306     unsigned RVVOpc = 0;
2307     switch (Op.getOpcode()) {
2308     default:
2309       llvm_unreachable("Impossible opcode");
2310     case ISD::FP_TO_SINT:
2311       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2312       break;
2313     case ISD::FP_TO_UINT:
2314       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2315       break;
2316     case ISD::SINT_TO_FP:
2317       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2318       break;
2319     case ISD::UINT_TO_FP:
2320       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2321       break;
2322     }
2323 
2324     MVT ContainerVT, SrcContainerVT;
2325     // Derive the reference container type from the larger vector type.
2326     if (SrcEltSize > EltSize) {
2327       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2328       ContainerVT =
2329           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2330     } else {
2331       ContainerVT = getContainerForFixedLengthVector(VT);
2332       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2333     }
2334 
2335     SDValue Mask, VL;
2336     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2337 
2338     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2339     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2340     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2341   }
2342   case ISD::VECREDUCE_ADD:
2343   case ISD::VECREDUCE_UMAX:
2344   case ISD::VECREDUCE_SMAX:
2345   case ISD::VECREDUCE_UMIN:
2346   case ISD::VECREDUCE_SMIN:
2347     return lowerVECREDUCE(Op, DAG);
2348   case ISD::VECREDUCE_AND:
2349   case ISD::VECREDUCE_OR:
2350   case ISD::VECREDUCE_XOR:
2351     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2352       return lowerVectorMaskVECREDUCE(Op, DAG);
2353     return lowerVECREDUCE(Op, DAG);
2354   case ISD::VECREDUCE_FADD:
2355   case ISD::VECREDUCE_SEQ_FADD:
2356   case ISD::VECREDUCE_FMIN:
2357   case ISD::VECREDUCE_FMAX:
2358     return lowerFPVECREDUCE(Op, DAG);
2359   case ISD::INSERT_SUBVECTOR:
2360     return lowerINSERT_SUBVECTOR(Op, DAG);
2361   case ISD::EXTRACT_SUBVECTOR:
2362     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2363   case ISD::STEP_VECTOR:
2364     return lowerSTEP_VECTOR(Op, DAG);
2365   case ISD::VECTOR_REVERSE:
2366     return lowerVECTOR_REVERSE(Op, DAG);
2367   case ISD::BUILD_VECTOR:
2368     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2369   case ISD::SPLAT_VECTOR:
2370     if (Op.getValueType().getVectorElementType() == MVT::i1)
2371       return lowerVectorMaskSplat(Op, DAG);
2372     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
2373   case ISD::VECTOR_SHUFFLE:
2374     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2375   case ISD::CONCAT_VECTORS: {
2376     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2377     // better than going through the stack, as the default expansion does.
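    // For example, (concat_vectors v4i32:A, v4i32:B) becomes two
    // INSERT_SUBVECTORs of A and B into an undef v8i32 at indices 0 and 4.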
2378     SDLoc DL(Op);
2379     MVT VT = Op.getSimpleValueType();
2380     unsigned NumOpElts =
2381         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2382     SDValue Vec = DAG.getUNDEF(VT);
2383     for (const auto &OpIdx : enumerate(Op->ops()))
2384       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2385                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2386     return Vec;
2387   }
2388   case ISD::LOAD:
2389     if (auto V = expandUnalignedRVVLoad(Op, DAG))
2390       return V;
2391     if (Op.getValueType().isFixedLengthVector())
2392       return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2393     return Op;
2394   case ISD::STORE:
2395     if (auto V = expandUnalignedRVVStore(Op, DAG))
2396       return V;
2397     if (Op.getOperand(1).getValueType().isFixedLengthVector())
2398       return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2399     return Op;
2400   case ISD::MLOAD:
2401     return lowerMLOAD(Op, DAG);
2402   case ISD::MSTORE:
2403     return lowerMSTORE(Op, DAG);
2404   case ISD::SETCC:
2405     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2406   case ISD::ADD:
2407     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2408   case ISD::SUB:
2409     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2410   case ISD::MUL:
2411     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2412   case ISD::MULHS:
2413     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2414   case ISD::MULHU:
2415     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2416   case ISD::AND:
2417     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2418                                               RISCVISD::AND_VL);
2419   case ISD::OR:
2420     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2421                                               RISCVISD::OR_VL);
2422   case ISD::XOR:
2423     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2424                                               RISCVISD::XOR_VL);
2425   case ISD::SDIV:
2426     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2427   case ISD::SREM:
2428     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2429   case ISD::UDIV:
2430     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2431   case ISD::UREM:
2432     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2433   case ISD::SHL:
2434     return lowerToScalableOp(Op, DAG, RISCVISD::SHL_VL);
2435   case ISD::SRA:
2436     return lowerToScalableOp(Op, DAG, RISCVISD::SRA_VL);
2437   case ISD::SRL:
2438     return lowerToScalableOp(Op, DAG, RISCVISD::SRL_VL);
2439   case ISD::FADD:
2440     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2441   case ISD::FSUB:
2442     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2443   case ISD::FMUL:
2444     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2445   case ISD::FDIV:
2446     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2447   case ISD::FNEG:
2448     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2449   case ISD::FABS:
2450     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2451   case ISD::FSQRT:
2452     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2453   case ISD::FMA:
2454     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2455   case ISD::SMIN:
2456     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2457   case ISD::SMAX:
2458     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2459   case ISD::UMIN:
2460     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2461   case ISD::UMAX:
2462     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2463   case ISD::FMINNUM:
2464     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
2465   case ISD::FMAXNUM:
2466     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
2467   case ISD::ABS:
2468     return lowerABS(Op, DAG);
2469   case ISD::VSELECT:
2470     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2471   case ISD::FCOPYSIGN:
2472     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2473   case ISD::MGATHER:
2474     return lowerMGATHER(Op, DAG);
2475   case ISD::MSCATTER:
2476     return lowerMSCATTER(Op, DAG);
2477   case ISD::FLT_ROUNDS_:
2478     return lowerGET_ROUNDING(Op, DAG);
2479   case ISD::SET_ROUNDING:
2480     return lowerSET_ROUNDING(Op, DAG);
2481   case ISD::VP_ADD:
2482     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
2483   case ISD::VP_SUB:
2484     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
2485   case ISD::VP_MUL:
2486     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
2487   case ISD::VP_SDIV:
2488     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
2489   case ISD::VP_UDIV:
2490     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
2491   case ISD::VP_SREM:
2492     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
2493   case ISD::VP_UREM:
2494     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
2495   case ISD::VP_AND:
2496     return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
2497   case ISD::VP_OR:
2498     return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
2499   case ISD::VP_XOR:
2500     return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
2501   case ISD::VP_ASHR:
2502     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
2503   case ISD::VP_LSHR:
2504     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
2505   case ISD::VP_SHL:
2506     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
2507   case ISD::VP_FADD:
2508     return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
2509   case ISD::VP_FSUB:
2510     return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
2511   case ISD::VP_FMUL:
2512     return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
2513   case ISD::VP_FDIV:
2514     return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
2515   }
2516 }
2517 
2518 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2519                              SelectionDAG &DAG, unsigned Flags) {
2520   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2521 }
2522 
2523 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2524                              SelectionDAG &DAG, unsigned Flags) {
2525   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2526                                    Flags);
2527 }
2528 
2529 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2530                              SelectionDAG &DAG, unsigned Flags) {
2531   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2532                                    N->getOffset(), Flags);
2533 }
2534 
2535 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2536                              SelectionDAG &DAG, unsigned Flags) {
2537   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2538 }
2539 
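// Return a node computing the address of N using the addressing sequence
// appropriate for the current relocation model and code model. When compiling
// position-independent code, IsLocal selects between direct PC-relative
// addressing and a GOT-indirect load.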
2540 template <class NodeTy>
2541 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2542                                      bool IsLocal) const {
2543   SDLoc DL(N);
2544   EVT Ty = getPointerTy(DAG.getDataLayout());
2545 
2546   if (isPositionIndependent()) {
2547     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2548     if (IsLocal)
2549       // Use PC-relative addressing to access the symbol. This generates the
2550       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
2551       // %pcrel_lo(auipc)).
2552       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2553 
2554     // Use PC-relative addressing to access the GOT for this symbol, then load
2555     // the address from the GOT. This generates the pattern (PseudoLA sym),
2556     // which expands to (ld (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc)).
2557     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
2558   }
2559 
2560   switch (getTargetMachine().getCodeModel()) {
2561   default:
2562     report_fatal_error("Unsupported code model for lowering");
2563   case CodeModel::Small: {
2564     // Generate a sequence for accessing addresses within the first 2 GiB of
2565     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
2566     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
2567     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
2568     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2569     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
2570   }
2571   case CodeModel::Medium: {
2572     // Generate a sequence for accessing addresses within any 2GiB range within
2573     // the address space. This generates the pattern (PseudoLLA sym), which
2574     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
2575     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2576     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2577   }
2578   }
2579 }
2580 
2581 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
2582                                                 SelectionDAG &DAG) const {
2583   SDLoc DL(Op);
2584   EVT Ty = Op.getValueType();
2585   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2586   int64_t Offset = N->getOffset();
2587   MVT XLenVT = Subtarget.getXLenVT();
2588 
2589   const GlobalValue *GV = N->getGlobal();
2590   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
2591   SDValue Addr = getAddr(N, DAG, IsLocal);
2592 
2593   // In order to maximise the opportunity for common subexpression elimination,
2594   // emit a separate ADD node for the global address offset instead of folding
2595   // it in the global address node. Later peephole optimisations may choose to
2596   // fold it back in when profitable.
2597   if (Offset != 0)
2598     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2599                        DAG.getConstant(Offset, DL, XLenVT));
2600   return Addr;
2601 }
2602 
2603 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
2604                                                SelectionDAG &DAG) const {
2605   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2606 
2607   return getAddr(N, DAG);
2608 }
2609 
2610 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
2611                                                SelectionDAG &DAG) const {
2612   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2613 
2614   return getAddr(N, DAG);
2615 }
2616 
2617 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
2618                                             SelectionDAG &DAG) const {
2619   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2620 
2621   return getAddr(N, DAG);
2622 }
2623 
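// Return the address of a thread-local symbol using one of the static TLS
// models. With UseGOT set, the initial-exec sequence is used: the symbol's
// TP-relative offset is loaded from the GOT and added to the thread pointer.
// Otherwise the local-exec sequence is built directly from the %tprel_hi,
// %tprel_add and %tprel_lo relocations and the thread pointer register.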
2624 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
2625                                               SelectionDAG &DAG,
2626                                               bool UseGOT) const {
2627   SDLoc DL(N);
2628   EVT Ty = getPointerTy(DAG.getDataLayout());
2629   const GlobalValue *GV = N->getGlobal();
2630   MVT XLenVT = Subtarget.getXLenVT();
2631 
2632   if (UseGOT) {
2633     // Use PC-relative addressing to access the GOT for this TLS symbol, then
2634     // load the address from the GOT and add the thread pointer. This generates
2635     // the pattern (PseudoLA_TLS_IE sym), which expands to
2636     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
2637     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2638     SDValue Load =
2639         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
2640 
2641     // Add the thread pointer.
2642     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2643     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
2644   }
2645 
2646   // Generate a sequence for accessing the address relative to the thread
2647   // pointer, with the appropriate adjustment for the thread pointer offset.
2648   // This generates the pattern
2649   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
2650   SDValue AddrHi =
2651       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
2652   SDValue AddrAdd =
2653       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
2654   SDValue AddrLo =
2655       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
2656 
2657   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2658   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2659   SDValue MNAdd = SDValue(
2660       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
2661       0);
2662   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
2663 }
2664 
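// Return the address of a thread-local symbol using the general-dynamic TLS
// model: the symbol's GOT address is materialised with PseudoLA_TLS_GD and
// passed as the sole argument of a call to __tls_get_addr.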
2665 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
2666                                                SelectionDAG &DAG) const {
2667   SDLoc DL(N);
2668   EVT Ty = getPointerTy(DAG.getDataLayout());
2669   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
2670   const GlobalValue *GV = N->getGlobal();
2671 
2672   // Use a PC-relative addressing mode to access the global dynamic GOT address.
2673   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
2674   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
2675   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2676   SDValue Load =
2677       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
2678 
2679   // Prepare argument list to generate call.
2680   ArgListTy Args;
2681   ArgListEntry Entry;
2682   Entry.Node = Load;
2683   Entry.Ty = CallTy;
2684   Args.push_back(Entry);
2685 
2686   // Setup call to __tls_get_addr.
2687   TargetLowering::CallLoweringInfo CLI(DAG);
2688   CLI.setDebugLoc(DL)
2689       .setChain(DAG.getEntryNode())
2690       .setLibCallee(CallingConv::C, CallTy,
2691                     DAG.getExternalSymbol("__tls_get_addr", Ty),
2692                     std::move(Args));
2693 
2694   return LowerCallTo(CLI).first;
2695 }
2696 
2697 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2698                                                    SelectionDAG &DAG) const {
2699   SDLoc DL(Op);
2700   EVT Ty = Op.getValueType();
2701   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2702   int64_t Offset = N->getOffset();
2703   MVT XLenVT = Subtarget.getXLenVT();
2704 
2705   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2706 
2707   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2708       CallingConv::GHC)
2709     report_fatal_error("In GHC calling convention TLS is not supported");
2710 
2711   SDValue Addr;
2712   switch (Model) {
2713   case TLSModel::LocalExec:
2714     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2715     break;
2716   case TLSModel::InitialExec:
2717     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2718     break;
2719   case TLSModel::LocalDynamic:
2720   case TLSModel::GeneralDynamic:
2721     Addr = getDynamicTLSAddr(N, DAG);
2722     break;
2723   }
2724 
2725   // In order to maximise the opportunity for common subexpression elimination,
2726   // emit a separate ADD node for the global address offset instead of folding
2727   // it in the global address node. Later peephole optimisations may choose to
2728   // fold it back in when profitable.
2729   if (Offset != 0)
2730     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2731                        DAG.getConstant(Offset, DL, XLenVT));
2732   return Addr;
2733 }
2734 
2735 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2736   SDValue CondV = Op.getOperand(0);
2737   SDValue TrueV = Op.getOperand(1);
2738   SDValue FalseV = Op.getOperand(2);
2739   SDLoc DL(Op);
2740   MVT VT = Op.getSimpleValueType();
2741   MVT XLenVT = Subtarget.getXLenVT();
2742 
2743   // Lower vector SELECTs to VSELECTs by splatting the condition.
2744   if (VT.isVector()) {
2745     MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
2746     SDValue CondSplat = VT.isScalableVector()
2747                             ? DAG.getSplatVector(SplatCondVT, DL, CondV)
2748                             : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
2749     return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
2750   }
2751 
2752   // If the result type is XLenVT and CondV is the output of a SETCC node
2753   // which also operated on XLenVT inputs, then merge the SETCC node into the
2754   // lowered RISCVISD::SELECT_CC to take advantage of the integer
2755   // compare+branch instructions. i.e.:
2756   // (select (setcc lhs, rhs, cc), truev, falsev)
2757   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
2758   if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
2759       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
2760     SDValue LHS = CondV.getOperand(0);
2761     SDValue RHS = CondV.getOperand(1);
2762     const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
2763     ISD::CondCode CCVal = CC->get();
2764 
2765     // Special case for a select of 2 constants that have a difference of 1.
2766     // Normally this is done by DAGCombine, but if the select is introduced by
2767     // type legalization or op legalization, we miss it. Restricting to SETLT
2768     // case for now because that is what signed saturating add/sub need.
2769     // FIXME: We don't need the condition to be SETLT or even a SETCC,
2770     // but we would probably want to swap the true/false values if the condition
2771     // is SETGE/SETLE to avoid an XORI.
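    // For example, (select (setlt a, b), (C + 1), C) becomes
    // (add (setlt a, b), C) and (select (setlt a, b), C, (C + 1)) becomes
    // (sub (C + 1), (setlt a, b)), since the setcc produces 0 or 1.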
2772     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
2773         CCVal == ISD::SETLT) {
2774       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
2775       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
2776       if (TrueVal - 1 == FalseVal)
2777         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
2778       if (TrueVal + 1 == FalseVal)
2779         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
2780     }
2781 
2782     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2783 
2784     SDValue TargetCC = DAG.getTargetConstant(CCVal, DL, XLenVT);
2785     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
2786     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2787   }
2788 
2789   // Otherwise:
2790   // (select condv, truev, falsev)
2791   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
2792   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
2793   SDValue SetNE = DAG.getTargetConstant(ISD::SETNE, DL, XLenVT);
2794 
2795   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
2796 
2797   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2798 }
2799 
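// Lower ISD::BRCOND into RISCVISD::BR_CC. If the condition is a SETCC on
// XLenVT operands, the comparison is folded directly into the branch so that
// a single compare-and-branch instruction can be selected; otherwise the
// branch tests the condition value against zero.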
2800 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2801   SDValue CondV = Op.getOperand(1);
2802   SDLoc DL(Op);
2803   MVT XLenVT = Subtarget.getXLenVT();
2804 
2805   if (CondV.getOpcode() == ISD::SETCC &&
2806       CondV.getOperand(0).getValueType() == XLenVT) {
2807     SDValue LHS = CondV.getOperand(0);
2808     SDValue RHS = CondV.getOperand(1);
2809     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
2810 
2811     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2812 
2813     SDValue TargetCC = DAG.getCondCode(CCVal);
2814     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2815                        LHS, RHS, TargetCC, Op.getOperand(2));
2816   }
2817 
2818   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2819                      CondV, DAG.getConstant(0, DL, XLenVT),
2820                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
2821 }
2822 
2823 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2824   MachineFunction &MF = DAG.getMachineFunction();
2825   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
2826 
2827   SDLoc DL(Op);
2828   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2829                                  getPointerTy(MF.getDataLayout()));
2830 
2831   // vastart just stores the address of the VarArgsFrameIndex slot into the
2832   // memory location argument.
2833   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2834   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2835                       MachinePointerInfo(SV));
2836 }
2837 
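// Lower ISD::FRAMEADDR. Depth zero returns the frame pointer directly; each
// additional level of depth is handled by loading the previous frame pointer
// from offset -2*XLEN bytes relative to the current frame address.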
2838 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
2839                                             SelectionDAG &DAG) const {
2840   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2841   MachineFunction &MF = DAG.getMachineFunction();
2842   MachineFrameInfo &MFI = MF.getFrameInfo();
2843   MFI.setFrameAddressIsTaken(true);
2844   Register FrameReg = RI.getFrameRegister(MF);
2845   int XLenInBytes = Subtarget.getXLen() / 8;
2846 
2847   EVT VT = Op.getValueType();
2848   SDLoc DL(Op);
2849   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
2850   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2851   while (Depth--) {
2852     int Offset = -(XLenInBytes * 2);
2853     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
2854                               DAG.getIntPtrConstant(Offset, DL));
2855     FrameAddr =
2856         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2857   }
2858   return FrameAddr;
2859 }
2860 
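// Lower ISD::RETURNADDR. Depth zero returns the return address register as an
// implicit live-in; for deeper frames the saved return address is loaded from
// offset -XLEN bytes relative to the corresponding frame address.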
2861 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
2862                                              SelectionDAG &DAG) const {
2863   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2864   MachineFunction &MF = DAG.getMachineFunction();
2865   MachineFrameInfo &MFI = MF.getFrameInfo();
2866   MFI.setReturnAddressIsTaken(true);
2867   MVT XLenVT = Subtarget.getXLenVT();
2868   int XLenInBytes = Subtarget.getXLen() / 8;
2869 
2870   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2871     return SDValue();
2872 
2873   EVT VT = Op.getValueType();
2874   SDLoc DL(Op);
2875   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2876   if (Depth) {
2877     int Off = -XLenInBytes;
2878     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
2879     SDValue Offset = DAG.getConstant(Off, DL, VT);
2880     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
2881                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
2882                        MachinePointerInfo());
2883   }
2884 
2885   // Return the value of the return address register, marking it an implicit
2886   // live-in.
2887   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
2888   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
2889 }
2890 
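// Lower SHL_PARTS: shift a 2*XLEN-bit value, held in a Lo/Hi pair of XLenVT
// registers, left by a variable amount. The expansion below computes both the
// "short" (Shamt < XLEN) and "long" shift results and selects between them at
// run time.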
2891 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
2892                                                  SelectionDAG &DAG) const {
2893   SDLoc DL(Op);
2894   SDValue Lo = Op.getOperand(0);
2895   SDValue Hi = Op.getOperand(1);
2896   SDValue Shamt = Op.getOperand(2);
2897   EVT VT = Lo.getValueType();
2898 
2899   // if Shamt-XLEN < 0: // Shamt < XLEN
2900   //   Lo = Lo << Shamt
2901   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
2902   // else:
2903   //   Hi = Lo << (Shamt-XLEN)
2904   //   Lo = 0
2905 
2906   SDValue Zero = DAG.getConstant(0, DL, VT);
2907   SDValue One = DAG.getConstant(1, DL, VT);
2908   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2909   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2910   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2911   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2912 
2913   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2914   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
2915   SDValue ShiftRightLo =
2916       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
2917   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2918   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2919   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
2920 
2921   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2922 
2923   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
2924   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2925 
2926   SDValue Parts[2] = {Lo, Hi};
2927   return DAG.getMergeValues(Parts, DL);
2928 }
2929 
2930 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2931                                                   bool IsSRA) const {
2932   SDLoc DL(Op);
2933   SDValue Lo = Op.getOperand(0);
2934   SDValue Hi = Op.getOperand(1);
2935   SDValue Shamt = Op.getOperand(2);
2936   EVT VT = Lo.getValueType();
2937 
2938   // SRA expansion:
2939   //   if Shamt-XLEN < 0: // Shamt < XLEN
2940   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2941   //     Hi = Hi >>s Shamt
2942   //   else:
2943   //     Lo = Hi >>s (Shamt-XLEN);
2944   //     Hi = Hi >>s (XLEN-1)
2945   //
2946   // SRL expansion:
2947   //   if Shamt-XLEN < 0: // Shamt < XLEN
2948   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2949   //     Hi = Hi >>u Shamt
2950   //   else:
2951   //     Lo = Hi >>u (Shamt-XLEN);
2952   //     Hi = 0;
2953 
2954   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
2955 
2956   SDValue Zero = DAG.getConstant(0, DL, VT);
2957   SDValue One = DAG.getConstant(1, DL, VT);
2958   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2959   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2960   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2961   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2962 
2963   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2964   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
2965   SDValue ShiftLeftHi =
2966       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
2967   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
2968   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
2969   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
2970   SDValue HiFalse =
2971       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
2972 
2973   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2974 
2975   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
2976   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2977 
2978   SDValue Parts[2] = {Lo, Hi};
2979   return DAG.getMergeValues(Parts, DL);
2980 }
2981 
2982 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
2983 // legal equivalently-sized i8 type, so we can use that as a go-between.
2984 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
2985                                                   SelectionDAG &DAG) const {
2986   SDLoc DL(Op);
2987   MVT VT = Op.getSimpleValueType();
2988   SDValue SplatVal = Op.getOperand(0);
2989   // All-zeros or all-ones splats are handled specially.
2990   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
2991     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
2992     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
2993   }
2994   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
2995     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
2996     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
2997   }
2998   MVT XLenVT = Subtarget.getXLenVT();
2999   assert(SplatVal.getValueType() == XLenVT &&
3000          "Unexpected type for i1 splat value");
3001   MVT InterVT = VT.changeVectorElementType(MVT::i8);
3002   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
3003                          DAG.getConstant(1, DL, XLenVT));
3004   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
3005   SDValue Zero = DAG.getConstant(0, DL, InterVT);
3006   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
3007 }
3008 
3009 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
3010 // illegal (currently only vXi64 RV32).
3011 // FIXME: We could also catch non-constant sign-extended i32 values and lower
3012 // them to SPLAT_VECTOR_I64
3013 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
3014                                                      SelectionDAG &DAG) const {
3015   SDLoc DL(Op);
3016   MVT VecVT = Op.getSimpleValueType();
3017   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
3018          "Unexpected SPLAT_VECTOR_PARTS lowering");
3019 
3020   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
3021   SDValue Lo = Op.getOperand(0);
3022   SDValue Hi = Op.getOperand(1);
3023 
3024   if (VecVT.isFixedLengthVector()) {
3025     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3026     SDLoc DL(Op);
3027     SDValue Mask, VL;
3028     std::tie(Mask, VL) =
3029         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3030 
3031     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
3032     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
3033   }
3034 
3035   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
3036     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
3037     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
3038     // If the Hi constant is Lo's sign bit replicated to all bits, lower this
3039     // as a custom node in order to match RVV vector/scalar instructions.
3040     if ((LoC >> 31) == HiC)
3041       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3042   }
3043 
3044   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
3045   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
3046       isa<ConstantSDNode>(Hi.getOperand(1)) &&
3047       Hi.getConstantOperandVal(1) == 31)
3048     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3049 
3050   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
3051   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
3052                      DAG.getRegister(RISCV::X0, MVT::i64));
3053 }
3054 
3055 // Custom-lower extensions from mask vectors by using a vselect either with 1
3056 // for zero/any-extension or -1 for sign-extension:
3057 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
3058 // Note that any-extension is lowered identically to zero-extension.
3059 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
3060                                                 int64_t ExtTrueVal) const {
3061   SDLoc DL(Op);
3062   MVT VecVT = Op.getSimpleValueType();
3063   SDValue Src = Op.getOperand(0);
3064   // Only custom-lower extensions from mask types
3065   assert(Src.getValueType().isVector() &&
3066          Src.getValueType().getVectorElementType() == MVT::i1);
3067 
3068   MVT XLenVT = Subtarget.getXLenVT();
3069   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
3070   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
3071 
3072   if (VecVT.isScalableVector()) {
3073     // Be careful not to introduce illegal scalar types at this stage, and be
3074     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
3075     // illegal and must be expanded. Since we know that the constants are
3076     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
3077     bool IsRV32E64 =
3078         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
3079 
3080     if (!IsRV32E64) {
3081       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
3082       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
3083     } else {
3084       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
3085       SplatTrueVal =
3086           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
3087     }
3088 
3089     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
3090   }
3091 
3092   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3093   MVT I1ContainerVT =
3094       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3095 
3096   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
3097 
3098   SDValue Mask, VL;
3099   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3100 
3101   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3102   SplatTrueVal =
3103       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3104   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3105                                SplatTrueVal, SplatZero, VL);
3106 
3107   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3108 }
3109 
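// Custom-lower integer extensions of fixed-length vectors by converting the
// source to an RVV container type with the same number of elements, emitting
// the VL-predicated extension node given by ExtendOpc, and converting the
// result back to the original fixed-length type.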
3110 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3111     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3112   MVT ExtVT = Op.getSimpleValueType();
3113   // Only custom-lower extensions from fixed-length vector types.
3114   if (!ExtVT.isFixedLengthVector())
3115     return Op;
3116   MVT VT = Op.getOperand(0).getSimpleValueType();
3117   // Grab the canonical container type for the extended type. Infer the smaller
3118   // type from that to ensure the same number of vector elements, as we know
3119   // the LMUL will be sufficient to hold the smaller type.
3120   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
3121   // Construct the smaller source container type manually so that source and
3122   // dest have the same number of vector elements.
3123   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3124                                      ContainerExtVT.getVectorElementCount());
3125 
3126   SDValue Op1 =
3127       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3128 
3129   SDLoc DL(Op);
3130   SDValue Mask, VL;
3131   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3132 
3133   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3134 
3135   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3136 }
3137 
3138 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3139 // setcc operation:
3140 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3141 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3142                                                   SelectionDAG &DAG) const {
3143   SDLoc DL(Op);
3144   EVT MaskVT = Op.getValueType();
3145   // Only expect to custom-lower truncations to mask types
3146   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3147          "Unexpected type for vector mask lowering");
3148   SDValue Src = Op.getOperand(0);
3149   MVT VecVT = Src.getSimpleValueType();
3150 
3151   // If this is a fixed vector, we need to convert it to a scalable vector.
3152   MVT ContainerVT = VecVT;
3153   if (VecVT.isFixedLengthVector()) {
3154     ContainerVT = getContainerForFixedLengthVector(VecVT);
3155     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3156   }
3157 
3158   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
3159   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
3160 
3161   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
3162   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
3163 
3164   if (VecVT.isScalableVector()) {
3165     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
3166     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
3167   }
3168 
3169   SDValue Mask, VL;
3170   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3171 
3172   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3173   SDValue Trunc =
3174       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3175   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3176                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3177   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3178 }
3179 
3180 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3181 // first position of a vector, and that vector is slid up to the insert index.
3182 // By limiting the active vector length to index+1 and merging with the
3183 // original vector (with an undisturbed tail policy for elements >= VL), we
3184 // achieve the desired result of leaving all elements untouched except the one
3185 // at VL-1, which is replaced with the desired value.
3186 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3187                                                     SelectionDAG &DAG) const {
3188   SDLoc DL(Op);
3189   MVT VecVT = Op.getSimpleValueType();
3190   SDValue Vec = Op.getOperand(0);
3191   SDValue Val = Op.getOperand(1);
3192   SDValue Idx = Op.getOperand(2);
3193 
3194   if (VecVT.getVectorElementType() == MVT::i1) {
3195     // FIXME: For now we just promote to an i8 vector and insert into that,
3196     // but this is probably not optimal.
3197     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3198     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3199     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
3200     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
3201   }
3202 
3203   MVT ContainerVT = VecVT;
3204   // If the operand is a fixed-length vector, convert to a scalable one.
3205   if (VecVT.isFixedLengthVector()) {
3206     ContainerVT = getContainerForFixedLengthVector(VecVT);
3207     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3208   }
3209 
3210   MVT XLenVT = Subtarget.getXLenVT();
3211 
3212   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3213   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
3214   // Even i64-element vectors on RV32 can be lowered without scalar
3215   // legalization if the value is a sign-extended 32-bit value, i.e. the
3216   // most-significant 32 bits match the sign-extension of the lower 32 bits.
3217   // TODO: We could also catch sign extensions of a 32-bit value.
3218   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3219     const auto *CVal = cast<ConstantSDNode>(Val);
3220     if (isInt<32>(CVal->getSExtValue())) {
3221       IsLegalInsert = true;
3222       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3223     }
3224   }
3225 
3226   SDValue Mask, VL;
3227   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3228 
3229   SDValue ValInVec;
3230 
3231   if (IsLegalInsert) {
3232     unsigned Opc =
3233         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3234     if (isNullConstant(Idx)) {
3235       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3236       if (!VecVT.isFixedLengthVector())
3237         return Vec;
3238       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3239     }
3240     ValInVec =
3241         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3242   } else {
3243     // On RV32, i64-element vectors must be specially handled to place the
3244     // value at element 0, by using two vslide1up instructions in sequence on
3245     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3246     // this.
3247     SDValue One = DAG.getConstant(1, DL, XLenVT);
3248     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3249     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3250     MVT I32ContainerVT =
3251         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3252     SDValue I32Mask =
3253         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3254     // Limit the active VL to two.
3255     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
3256     // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
3257     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3258     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3259                            InsertI64VL);
3260     // First slide in the hi value, then the lo in underneath it.
3261     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3262                            ValHi, I32Mask, InsertI64VL);
3263     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3264                            ValLo, I32Mask, InsertI64VL);
3265     // Bitcast back to the right container type.
3266     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3267   }
3268 
3269   // Now that the value is in a vector, slide it into position.
3270   SDValue InsertVL =
3271       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3272   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3273                                 ValInVec, Idx, Mask, InsertVL);
3274   if (!VecVT.isFixedLengthVector())
3275     return Slideup;
3276   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3277 }
3278 
3279 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3280 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3281 // types this is done using VMV_X_S to allow us to glean information about the
3282 // sign bits of the result.
3283 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3284                                                      SelectionDAG &DAG) const {
3285   SDLoc DL(Op);
3286   SDValue Idx = Op.getOperand(1);
3287   SDValue Vec = Op.getOperand(0);
3288   EVT EltVT = Op.getValueType();
3289   MVT VecVT = Vec.getSimpleValueType();
3290   MVT XLenVT = Subtarget.getXLenVT();
3291 
3292   if (VecVT.getVectorElementType() == MVT::i1) {
3293     // FIXME: For now we just promote to an i8 vector and extract from that,
3294     // but this is probably not optimal.
3295     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3296     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3297     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3298   }
3299 
3300   // If this is a fixed vector, we need to convert it to a scalable vector.
3301   MVT ContainerVT = VecVT;
3302   if (VecVT.isFixedLengthVector()) {
3303     ContainerVT = getContainerForFixedLengthVector(VecVT);
3304     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3305   }
3306 
3307   // If the index is 0, the vector is already in the right position.
3308   if (!isNullConstant(Idx)) {
3309     // Use a VL of 1 to avoid processing more elements than we need.
3310     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3311     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3312     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3313     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3314                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3315   }
3316 
3317   if (!EltVT.isInteger()) {
3318     // Floating-point extracts are handled in TableGen.
3319     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3320                        DAG.getConstant(0, DL, XLenVT));
3321   }
3322 
3323   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3324   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3325 }
3326 
3327 // Some RVV intrinsics may claim that they want an integer operand to be
3328 // promoted or expanded.
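// This helper legalizes such a scalar ("splat") operand: operands narrower
// than XLEN are extended to XLenVT, while i64 operands on RV32 are either
// truncated (when they are sign-extended 32-bit constants) or split and
// splatted into a vector of the appropriate element type.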
3329 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3330                                           const RISCVSubtarget &Subtarget) {
3331   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3332           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3333          "Unexpected opcode");
3334 
3335   if (!Subtarget.hasStdExtV())
3336     return SDValue();
3337 
3338   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3339   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3340   SDLoc DL(Op);
3341 
3342   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3343       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3344   if (!II || !II->SplatOperand)
3345     return SDValue();
3346 
3347   unsigned SplatOp = II->SplatOperand + HasChain;
3348   assert(SplatOp < Op.getNumOperands());
3349 
3350   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3351   SDValue &ScalarOp = Operands[SplatOp];
3352   MVT OpVT = ScalarOp.getSimpleValueType();
3353   MVT XLenVT = Subtarget.getXLenVT();
3354 
3355   // If this isn't a scalar, or its type is XLenVT we're done.
3356   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3357     return SDValue();
3358 
3359   // Simplest case is that the operand needs to be promoted to XLenVT.
3360   if (OpVT.bitsLT(XLenVT)) {
3361     // If the operand is a constant, sign extend to increase our chances
3362     // of being able to use a .vi instruction. ANY_EXTEND would become a
3363     // zero extend and the simm5 check in isel would fail.
3364     // FIXME: Should we ignore the upper bits in isel instead?
3365     unsigned ExtOpc =
3366         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3367     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3368     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3369   }
3370 
3371   // Use the previous operand to get the vXi64 VT. The result might be a mask
3372   // VT for compares. Using the previous operand assumes that the previous
3373   // operand will never have a smaller element size than a scalar operand and
3374   // that a widening operation never uses SEW=64.
3375   // NOTE: If this fails the below assert, we can probably just find the
3376   // element count from any operand or result and use it to construct the VT.
3377   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3378   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3379 
3380   // The more complex case is when the scalar is larger than XLenVT.
3381   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3382          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3383 
3384   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3385   // on the instruction to sign-extend since SEW>XLEN.
3386   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3387     if (isInt<32>(CVal->getSExtValue())) {
3388       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3389       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3390     }
3391   }
3392 
3393   // We need to convert the scalar to a splat vector.
3394   // FIXME: Can we implicitly truncate the scalar if it is known to
3395   // be sign extended?
3396   // VL should be the last operand.
3397   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3398   assert(VL.getValueType() == XLenVT);
3399   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3400   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3401 }
3402 
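// Custom lowering for INTRINSIC_WO_CHAIN. The handled cases map directly onto
// RISCVISD nodes, with special care taken on RV32 for scalar operands that are
// wider than XLEN; anything else falls through to the splat-operand
// legalization above.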
3403 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3404                                                      SelectionDAG &DAG) const {
3405   unsigned IntNo = Op.getConstantOperandVal(0);
3406   SDLoc DL(Op);
3407   MVT XLenVT = Subtarget.getXLenVT();
3408 
3409   switch (IntNo) {
3410   default:
3411     break; // Don't custom lower most intrinsics.
3412   case Intrinsic::thread_pointer: {
3413     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3414     return DAG.getRegister(RISCV::X4, PtrVT);
3415   }
3416   case Intrinsic::riscv_orc_b:
3417     // Lower to the GORCI encoding for orc.b.
3418     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3419                        DAG.getConstant(7, DL, XLenVT));
3420   case Intrinsic::riscv_grev:
3421   case Intrinsic::riscv_gorc: {
3422     unsigned Opc =
3423         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3424     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3425   }
3426   case Intrinsic::riscv_shfl:
3427   case Intrinsic::riscv_unshfl: {
3428     unsigned Opc =
3429         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3430     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3431   }
3432   case Intrinsic::riscv_bcompress:
3433   case Intrinsic::riscv_bdecompress: {
3434     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3435                                                        : RISCVISD::BDECOMPRESS;
3436     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3437   }
3438   case Intrinsic::riscv_vmv_x_s:
3439     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3440     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3441                        Op.getOperand(1));
3442   case Intrinsic::riscv_vmv_v_x:
3443     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3444                             Op.getSimpleValueType(), DL, DAG, Subtarget);
3445   case Intrinsic::riscv_vfmv_v_f:
3446     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3447                        Op.getOperand(1), Op.getOperand(2));
3448   case Intrinsic::riscv_vmv_s_x: {
3449     SDValue Scalar = Op.getOperand(2);
3450 
3451     if (Scalar.getValueType().bitsLE(XLenVT)) {
3452       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3453       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3454                          Op.getOperand(1), Scalar, Op.getOperand(3));
3455     }
3456 
3457     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3458 
3459     // This is an i64 value that lives in two scalar registers. We have to
3460     // insert this in a convoluted way. First we build a vXi64 splat containing
3461     // the two values that we assemble using some bit math. Next we'll use
3462     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
3463     // to merge element 0 from our splat into the source vector.
3464     // FIXME: This is probably not the best way to do this, but it is
3465     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3466     // point.
3467     //   sw lo, (a0)
3468     //   sw hi, 4(a0)
3469     //   vlse vX, (a0)
3470     //
3471     //   vid.v      vVid
3472     //   vmseq.vx   mMask, vVid, 0
3473     //   vmerge.vvm vDest, vSrc, vVal, mMask
3474     MVT VT = Op.getSimpleValueType();
3475     SDValue Vec = Op.getOperand(1);
3476     SDValue VL = Op.getOperand(3);
3477 
3478     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
3479     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3480                                       DAG.getConstant(0, DL, MVT::i32), VL);
3481 
3482     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3483     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3484     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3485     SDValue SelectCond =
3486         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3487                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
3488     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3489                        Vec, VL);
3490   }
3491   case Intrinsic::riscv_vslide1up:
3492   case Intrinsic::riscv_vslide1down:
3493   case Intrinsic::riscv_vslide1up_mask:
3494   case Intrinsic::riscv_vslide1down_mask: {
3495     // We need to special case these when the scalar is larger than XLen.
3496     unsigned NumOps = Op.getNumOperands();
3497     bool IsMasked = NumOps == 6;
3498     unsigned OpOffset = IsMasked ? 1 : 0;
3499     SDValue Scalar = Op.getOperand(2 + OpOffset);
3500     if (Scalar.getValueType().bitsLE(XLenVT))
3501       break;
3502 
3503     // Splatting a sign extended constant is fine.
3504     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3505       if (isInt<32>(CVal->getSExtValue()))
3506         break;
3507 
3508     MVT VT = Op.getSimpleValueType();
3509     assert(VT.getVectorElementType() == MVT::i64 &&
3510            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3511 
3512     // Convert the vector source to the equivalent nxvXi32 vector.
3513     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3514     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3515 
3516     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3517                                    DAG.getConstant(0, DL, XLenVT));
3518     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3519                                    DAG.getConstant(1, DL, XLenVT));
3520 
3521     // Double the VL since we halved SEW.
3522     SDValue VL = Op.getOperand(NumOps - 1);
3523     SDValue I32VL =
3524         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3525 
3526     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
3527     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
3528 
3529     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3530     // instructions.
3531     if (IntNo == Intrinsic::riscv_vslide1up ||
3532         IntNo == Intrinsic::riscv_vslide1up_mask) {
3533       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3534                         I32Mask, I32VL);
3535       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3536                         I32Mask, I32VL);
3537     } else {
3538       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3539                         I32Mask, I32VL);
3540       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3541                         I32Mask, I32VL);
3542     }
3543 
3544     // Convert back to nxvXi64.
3545     Vec = DAG.getBitcast(VT, Vec);
3546 
3547     if (!IsMasked)
3548       return Vec;
3549 
3550     // Apply mask after the operation.
3551     SDValue Mask = Op.getOperand(NumOps - 2);
3552     SDValue MaskedOff = Op.getOperand(1);
3553     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
3554   }
3555   }
3556 
3557   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3558 }
3559 
3560 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
3561                                                     SelectionDAG &DAG) const {
3562   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3563 }
3564 
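// Return the scalable vector type holding exactly one vector register's worth
// (LMUL=1) of elements of VT's element type, i.e. RVVBitsPerBlock / SEW
// elements.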
3565 static MVT getLMUL1VT(MVT VT) {
3566   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
3567          "Unexpected vector MVT");
3568   return MVT::getScalableVectorVT(
3569       VT.getVectorElementType(),
3570       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
3571 }
3572 
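// Map an ISD::VECREDUCE_* opcode to the corresponding VL-predicated RISCVISD
// reduction opcode.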
3573 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
3574   switch (ISDOpcode) {
3575   default:
3576     llvm_unreachable("Unhandled reduction");
3577   case ISD::VECREDUCE_ADD:
3578     return RISCVISD::VECREDUCE_ADD_VL;
3579   case ISD::VECREDUCE_UMAX:
3580     return RISCVISD::VECREDUCE_UMAX_VL;
3581   case ISD::VECREDUCE_SMAX:
3582     return RISCVISD::VECREDUCE_SMAX_VL;
3583   case ISD::VECREDUCE_UMIN:
3584     return RISCVISD::VECREDUCE_UMIN_VL;
3585   case ISD::VECREDUCE_SMIN:
3586     return RISCVISD::VECREDUCE_SMIN_VL;
3587   case ISD::VECREDUCE_AND:
3588     return RISCVISD::VECREDUCE_AND_VL;
3589   case ISD::VECREDUCE_OR:
3590     return RISCVISD::VECREDUCE_OR_VL;
3591   case ISD::VECREDUCE_XOR:
3592     return RISCVISD::VECREDUCE_XOR_VL;
3593   }
3594 }
3595 
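// Lower and/or/xor reductions of mask (i1) vectors in terms of vpopc:
//   and: vpopc(~x) == 0,  or: vpopc(x) != 0,  xor: (vpopc(x) & 1) != 0.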
3596 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op,
3597                                                       SelectionDAG &DAG) const {
3598   SDLoc DL(Op);
3599   SDValue Vec = Op.getOperand(0);
3600   MVT VecVT = Vec.getSimpleValueType();
3601   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
3602           Op.getOpcode() == ISD::VECREDUCE_OR ||
3603           Op.getOpcode() == ISD::VECREDUCE_XOR) &&
3604          "Unexpected reduction lowering");
3605 
3606   MVT XLenVT = Subtarget.getXLenVT();
3607   assert(Op.getValueType() == XLenVT &&
3608          "Expected reduction output to be legalized to XLenVT");
3609 
3610   MVT ContainerVT = VecVT;
3611   if (VecVT.isFixedLengthVector()) {
3612     ContainerVT = getContainerForFixedLengthVector(VecVT);
3613     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3614   }
3615 
3616   SDValue Mask, VL;
3617   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3618   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3619 
3620   switch (Op.getOpcode()) {
3621   default:
3622     llvm_unreachable("Unhandled reduction");
3623   case ISD::VECREDUCE_AND:
3624     // vpopc ~x == 0
3625     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL);
3626     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3627     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ);
3628   case ISD::VECREDUCE_OR:
3629     // vpopc x != 0
3630     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3631     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3632   case ISD::VECREDUCE_XOR: {
3633     // ((vpopc x) & 1) != 0
3634     SDValue One = DAG.getConstant(1, DL, XLenVT);
3635     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3636     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
3637     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3638   }
3639   }
3640 }
3641 
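// Lower integer VECREDUCE_* nodes. Illegal vector types are first split down
// to a legal type, then a single VL-predicated reduction is emitted whose
// start value is the operation's neutral element splatted into an LMUL=1
// register group; element 0 of that group is the scalar result.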
3642 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
3643                                             SelectionDAG &DAG) const {
3644   SDLoc DL(Op);
3645   SDValue Vec = Op.getOperand(0);
3646   EVT VecEVT = Vec.getValueType();
3647 
3648   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
3649 
3650   // Due to ordering in type legalization we may have a vector type that needs
3651   // to be split. Do that manually so we can get down to a legal type.
3652   while (getTypeAction(*DAG.getContext(), VecEVT) ==
3653          TargetLowering::TypeSplitVector) {
3654     SDValue Lo, Hi;
3655     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
3656     VecEVT = Lo.getValueType();
3657     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
3658   }
3659 
3660   // TODO: The type may need to be widened rather than split. Or widened before
3661   // it can be split.
3662   if (!isTypeLegal(VecEVT))
3663     return SDValue();
3664 
3665   MVT VecVT = VecEVT.getSimpleVT();
3666   MVT VecEltVT = VecVT.getVectorElementType();
3667   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
3668 
3669   MVT ContainerVT = VecVT;
3670   if (VecVT.isFixedLengthVector()) {
3671     ContainerVT = getContainerForFixedLengthVector(VecVT);
3672     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3673   }
3674 
3675   MVT M1VT = getLMUL1VT(ContainerVT);
3676 
3677   SDValue Mask, VL;
3678   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3679 
3680   // FIXME: This is a VLMAX splat which might be too large and can prevent
3681   // vsetvli removal.
3682   SDValue NeutralElem =
3683       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
3684   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
3685   SDValue Reduction =
3686       DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
3687   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3688                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3689   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
3690 }
3691 
3692 // Given a reduction op, this function returns the matching reduction opcode,
3693 // the vector SDValue and the scalar SDValue required to lower this to a
3694 // RISCVISD node.
3695 static std::tuple<unsigned, SDValue, SDValue>
3696 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
3697   SDLoc DL(Op);
3698   auto Flags = Op->getFlags();
3699   unsigned Opcode = Op.getOpcode();
3700   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
3701   switch (Opcode) {
3702   default:
3703     llvm_unreachable("Unhandled reduction");
3704   case ISD::VECREDUCE_FADD:
3705     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
3706                            DAG.getConstantFP(0.0, DL, EltVT));
3707   case ISD::VECREDUCE_SEQ_FADD:
3708     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
3709                            Op.getOperand(0));
3710   case ISD::VECREDUCE_FMIN:
3711     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
3712                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3713   case ISD::VECREDUCE_FMAX:
3714     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
3715                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3716   }
3717 }
3718 
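// Lower floating-point reductions in the same way as the integer ones. The
// scalar start value (the explicit start operand for ordered FADD, 0.0 for
// unordered FADD, or the neutral element for FMIN/FMAX) is splatted into an
// LMUL=1 register group and element 0 of the reduction result is extracted.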
3719 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
3720                                               SelectionDAG &DAG) const {
3721   SDLoc DL(Op);
3722   MVT VecEltVT = Op.getSimpleValueType();
3723 
3724   unsigned RVVOpcode;
3725   SDValue VectorVal, ScalarVal;
3726   std::tie(RVVOpcode, VectorVal, ScalarVal) =
3727       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
3728   MVT VecVT = VectorVal.getSimpleValueType();
3729 
3730   MVT ContainerVT = VecVT;
3731   if (VecVT.isFixedLengthVector()) {
3732     ContainerVT = getContainerForFixedLengthVector(VecVT);
3733     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
3734   }
3735 
3736   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
3737 
3738   SDValue Mask, VL;
3739   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3740 
3741   // FIXME: This is a VLMAX splat which might be too large and can prevent
3742   // vsetvli removal.
3743   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
3744   SDValue Reduction =
3745       DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
3746   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3747                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3748 }
3749 
3750 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
3751                                                    SelectionDAG &DAG) const {
3752   SDValue Vec = Op.getOperand(0);
3753   SDValue SubVec = Op.getOperand(1);
3754   MVT VecVT = Vec.getSimpleValueType();
3755   MVT SubVecVT = SubVec.getSimpleValueType();
3756 
3757   SDLoc DL(Op);
3758   MVT XLenVT = Subtarget.getXLenVT();
3759   unsigned OrigIdx = Op.getConstantOperandVal(2);
3760   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3761 
3762   // We don't have the ability to slide mask vectors up indexed by their i1
3763   // elements; the smallest we can do is i8. Often we are able to bitcast to
3764   // equivalent i8 vectors. Note that when inserting a fixed-length vector
3765   // into a scalable one, we might not necessarily have enough scalable
3766   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
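  // For example, inserting a v16i1 subvector into an nxv64i1 vector at index
  // 16 can instead be lowered as inserting a v2i8 subvector into an nxv8i8
  // vector at index 2.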
3767   if (SubVecVT.getVectorElementType() == MVT::i1 &&
3768       (OrigIdx != 0 || !Vec.isUndef())) {
3769     if (VecVT.getVectorMinNumElements() >= 8 &&
3770         SubVecVT.getVectorMinNumElements() >= 8) {
3771       assert(OrigIdx % 8 == 0 && "Invalid index");
3772       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3773              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3774              "Unexpected mask vector lowering");
3775       OrigIdx /= 8;
3776       SubVecVT =
3777           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3778                            SubVecVT.isScalableVector());
3779       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3780                                VecVT.isScalableVector());
3781       Vec = DAG.getBitcast(VecVT, Vec);
3782       SubVec = DAG.getBitcast(SubVecVT, SubVec);
3783     } else {
3784       // We can't slide this mask vector up indexed by its i1 elements.
3785       // This poses a problem when we wish to insert a scalable vector which
3786       // can't be re-expressed as a larger type. Just choose the slow path and
3787       // extend to a larger type, then truncate back down.
3788       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3789       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3790       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3791       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
3792       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
3793                         Op.getOperand(2));
3794       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
3795       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
3796     }
3797   }
3798 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector as we only know the minimum
3802   // register size. Therefore we must slide the vector group up the full
3803   // amount.
3804   if (SubVecVT.isFixedLengthVector()) {
3805     if (OrigIdx == 0 && Vec.isUndef())
3806       return Op;
3807     MVT ContainerVT = VecVT;
3808     if (VecVT.isFixedLengthVector()) {
3809       ContainerVT = getContainerForFixedLengthVector(VecVT);
3810       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3811     }
3812     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
3813                          DAG.getUNDEF(ContainerVT), SubVec,
3814                          DAG.getConstant(0, DL, XLenVT));
3815     SDValue Mask =
3816         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3817     // Set the vector length to only the number of elements we care about. Note
3818     // that for slideup this includes the offset.
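    // For example, inserting a 4-element subvector at index 8 uses
    // SlideupAmt=8 and VL=12.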
3819     SDValue VL =
3820         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
3821     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3822     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3823                                   SubVec, SlideupAmt, Mask, VL);
3824     if (VecVT.isFixedLengthVector())
3825       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3826     return DAG.getBitcast(Op.getValueType(), Slideup);
3827   }
3828 
3829   unsigned SubRegIdx, RemIdx;
3830   std::tie(SubRegIdx, RemIdx) =
3831       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3832           VecVT, SubVecVT, OrigIdx, TRI);
3833 
3834   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
3835   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
3836                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
3837                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
3838 
3839   // 1. If the Idx has been completely eliminated and this subvector's size is
3840   // a vector register or a multiple thereof, or the surrounding elements are
3841   // undef, then this is a subvector insert which naturally aligns to a vector
3842   // register. These can easily be handled using subregister manipulation.
3843   // 2. If the subvector is smaller than a vector register, then the insertion
3844   // must preserve the undisturbed elements of the register. We do this by
3845   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
3846   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
3847   // subvector within the vector register, and an INSERT_SUBVECTOR of that
3848   // LMUL=1 type back into the larger vector (resolving to another subregister
3849   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
3850   // to avoid allocating a large register group to hold our subvector.
3851   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
3852     return Op;
3853 
  // VSLIDEUP works by leaving elements 0 <= i < OFFSET undisturbed, setting
  // elements OFFSET <= i < VL to the "subvector", and leaving elements
  // VL <= i < VLMAX to the tail policy (in our case undisturbed). This means
  // we can set up a subvector insertion where OFFSET is the insertion offset
  // and VL is OFFSET plus the size of the subvector.
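  // For example, inserting a 4-element subvector at element offset 2 uses
  // OFFSET=2 and VL=6: destination elements 0..1 and 6..VLMAX-1 are left
  // undisturbed while elements 2..5 receive the subvector.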
3859   MVT InterSubVT = VecVT;
3860   SDValue AlignedExtract = Vec;
3861   unsigned AlignedIdx = OrigIdx - RemIdx;
3862   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3863     InterSubVT = getLMUL1VT(VecVT);
3864     // Extract a subvector equal to the nearest full vector register type. This
    // should resolve to an EXTRACT_SUBREG instruction.
3866     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3867                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
3868   }
3869 
3870   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3871   // For scalable vectors this must be further multiplied by vscale.
3872   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
3873 
3874   SDValue Mask, VL;
3875   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
3876 
3877   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
3878   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
3879   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
3880   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
3881 
3882   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
3883                        DAG.getUNDEF(InterSubVT), SubVec,
3884                        DAG.getConstant(0, DL, XLenVT));
3885 
3886   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
3887                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
3888 
3889   // If required, insert this subvector back into the correct vector register.
3890   // This should resolve to an INSERT_SUBREG instruction.
3891   if (VecVT.bitsGT(InterSubVT))
3892     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
3893                           DAG.getConstant(AlignedIdx, DL, XLenVT));
3894 
3895   // We might have bitcast from a mask type: cast back to the original type if
3896   // required.
3897   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
3898 }
3899 
3900 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
3901                                                     SelectionDAG &DAG) const {
3902   SDValue Vec = Op.getOperand(0);
3903   MVT SubVecVT = Op.getSimpleValueType();
3904   MVT VecVT = Vec.getSimpleValueType();
3905 
3906   SDLoc DL(Op);
3907   MVT XLenVT = Subtarget.getXLenVT();
3908   unsigned OrigIdx = Op.getConstantOperandVal(1);
3909   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3910 
3911   // We don't have the ability to slide mask vectors down indexed by their i1
3912   // elements; the smallest we can do is i8. Often we are able to bitcast to
3913   // equivalent i8 vectors. Note that when extracting a fixed-length vector
3914   // from a scalable one, we might not necessarily have enough scalable
3915   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
3916   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
3917     if (VecVT.getVectorMinNumElements() >= 8 &&
3918         SubVecVT.getVectorMinNumElements() >= 8) {
3919       assert(OrigIdx % 8 == 0 && "Invalid index");
3920       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3921              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3922              "Unexpected mask vector lowering");
3923       OrigIdx /= 8;
3924       SubVecVT =
3925           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3926                            SubVecVT.isScalableVector());
3927       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3928                                VecVT.isScalableVector());
3929       Vec = DAG.getBitcast(VecVT, Vec);
3930     } else {
3931       // We can't slide this mask vector down, indexed by its i1 elements.
3932       // This poses a problem when we wish to extract a scalable vector which
3933       // can't be re-expressed as a larger type. Just choose the slow path and
3934       // extend to a larger type, then truncate back down.
      // TODO: We could probably improve this when extracting certain
      // fixed-length vectors from fixed-length vectors, where we can extract
      // as i8 and shift the correct element right to reach the desired
      // subvector.
3938       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3939       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3940       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3941       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
3942                         Op.getOperand(1));
3943       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
3944       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
3945     }
3946   }
3947 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector as we only know the minimum
3951   // register size. Therefore we must slide the vector group down the full
3952   // amount.
3953   if (SubVecVT.isFixedLengthVector()) {
3954     // With an index of 0 this is a cast-like subvector, which can be performed
3955     // with subregister operations.
3956     if (OrigIdx == 0)
3957       return Op;
3958     MVT ContainerVT = VecVT;
3959     if (VecVT.isFixedLengthVector()) {
3960       ContainerVT = getContainerForFixedLengthVector(VecVT);
3961       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3962     }
3963     SDValue Mask =
3964         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3965     // Set the vector length to only the number of elements we care about. This
3966     // avoids sliding down elements we're going to discard straight away.
3967     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
3968     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3969     SDValue Slidedown =
3970         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3971                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
3972     // Now we can use a cast-like subvector extract to get the result.
3973     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3974                             DAG.getConstant(0, DL, XLenVT));
3975     return DAG.getBitcast(Op.getValueType(), Slidedown);
3976   }
3977 
3978   unsigned SubRegIdx, RemIdx;
3979   std::tie(SubRegIdx, RemIdx) =
3980       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3981           VecVT, SubVecVT, OrigIdx, TRI);
3982 
3983   // If the Idx has been completely eliminated then this is a subvector extract
3984   // which naturally aligns to a vector register. These can easily be handled
3985   // using subregister manipulation.
3986   if (RemIdx == 0)
3987     return Op;
3988 
3989   // Else we must shift our vector register directly to extract the subvector.
3990   // Do this using VSLIDEDOWN.
3991 
3992   // If the vector type is an LMUL-group type, extract a subvector equal to the
  // nearest full vector register type. This should resolve to an EXTRACT_SUBREG
3994   // instruction.
3995   MVT InterSubVT = VecVT;
3996   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3997     InterSubVT = getLMUL1VT(VecVT);
3998     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3999                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
4000   }
4001 
4002   // Slide this vector register down by the desired number of elements in order
4003   // to place the desired subvector starting at element 0.
4004   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4005   // For scalable vectors this must be further multiplied by vscale.
4006   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
4007 
4008   SDValue Mask, VL;
4009   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
4010   SDValue Slidedown =
4011       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
4012                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
4013 
4014   // Now the vector is in the right position, extract our final subvector. This
4015   // should resolve to a COPY.
4016   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4017                           DAG.getConstant(0, DL, XLenVT));
4018 
4019   // We might have bitcast from a mask type: cast back to the original type if
4020   // required.
4021   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
4022 }
4023 
4024 // Lower step_vector to the vid instruction. Any non-identity step value must
// be accounted for by manual expansion.
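// For example, a step_vector with step 4 is lowered as (vid << 2), while a
// step of 3 is lowered as (vid * splat(3)).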
4026 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
4027                                               SelectionDAG &DAG) const {
4028   SDLoc DL(Op);
4029   MVT VT = Op.getSimpleValueType();
4030   MVT XLenVT = Subtarget.getXLenVT();
4031   SDValue Mask, VL;
4032   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
4033   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4034   uint64_t StepValImm = Op.getConstantOperandVal(0);
4035   if (StepValImm != 1) {
4036     assert(Op.getOperand(0).getValueType() == XLenVT &&
4037            "Unexpected step value type");
4038     if (isPowerOf2_64(StepValImm)) {
4039       SDValue StepVal =
4040           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4041                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
4042       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
4043     } else {
4044       SDValue StepVal =
4045           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Op.getOperand(0));
4046       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
4047     }
4048   }
4049   return StepVec;
4050 }
4051 
4052 // Implement vector_reverse using vrgather.vv with indices determined by
4053 // subtracting the id of each element from (VLMAX-1). This will convert
4054 // the indices like so:
4055 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
4056 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
4057 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
4058                                                  SelectionDAG &DAG) const {
4059   SDLoc DL(Op);
4060   MVT VecVT = Op.getSimpleValueType();
4061   unsigned EltSize = VecVT.getScalarSizeInBits();
4062   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
4063 
4064   unsigned MaxVLMAX = 0;
4065   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
4066   if (VectorBitsMax != 0)
4067     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
4068 
4069   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
4070   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
4071 
4072   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
4073   // to use vrgatherei16.vv.
4074   // TODO: It's also possible to use vrgatherei16.vv for other types to
4075   // decrease register width for the index calculation.
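  // (With SEW=8 the vrgather.vv index elements are themselves only 8 bits
  // wide and can address at most 256 source elements, hence the 16-bit index
  // variant.)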
4076   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
    // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
    // Reverse each half, then reassemble them in reverse order.
    // NOTE: It's also possible that after splitting, VLMAX no longer
    // requires vrgatherei16.vv.
4081     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
4082       SDValue Lo, Hi;
4083       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
4084       EVT LoVT, HiVT;
4085       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
4086       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
4087       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
4088       // Reassemble the low and high pieces reversed.
4089       // FIXME: This is a CONCAT_VECTORS.
4090       SDValue Res =
4091           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
4092                       DAG.getIntPtrConstant(0, DL));
4093       return DAG.getNode(
4094           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
4095           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
4096     }
4097 
4098     // Just promote the int type to i16 which will double the LMUL.
4099     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
4100     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
4101   }
4102 
4103   MVT XLenVT = Subtarget.getXLenVT();
4104   SDValue Mask, VL;
4105   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4106 
4107   // Calculate VLMAX-1 for the desired SEW.
4108   unsigned MinElts = VecVT.getVectorMinNumElements();
4109   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
4110                               DAG.getConstant(MinElts, DL, XLenVT));
4111   SDValue VLMinus1 =
4112       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
4113 
4114   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
4115   bool IsRV32E64 =
4116       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
4117   SDValue SplatVL;
4118   if (!IsRV32E64)
4119     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
4120   else
4121     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
4122 
4123   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
4124   SDValue Indices =
4125       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
4126 
4127   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
4128 }
4129 
4130 SDValue
4131 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
4132                                                      SelectionDAG &DAG) const {
4133   SDLoc DL(Op);
4134   auto *Load = cast<LoadSDNode>(Op);
4135 
4136   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4137                                         Load->getMemoryVT(),
4138                                         *Load->getMemOperand()) &&
4139          "Expecting a correctly-aligned load");
4140 
4141   MVT VT = Op.getSimpleValueType();
4142   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4143 
4144   SDValue VL =
4145       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4146 
4147   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4148   SDValue NewLoad = DAG.getMemIntrinsicNode(
4149       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
4150       Load->getMemoryVT(), Load->getMemOperand());
4151 
4152   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4153   return DAG.getMergeValues({Result, Load->getChain()}, DL);
4154 }
4155 
4156 SDValue
4157 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
4158                                                       SelectionDAG &DAG) const {
4159   SDLoc DL(Op);
4160   auto *Store = cast<StoreSDNode>(Op);
4161 
4162   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4163                                         Store->getMemoryVT(),
4164                                         *Store->getMemOperand()) &&
4165          "Expecting a correctly-aligned store");
4166 
4167   SDValue StoreVal = Store->getValue();
4168   MVT VT = StoreVal.getSimpleValueType();
4169 
  // If the size is less than a byte, we pad with zeros to make a full byte.
4171   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
4172     VT = MVT::v8i1;
4173     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
4174                            DAG.getConstant(0, DL, VT), StoreVal,
4175                            DAG.getIntPtrConstant(0, DL));
4176   }
4177 
4178   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4179 
4180   SDValue VL =
4181       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4182 
4183   SDValue NewValue =
4184       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
4185   return DAG.getMemIntrinsicNode(
4186       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
4187       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
4188       Store->getMemoryVT(), Store->getMemOperand());
4189 }
4190 
4191 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
4192   auto *Load = cast<MaskedLoadSDNode>(Op);
4193 
4194   SDLoc DL(Op);
4195   MVT VT = Op.getSimpleValueType();
4196   MVT XLenVT = Subtarget.getXLenVT();
4197 
4198   SDValue Mask = Load->getMask();
4199   SDValue PassThru = Load->getPassThru();
4200   SDValue VL;
4201 
4202   MVT ContainerVT = VT;
4203   if (VT.isFixedLengthVector()) {
4204     ContainerVT = getContainerForFixedLengthVector(VT);
4205     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4206 
4207     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4208     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4209     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4210   } else
4211     VL = DAG.getRegister(RISCV::X0, XLenVT);
4212 
4213   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4214   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
4215   SDValue Ops[] = {Load->getChain(),   IntID, PassThru,
4216                    Load->getBasePtr(), Mask,  VL};
4217   SDValue Result =
4218       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4219                               Load->getMemoryVT(), Load->getMemOperand());
4220   SDValue Chain = Result.getValue(1);
4221 
4222   if (VT.isFixedLengthVector())
4223     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4224 
4225   return DAG.getMergeValues({Result, Chain}, DL);
4226 }
4227 
4228 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const {
4229   auto *Store = cast<MaskedStoreSDNode>(Op);
4230 
4231   SDLoc DL(Op);
4232   SDValue Val = Store->getValue();
4233   SDValue Mask = Store->getMask();
4234   MVT VT = Val.getSimpleValueType();
4235   MVT XLenVT = Subtarget.getXLenVT();
4236   SDValue VL;
4237 
4238   MVT ContainerVT = VT;
4239   if (VT.isFixedLengthVector()) {
4240     ContainerVT = getContainerForFixedLengthVector(VT);
4241     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4242 
4243     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4244     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4245     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4246   } else
4247     VL = DAG.getRegister(RISCV::X0, XLenVT);
4248 
4249   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
4250   return DAG.getMemIntrinsicNode(
4251       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
4252       {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL},
4253       Store->getMemoryVT(), Store->getMemOperand());
4254 }
4255 
4256 SDValue
4257 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
4258                                                       SelectionDAG &DAG) const {
4259   MVT InVT = Op.getOperand(0).getSimpleValueType();
4260   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
4261 
4262   MVT VT = Op.getSimpleValueType();
4263 
4264   SDValue Op1 =
4265       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4266   SDValue Op2 =
4267       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4268 
4269   SDLoc DL(Op);
4270   SDValue VL =
4271       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4272 
4273   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4274   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4275 
4276   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
4277                             Op.getOperand(2), Mask, VL);
4278 
4279   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
4280 }
4281 
4282 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
4283     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
4284   MVT VT = Op.getSimpleValueType();
4285 
4286   if (VT.getVectorElementType() == MVT::i1)
4287     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
4288 
4289   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
4290 }
4291 
4292 // Lower vector ABS to smax(X, sub(0, X)).
4293 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
4294   SDLoc DL(Op);
4295   MVT VT = Op.getSimpleValueType();
4296   SDValue X = Op.getOperand(0);
4297 
4298   assert(VT.isFixedLengthVector() && "Unexpected type");
4299 
4300   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4301   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
4302 
4303   SDValue Mask, VL;
4304   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4305 
4306   SDValue SplatZero =
4307       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4308                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4309   SDValue NegX =
4310       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
4311   SDValue Max =
4312       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
4313 
4314   return convertFromScalableVector(VT, Max, DAG, Subtarget);
4315 }
4316 
4317 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
4318     SDValue Op, SelectionDAG &DAG) const {
4319   SDLoc DL(Op);
4320   MVT VT = Op.getSimpleValueType();
4321   SDValue Mag = Op.getOperand(0);
4322   SDValue Sign = Op.getOperand(1);
4323   assert(Mag.getValueType() == Sign.getValueType() &&
4324          "Can only handle COPYSIGN with matching types.");
4325 
4326   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4327   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
4328   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
4329 
4330   SDValue Mask, VL;
4331   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4332 
4333   SDValue CopySign =
4334       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
4335 
4336   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
4337 }
4338 
4339 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
4340     SDValue Op, SelectionDAG &DAG) const {
4341   MVT VT = Op.getSimpleValueType();
4342   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4343 
4344   MVT I1ContainerVT =
4345       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4346 
4347   SDValue CC =
4348       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
4349   SDValue Op1 =
4350       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4351   SDValue Op2 =
4352       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
4353 
4354   SDLoc DL(Op);
4355   SDValue Mask, VL;
4356   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4357 
4358   SDValue Select =
4359       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
4360 
4361   return convertFromScalableVector(VT, Select, DAG, Subtarget);
4362 }
4363 
4364 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
4365                                                unsigned NewOpc,
4366                                                bool HasMask) const {
4367   MVT VT = Op.getSimpleValueType();
4368   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4369 
4370   // Create list of operands by converting existing ones to scalable types.
4371   SmallVector<SDValue, 6> Ops;
4372   for (const SDValue &V : Op->op_values()) {
4373     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4374 
4375     // Pass through non-vector operands.
4376     if (!V.getValueType().isVector()) {
4377       Ops.push_back(V);
4378       continue;
4379     }
4380 
4381     // "cast" fixed length vector to a scalable vector.
4382     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
4383            "Only fixed length vectors are supported!");
4384     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4385   }
4386 
4387   SDLoc DL(Op);
4388   SDValue Mask, VL;
4389   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4390   if (HasMask)
4391     Ops.push_back(Mask);
4392   Ops.push_back(VL);
4393 
4394   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
4395   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
4396 }
4397 
4398 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
4399 // * Operands of each node are assumed to be in the same order.
4400 // * The EVL operand is promoted from i32 to i64 on RV64.
4401 // * Fixed-length vectors are converted to their scalable-vector container
4402 //   types.
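// For example, a VP integer add on fixed-length v4i32 operands would be
// emitted as its matching *_VL opcode on the corresponding scalable container
// type, with the mask and EVL operands passed through in the same positions.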
4403 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
4404                                        unsigned RISCVISDOpc) const {
4405   SDLoc DL(Op);
4406   MVT VT = Op.getSimpleValueType();
4407   SmallVector<SDValue, 4> Ops;
4408 
4409   for (const auto &OpIdx : enumerate(Op->ops())) {
4410     SDValue V = OpIdx.value();
4411     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4412     // Pass through operands which aren't fixed-length vectors.
4413     if (!V.getValueType().isFixedLengthVector()) {
4414       Ops.push_back(V);
4415       continue;
4416     }
4417     // "cast" fixed length vector to a scalable vector.
4418     MVT OpVT = V.getSimpleValueType();
4419     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
4420     assert(useRVVForFixedLengthVectorVT(OpVT) &&
4421            "Only fixed length vectors are supported!");
4422     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4423   }
4424 
4425   if (!VT.isFixedLengthVector())
4426     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
4427 
4428   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4429 
4430   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
4431 
4432   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
4433 }
4434 
4435 // Custom lower MGATHER to a legalized form for RVV. It will then be matched to
// an RVV indexed load. The RVV indexed load instructions only support the
4437 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4438 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4439 // indexing is extended to the XLEN value type and scaled accordingly.
4440 SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
4441   auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
4442   SDLoc DL(Op);
4443 
4444   SDValue Index = MGN->getIndex();
4445   SDValue Mask = MGN->getMask();
4446   SDValue PassThru = MGN->getPassThru();
4447 
4448   MVT VT = Op.getSimpleValueType();
4449   MVT IndexVT = Index.getSimpleValueType();
4450   MVT XLenVT = Subtarget.getXLenVT();
4451 
4452   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4453          "Unexpected VTs!");
4454   assert(MGN->getBasePtr().getSimpleValueType() == XLenVT &&
4455          "Unexpected pointer type");
4456   // Targets have to explicitly opt-in for extending vector loads.
4457   assert(MGN->getExtensionType() == ISD::NON_EXTLOAD &&
4458          "Unexpected extending MGATHER");
4459 
4460   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4461   // the selection of the masked intrinsics doesn't do this for us.
4462   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4463 
4464   SDValue VL;
4465   MVT ContainerVT = VT;
4466   if (VT.isFixedLengthVector()) {
4467     // We need to use the larger of the result and index type to determine the
4468     // scalable type to use so we don't increase LMUL for any operand/result.
4469     if (VT.bitsGE(IndexVT)) {
4470       ContainerVT = getContainerForFixedLengthVector(VT);
4471       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4472                                  ContainerVT.getVectorElementCount());
4473     } else {
4474       IndexVT = getContainerForFixedLengthVector(IndexVT);
4475       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
4476                                      IndexVT.getVectorElementCount());
4477     }
4478 
4479     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4480 
4481     if (!IsUnmasked) {
4482       MVT MaskVT =
4483           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4484       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4485       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4486     }
4487 
4488     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4489   } else
4490     VL = DAG.getRegister(RISCV::X0, XLenVT);
4491 
4492   unsigned IntID =
4493       IsUnmasked ? Intrinsic::riscv_vloxei : Intrinsic::riscv_vloxei_mask;
4494   SmallVector<SDValue, 8> Ops{MGN->getChain(),
4495                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4496   if (!IsUnmasked)
4497     Ops.push_back(PassThru);
4498   Ops.push_back(MGN->getBasePtr());
4499   Ops.push_back(Index);
4500   if (!IsUnmasked)
4501     Ops.push_back(Mask);
4502   Ops.push_back(VL);
4503 
4504   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4505   SDValue Result =
4506       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4507                               MGN->getMemoryVT(), MGN->getMemOperand());
4508   SDValue Chain = Result.getValue(1);
4509 
4510   if (VT.isFixedLengthVector())
4511     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4512 
4513   return DAG.getMergeValues({Result, Chain}, DL);
4514 }
4515 
4516 // Custom lower MSCATTER to a legalized form for RVV. It will then be matched to
// an RVV indexed store. The RVV indexed store instructions only support the
4518 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4519 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4520 // indexing is extended to the XLEN value type and scaled accordingly.
4521 SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
4522                                            SelectionDAG &DAG) const {
4523   auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
4524   SDLoc DL(Op);
4525   SDValue Index = MSN->getIndex();
4526   SDValue Mask = MSN->getMask();
4527   SDValue Val = MSN->getValue();
4528 
4529   MVT VT = Val.getSimpleValueType();
4530   MVT IndexVT = Index.getSimpleValueType();
4531   MVT XLenVT = Subtarget.getXLenVT();
4532 
4533   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4534          "Unexpected VTs!");
4535   assert(MSN->getBasePtr().getSimpleValueType() == XLenVT &&
4536          "Unexpected pointer type");
4537   // Targets have to explicitly opt-in for extending vector loads and
4538   // truncating vector stores.
  assert(!MSN->isTruncatingStore() && "Unexpected truncating MSCATTER");
4540 
4541   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4542   // the selection of the masked intrinsics doesn't do this for us.
4543   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4544 
4545   SDValue VL;
4546   if (VT.isFixedLengthVector()) {
4547     // We need to use the larger of the value and index type to determine the
4548     // scalable type to use so we don't increase LMUL for any operand/result.
4549     MVT ContainerVT;
4550     if (VT.bitsGE(IndexVT)) {
4551       ContainerVT = getContainerForFixedLengthVector(VT);
4552       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4553                                  ContainerVT.getVectorElementCount());
4554     } else {
4555       IndexVT = getContainerForFixedLengthVector(IndexVT);
4556       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4557                                      IndexVT.getVectorElementCount());
4558     }
4559 
4560     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4561     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4562 
4563     if (!IsUnmasked) {
4564       MVT MaskVT =
4565           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4566       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4567     }
4568 
4569     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4570   } else
4571     VL = DAG.getRegister(RISCV::X0, XLenVT);
4572 
4573   unsigned IntID =
4574       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
4575   SmallVector<SDValue, 8> Ops{MSN->getChain(),
4576                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4577   Ops.push_back(Val);
4578   Ops.push_back(MSN->getBasePtr());
4579   Ops.push_back(Index);
4580   if (!IsUnmasked)
4581     Ops.push_back(Mask);
4582   Ops.push_back(VL);
4583 
4584   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
4585                                  MSN->getMemoryVT(), MSN->getMemOperand());
4586 }
4587 
4588 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
4589                                                SelectionDAG &DAG) const {
4590   const MVT XLenVT = Subtarget.getXLenVT();
4591   SDLoc DL(Op);
4592   SDValue Chain = Op->getOperand(0);
4593   SDValue SysRegNo = DAG.getConstant(
4594       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4595   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
4596   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
4597 
  // The rounding mode encoding used by RISCV differs from the one used by
  // FLT_ROUNDS. To convert between them, the RISCV rounding mode is used as an
  // index into a table consisting of a sequence of 4-bit fields, each holding
  // the corresponding FLT_ROUNDS mode.
4602   static const int Table =
4603       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
4604       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
4605       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
4606       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
4607       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
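  // For example, if FRM currently holds RDN, the shift below selects the 4-bit
  // field at bit position 4 * RDN, and the masked result is
  // RoundingMode::TowardNegative as expected by FLT_ROUNDS.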
4608 
4609   SDValue Shift =
4610       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
4611   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4612                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4613   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4614                                DAG.getConstant(7, DL, XLenVT));
4615 
4616   return DAG.getMergeValues({Masked, Chain}, DL);
4617 }
4618 
4619 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
4620                                                SelectionDAG &DAG) const {
4621   const MVT XLenVT = Subtarget.getXLenVT();
4622   SDLoc DL(Op);
4623   SDValue Chain = Op->getOperand(0);
4624   SDValue RMValue = Op->getOperand(1);
4625   SDValue SysRegNo = DAG.getConstant(
4626       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4627 
  // The rounding mode encoding used by RISCV differs from the one used by
  // FLT_ROUNDS. To convert between them, the C rounding mode is used as an
  // index into a table consisting of a sequence of 4-bit fields, each holding
  // the corresponding RISCV mode.
4632   static const unsigned Table =
4633       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
4634       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
4635       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
4636       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
4637       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
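  // For example, an incoming FLT_ROUNDS value of TowardPositive selects the
  // 4-bit field holding RISCVFPRndMode::RUP, which is then written to FRM.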
4638 
4639   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
4640                               DAG.getConstant(2, DL, XLenVT));
4641   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4642                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4643   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4644                         DAG.getConstant(0x7, DL, XLenVT));
4645   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
4646                      RMValue);
4647 }
4648 
4649 // Returns the opcode of the target-specific SDNode that implements the 32-bit
4650 // form of the given Opcode.
4651 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
4652   switch (Opcode) {
4653   default:
4654     llvm_unreachable("Unexpected opcode");
4655   case ISD::SHL:
4656     return RISCVISD::SLLW;
4657   case ISD::SRA:
4658     return RISCVISD::SRAW;
4659   case ISD::SRL:
4660     return RISCVISD::SRLW;
4661   case ISD::SDIV:
4662     return RISCVISD::DIVW;
4663   case ISD::UDIV:
4664     return RISCVISD::DIVUW;
4665   case ISD::UREM:
4666     return RISCVISD::REMUW;
4667   case ISD::ROTL:
4668     return RISCVISD::ROLW;
4669   case ISD::ROTR:
4670     return RISCVISD::RORW;
4671   case RISCVISD::GREV:
4672     return RISCVISD::GREVW;
4673   case RISCVISD::GORC:
4674     return RISCVISD::GORCW;
4675   }
4676 }
4677 
4678 // Converts the given 32-bit operation to a target-specific SelectionDAG node.
4679 // Because i32 isn't a legal type for RV64, these operations would otherwise
4680 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
// later on because the fact that the operation was originally of type i32 is
// lost.
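// For example, an i32 SRA on RV64 has its operands extended to i64 (using
// ExtOpc), is emitted as RISCVISD::SRAW, and the result is truncated back to
// i32 so that ReplaceNodeResults sees the original value type.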
4683 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
4684                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
4685   SDLoc DL(N);
4686   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4687   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
4688   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
4689   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4690   // ReplaceNodeResults requires we maintain the same type for the return value.
4691   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
4692 }
4693 
// Converts the given 32-bit operation to an i64 operation with sign-extension
// semantics in order to reduce the number of sign-extension instructions.
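// For example, an i32 ADD is rewritten as
// (trunc (sext_inreg (add (anyext LHS), (anyext RHS)), i32)).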
4696 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
4697   SDLoc DL(N);
4698   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4699   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4700   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
4701   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
4702                                DAG.getValueType(MVT::i32));
4703   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
4704 }
4705 
4706 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
4707                                              SmallVectorImpl<SDValue> &Results,
4708                                              SelectionDAG &DAG) const {
4709   SDLoc DL(N);
4710   switch (N->getOpcode()) {
4711   default:
4712     llvm_unreachable("Don't know how to custom type legalize this operation!");
4713   case ISD::STRICT_FP_TO_SINT:
4714   case ISD::STRICT_FP_TO_UINT:
4715   case ISD::FP_TO_SINT:
4716   case ISD::FP_TO_UINT: {
4717     bool IsStrict = N->isStrictFPOpcode();
4718     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4719            "Unexpected custom legalisation");
4720     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
4721     // If the FP type needs to be softened, emit a library call using the 'si'
4722     // version. If we left it to default legalization we'd end up with 'di'. If
4723     // the FP type doesn't need to be softened just let generic type
4724     // legalization promote the result type.
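    // For example, an f32 -> i32 fp_to_sint on a soft-float target becomes a
    // call to __fixsfsi rather than __fixsfdi.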
4725     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
4726         TargetLowering::TypeSoftenFloat)
4727       return;
4728     RTLIB::Libcall LC;
4729     if (N->getOpcode() == ISD::FP_TO_SINT ||
4730         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
4731       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
4732     else
4733       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
4734     MakeLibCallOptions CallOptions;
4735     EVT OpVT = Op0.getValueType();
4736     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
4737     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
4738     SDValue Result;
4739     std::tie(Result, Chain) =
4740         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
4741     Results.push_back(Result);
4742     if (IsStrict)
4743       Results.push_back(Chain);
4744     break;
4745   }
4746   case ISD::READCYCLECOUNTER: {
4747     assert(!Subtarget.is64Bit() &&
4748            "READCYCLECOUNTER only has custom type legalization on riscv32");
4749 
4750     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4751     SDValue RCW =
4752         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
4753 
4754     Results.push_back(
4755         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
4756     Results.push_back(RCW.getValue(2));
4757     break;
4758   }
4759   case ISD::MUL: {
4760     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
4761     unsigned XLen = Subtarget.getXLen();
4762     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
4763     if (Size > XLen) {
4764       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
4765       SDValue LHS = N->getOperand(0);
4766       SDValue RHS = N->getOperand(1);
4767       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
4768 
4769       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
4770       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
4771       // We need exactly one side to be unsigned.
4772       if (LHSIsU == RHSIsU)
4773         return;
4774 
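      // MakeMULPair(S, U) builds the double-width product of a sign-extended S
      // and a zero-extended U: the low XLen bits come from MUL and the high
      // XLen bits from MULHSU.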
4775       auto MakeMULPair = [&](SDValue S, SDValue U) {
4776         MVT XLenVT = Subtarget.getXLenVT();
4777         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
4778         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
4779         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
4780         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
4781         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
4782       };
4783 
4784       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
4785       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
4786 
4787       // The other operand should be signed, but still prefer MULH when
4788       // possible.
4789       if (RHSIsU && LHSIsS && !RHSIsS)
4790         Results.push_back(MakeMULPair(LHS, RHS));
4791       else if (LHSIsU && RHSIsS && !LHSIsS)
4792         Results.push_back(MakeMULPair(RHS, LHS));
4793 
4794       return;
4795     }
4796     LLVM_FALLTHROUGH;
4797   }
4798   case ISD::ADD:
4799   case ISD::SUB:
4800     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4801            "Unexpected custom legalisation");
4802     if (N->getOperand(1).getOpcode() == ISD::Constant)
4803       return;
4804     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
4805     break;
4806   case ISD::SHL:
4807   case ISD::SRA:
4808   case ISD::SRL:
4809     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4810            "Unexpected custom legalisation");
4811     if (N->getOperand(1).getOpcode() == ISD::Constant)
4812       return;
4813     Results.push_back(customLegalizeToWOp(N, DAG));
4814     break;
4815   case ISD::ROTL:
4816   case ISD::ROTR:
4817     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4818            "Unexpected custom legalisation");
4819     Results.push_back(customLegalizeToWOp(N, DAG));
4820     break;
4821   case ISD::CTTZ:
4822   case ISD::CTTZ_ZERO_UNDEF:
4823   case ISD::CTLZ:
4824   case ISD::CTLZ_ZERO_UNDEF: {
4825     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4826            "Unexpected custom legalisation");
4827 
4828     SDValue NewOp0 =
4829         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4830     bool IsCTZ =
4831         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
4832     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
4833     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
4834     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4835     return;
4836   }
4837   case ISD::SDIV:
4838   case ISD::UDIV:
4839   case ISD::UREM: {
4840     MVT VT = N->getSimpleValueType(0);
4841     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
4842            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
4843            "Unexpected custom legalisation");
4844     if (N->getOperand(0).getOpcode() == ISD::Constant ||
4845         N->getOperand(1).getOpcode() == ISD::Constant)
4846       return;
4847 
4848     // If the input is i32, use ANY_EXTEND since the W instructions don't read
4849     // the upper 32 bits. For other types we need to sign or zero extend
4850     // based on the opcode.
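    // For example, an i8 UDIV is zero-extended and selected as DIVUW, while an
    // i8 SDIV is sign-extended and selected as DIVW.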
4851     unsigned ExtOpc = ISD::ANY_EXTEND;
4852     if (VT != MVT::i32)
4853       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
4854                                            : ISD::ZERO_EXTEND;
4855 
4856     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
4857     break;
4858   }
4859   case ISD::UADDO:
4860   case ISD::USUBO: {
4861     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4862            "Unexpected custom legalisation");
4863     bool IsAdd = N->getOpcode() == ISD::UADDO;
4864     // Create an ADDW or SUBW.
4865     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4866     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4867     SDValue Res =
4868         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
4869     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
4870                       DAG.getValueType(MVT::i32));
4871 
4872     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
4873     // Since the inputs are sign extended from i32, this is equivalent to
4874     // comparing the lower 32 bits.
4875     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4876     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
4877                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
4878 
4879     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4880     Results.push_back(Overflow);
4881     return;
4882   }
4883   case ISD::UADDSAT:
4884   case ISD::USUBSAT: {
4885     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4886            "Unexpected custom legalisation");
4887     if (Subtarget.hasStdExtZbb()) {
4888       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
4889       // sign extend allows overflow of the lower 32 bits to be detected on
4890       // the promoted size.
4891       SDValue LHS =
4892           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4893       SDValue RHS =
4894           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
4895       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
4896       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4897       return;
4898     }
4899 
4900     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
4901     // promotion for UADDO/USUBO.
4902     Results.push_back(expandAddSubSat(N, DAG));
4903     return;
4904   }
4905   case ISD::BITCAST: {
4906     EVT VT = N->getValueType(0);
4907     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
4908     SDValue Op0 = N->getOperand(0);
4909     EVT Op0VT = Op0.getValueType();
4910     MVT XLenVT = Subtarget.getXLenVT();
4911     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
4912       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
4913       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
4914     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
4915                Subtarget.hasStdExtF()) {
4916       SDValue FPConv =
4917           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
4918       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
4919     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
4920                isTypeLegal(Op0VT)) {
4921       // Custom-legalize bitcasts from fixed-length vector types to illegal
4922       // scalar types in order to improve codegen. Bitcast the vector to a
4923       // one-element vector type whose element type is the same as the result
4924       // type, and extract the first element.
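      // For example, on RV32 with fixed-length vector support an
      // (i64 (bitcast v4i16)) becomes an extract of element 0 from a v1i64
      // bitcast of the source vector.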
4925       LLVMContext &Context = *DAG.getContext();
4926       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
4927       Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
4928                                     DAG.getConstant(0, DL, XLenVT)));
4929     }
4930     break;
4931   }
4932   case RISCVISD::GREV:
4933   case RISCVISD::GORC: {
4934     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4935            "Unexpected custom legalisation");
4936     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
    // This is similar to customLegalizeToWOp: any-extend both operands to i64
    // (the second operand is a constant control/shift amount) and form the
    // equivalent W opcode.
4940     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4941     SDValue NewOp0 =
4942         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4943     SDValue NewOp1 =
4944         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4945     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4946     // ReplaceNodeResults requires we maintain the same type for the return
4947     // value.
4948     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4949     break;
4950   }
4951   case RISCVISD::SHFL: {
4952     // There is no SHFLIW instruction, but we can just promote the operation.
4953     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4954            "Unexpected custom legalisation");
4955     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
4956     SDValue NewOp0 =
4957         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4958     SDValue NewOp1 =
4959         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4960     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
4961     // ReplaceNodeResults requires we maintain the same type for the return
4962     // value.
4963     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4964     break;
4965   }
4966   case ISD::BSWAP:
4967   case ISD::BITREVERSE: {
4968     MVT VT = N->getSimpleValueType(0);
4969     MVT XLenVT = Subtarget.getXLenVT();
4970     assert((VT == MVT::i8 || VT == MVT::i16 ||
4971             (VT == MVT::i32 && Subtarget.is64Bit())) &&
4972            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
4973     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
4974     unsigned Imm = VT.getSizeInBits() - 1;
4975     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
4976     if (N->getOpcode() == ISD::BSWAP)
4977       Imm &= ~0x7U;
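    // For example, i16 BSWAP uses Imm = 15 & ~7 = 8 (swap the two bytes),
    // while i8 BITREVERSE uses Imm = 7 (reverse all the bits of the byte).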
4978     unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
4979     SDValue GREVI =
4980         DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
4981     // ReplaceNodeResults requires we maintain the same type for the return
4982     // value.
4983     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
4984     break;
4985   }
4986   case ISD::FSHL:
4987   case ISD::FSHR: {
4988     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4989            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
4990     SDValue NewOp0 =
4991         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4992     SDValue NewOp1 =
4993         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4994     SDValue NewOp2 =
4995         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4996     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
4997     // Mask the shift amount to 5 bits.
4998     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
4999                          DAG.getConstant(0x1f, DL, MVT::i64));
5000     unsigned Opc =
5001         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
5002     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
5003     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
5004     break;
5005   }
5006   case ISD::EXTRACT_VECTOR_ELT: {
5007     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
5008     // type is illegal (currently only vXi64 RV32).
5009     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
5010     // transferred to the destination register. We issue two of these from the
5011     // upper- and lower- halves of the SEW-bit vector element, slid down to the
5012     // first element.
5013     SDValue Vec = N->getOperand(0);
5014     SDValue Idx = N->getOperand(1);
5015 
5016     // The vector type hasn't been legalized yet so we can't issue target
5017     // specific nodes if it needs legalization.
5018     // FIXME: We would manually legalize if it's important.
5019     if (!isTypeLegal(Vec.getValueType()))
5020       return;
5021 
5022     MVT VecVT = Vec.getSimpleValueType();
5023 
5024     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
5025            VecVT.getVectorElementType() == MVT::i64 &&
5026            "Unexpected EXTRACT_VECTOR_ELT legalization");
5027 
5028     // If this is a fixed vector, we need to convert it to a scalable vector.
5029     MVT ContainerVT = VecVT;
5030     if (VecVT.isFixedLengthVector()) {
5031       ContainerVT = getContainerForFixedLengthVector(VecVT);
5032       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5033     }
5034 
5035     MVT XLenVT = Subtarget.getXLenVT();
5036 
5037     // Use a VL of 1 to avoid processing more elements than we need.
5038     MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5039     SDValue VL = DAG.getConstant(1, DL, XLenVT);
5040     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5041 
5042     // Unless the index is known to be 0, we must slide the vector down to get
5043     // the desired element into index 0.
5044     if (!isNullConstant(Idx)) {
5045       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5046                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
5047     }
5048 
5049     // Extract the lower XLEN bits of the correct vector element.
5050     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5051 
5052     // To extract the upper XLEN bits of the vector element, shift the first
5053     // element right by 32 bits and re-extract the lower XLEN bits.
5054     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5055                                      DAG.getConstant(32, DL, XLenVT), VL);
5056     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
5057                                  ThirtyTwoV, Mask, VL);
5058 
5059     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5060 
5061     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5062     break;
5063   }
5064   case ISD::INTRINSIC_WO_CHAIN: {
5065     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5066     switch (IntNo) {
5067     default:
5068       llvm_unreachable(
5069           "Don't know how to custom type legalize this intrinsic!");
5070     case Intrinsic::riscv_orc_b: {
5071       // Lower to the GORCI encoding for orc.b with the operand extended.
5072       SDValue NewOp =
5073           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5074       // If Zbp is enabled, use GORCIW which will sign extend the result.
5075       unsigned Opc =
5076           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
5077       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
5078                                 DAG.getConstant(7, DL, MVT::i64));
5079       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5080       return;
5081     }
5082     case Intrinsic::riscv_grev:
5083     case Intrinsic::riscv_gorc: {
5084       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5085              "Unexpected custom legalisation");
5086       SDValue NewOp1 =
5087           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5088       SDValue NewOp2 =
5089           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5090       unsigned Opc =
5091           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
5092       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5093       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5094       break;
5095     }
5096     case Intrinsic::riscv_shfl:
5097     case Intrinsic::riscv_unshfl: {
5098       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5099              "Unexpected custom legalisation");
5100       SDValue NewOp1 =
5101           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5102       SDValue NewOp2 =
5103           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5104       unsigned Opc =
5105           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
5106       if (isa<ConstantSDNode>(N->getOperand(2))) {
5107         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5108                              DAG.getConstant(0xf, DL, MVT::i64));
5109         Opc =
5110             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
5111       }
5112       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5113       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5114       break;
5115     }
5116     case Intrinsic::riscv_bcompress:
5117     case Intrinsic::riscv_bdecompress: {
5118       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5119              "Unexpected custom legalisation");
5120       SDValue NewOp1 =
5121           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5122       SDValue NewOp2 =
5123           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5124       unsigned Opc = IntNo == Intrinsic::riscv_bcompress
5125                          ? RISCVISD::BCOMPRESSW
5126                          : RISCVISD::BDECOMPRESSW;
5127       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5128       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5129       break;
5130     }
5131     case Intrinsic::riscv_vmv_x_s: {
5132       EVT VT = N->getValueType(0);
5133       MVT XLenVT = Subtarget.getXLenVT();
5134       if (VT.bitsLT(XLenVT)) {
        // Simple case: just extract using vmv.x.s and truncate.
5136         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
5137                                       Subtarget.getXLenVT(), N->getOperand(1));
5138         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
5139         return;
5140       }
5141 
5142       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
5143              "Unexpected custom legalization");
5144 
5145       // We need to do the move in two steps.
5146       SDValue Vec = N->getOperand(1);
5147       MVT VecVT = Vec.getSimpleValueType();
5148 
5149       // First extract the lower XLEN bits of the element.
5150       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5151 
5152       // To extract the upper XLEN bits of the vector element, shift the first
5153       // element right by 32 bits and re-extract the lower XLEN bits.
5154       SDValue VL = DAG.getConstant(1, DL, XLenVT);
5155       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5156       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5157       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
5158                                        DAG.getConstant(32, DL, XLenVT), VL);
5159       SDValue LShr32 =
5160           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
5161       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5162 
5163       Results.push_back(
5164           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5165       break;
5166     }
5167     }
5168     break;
5169   }
5170   case ISD::VECREDUCE_ADD:
5171   case ISD::VECREDUCE_AND:
5172   case ISD::VECREDUCE_OR:
5173   case ISD::VECREDUCE_XOR:
5174   case ISD::VECREDUCE_SMAX:
5175   case ISD::VECREDUCE_UMAX:
5176   case ISD::VECREDUCE_SMIN:
5177   case ISD::VECREDUCE_UMIN:
5178     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
5179       Results.push_back(V);
5180     break;
5181   case ISD::FLT_ROUNDS_: {
5182     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
5183     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
5184     Results.push_back(Res.getValue(0));
5185     Results.push_back(Res.getValue(1));
5186     break;
5187   }
5188   }
5189 }
5190 
5191 // A structure to hold one of the bit-manipulation patterns below. Together, a
5192 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
5193 //   (or (and (shl x, 1), 0xAAAAAAAA),
5194 //       (and (srl x, 1), 0x55555555))
5195 struct RISCVBitmanipPat {
5196   SDValue Op;
5197   unsigned ShAmt;
5198   bool IsSHL;
5199 
5200   bool formsPairWith(const RISCVBitmanipPat &Other) const {
5201     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
5202   }
5203 };
5204 
5205 // Matches patterns of the form
5206 //   (and (shl x, C2), (C1 << C2))
5207 //   (and (srl x, C2), C1)
5208 //   (shl (and x, C1), C2)
5209 //   (srl (and x, (C1 << C2)), C2)
5210 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
5211 // The expected masks for each shift amount are specified in BitmanipMasks where
5212 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
// The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
// whether BitmanipMasks contains 6 or 5 entries respectively, assuming the
// maximum possible XLen is 64.
5216 static Optional<RISCVBitmanipPat>
5217 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
5218   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
5219          "Unexpected number of masks");
5220   Optional<uint64_t> Mask;
5221   // Optionally consume a mask around the shift operation.
5222   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
5223     Mask = Op.getConstantOperandVal(1);
5224     Op = Op.getOperand(0);
5225   }
5226   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
5227     return None;
5228   bool IsSHL = Op.getOpcode() == ISD::SHL;
5229 
5230   if (!isa<ConstantSDNode>(Op.getOperand(1)))
5231     return None;
5232   uint64_t ShAmt = Op.getConstantOperandVal(1);
5233 
5234   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5235   if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
5236     return None;
5237   // If we don't have enough masks for 64 bit, then we must be trying to
5238   // match SHFL so we're only allowed to shift 1/4 of the width.
5239   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
5240     return None;
5241 
5242   SDValue Src = Op.getOperand(0);
5243 
5244   // The expected mask is shifted left when the AND is found around SHL
5245   // patterns.
5246   //   ((x >> 1) & 0x55555555)
5247   //   ((x << 1) & 0xAAAAAAAA)
5248   bool SHLExpMask = IsSHL;
5249 
5250   if (!Mask) {
5251     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
5252     // the mask is all ones: consume that now.
5253     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
5254       Mask = Src.getConstantOperandVal(1);
5255       Src = Src.getOperand(0);
5256       // The expected mask is now in fact shifted left for SRL, so reverse the
5257       // decision.
5258       //   ((x & 0xAAAAAAAA) >> 1)
5259       //   ((x & 0x55555555) << 1)
5260       SHLExpMask = !SHLExpMask;
5261     } else {
5262       // Use a default shifted mask of all-ones if there's no AND, truncated
5263       // down to the expected width. This simplifies the logic later on.
5264       Mask = maskTrailingOnes<uint64_t>(Width);
5265       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
5266     }
5267   }
5268 
5269   unsigned MaskIdx = Log2_32(ShAmt);
5270   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5271 
5272   if (SHLExpMask)
5273     ExpMask <<= ShAmt;
5274 
5275   if (Mask != ExpMask)
5276     return None;
5277 
5278   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
5279 }
5280 
5281 // Matches any of the following bit-manipulation patterns:
5282 //   (and (shl x, 1), (0x55555555 << 1))
5283 //   (and (srl x, 1), 0x55555555)
5284 //   (shl (and x, 0x55555555), 1)
5285 //   (srl (and x, (0x55555555 << 1)), 1)
5286 // where the shift amount and mask may vary thus:
5287 //   [1]  = 0x55555555 / 0xAAAAAAAA
5288 //   [2]  = 0x33333333 / 0xCCCCCCCC
5289 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
5290 //   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
5292 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
5293 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
5294   // These are the unshifted masks which we use to match bit-manipulation
5295   // patterns. They may be shifted left in certain circumstances.
5296   static const uint64_t BitmanipMasks[] = {
5297       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
5298       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
5299 
5300   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5301 }
5302 
5303 // Match the following pattern as a GREVI(W) operation
5304 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
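// For example,
//   (or (and (shl x, 8), 0xFF00FF00), (and (srl x, 8), 0x00FF00FF))
// swaps adjacent bytes and is matched as (GREV x, 8).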
5305 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
5306                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5308   EVT VT = Op.getValueType();
5309 
5310   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5311     auto LHS = matchGREVIPat(Op.getOperand(0));
5312     auto RHS = matchGREVIPat(Op.getOperand(1));
5313     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
5314       SDLoc DL(Op);
5315       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
5316                          DAG.getConstant(LHS->ShAmt, DL, VT));
5317     }
5318   }
5319   return SDValue();
5320 }
5321 
// Matches any of the following patterns as a GORCI(W) operation
5323 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
5324 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
5325 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
5326 // Note that with the variant of 3.,
5327 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
5328 // the inner pattern will first be matched as GREVI and then the outer
5329 // pattern will be matched to GORC via the first rule above.
5330 // 4.  (or (rotl/rotr x, bitwidth/2), x)
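// For example, on RV32 (or (rotl x, 16), x) is matched by rule 4 and becomes
// (GORC x, 16).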
5331 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
5332                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5334   EVT VT = Op.getValueType();
5335 
5336   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5337     SDLoc DL(Op);
5338     SDValue Op0 = Op.getOperand(0);
5339     SDValue Op1 = Op.getOperand(1);
5340 
5341     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
5342       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
5343           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
5344           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
5345         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
5346       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
5347       if ((Reverse.getOpcode() == ISD::ROTL ||
5348            Reverse.getOpcode() == ISD::ROTR) &&
5349           Reverse.getOperand(0) == X &&
5350           isa<ConstantSDNode>(Reverse.getOperand(1))) {
5351         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
5352         if (RotAmt == (VT.getSizeInBits() / 2))
5353           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
5354                              DAG.getConstant(RotAmt, DL, VT));
5355       }
5356       return SDValue();
5357     };
5358 
5359     // Check for either commutable permutation of (or (GREVI x, shamt), x)
5360     if (SDValue V = MatchOROfReverse(Op0, Op1))
5361       return V;
5362     if (SDValue V = MatchOROfReverse(Op1, Op0))
5363       return V;
5364 
5365     // OR is commutable so canonicalize its OR operand to the left
5366     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
5367       std::swap(Op0, Op1);
5368     if (Op0.getOpcode() != ISD::OR)
5369       return SDValue();
5370     SDValue OrOp0 = Op0.getOperand(0);
5371     SDValue OrOp1 = Op0.getOperand(1);
5372     auto LHS = matchGREVIPat(OrOp0);
5373     // OR is commutable so swap the operands and try again: x might have been
5374     // on the left
5375     if (!LHS) {
5376       std::swap(OrOp0, OrOp1);
5377       LHS = matchGREVIPat(OrOp0);
5378     }
5379     auto RHS = matchGREVIPat(Op1);
5380     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
5381       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
5382                          DAG.getConstant(LHS->ShAmt, DL, VT));
5383     }
5384   }
5385   return SDValue();
5386 }
5387 
5388 // Matches any of the following bit-manipulation patterns:
5389 //   (and (shl x, 1), (0x22222222 << 1))
5390 //   (and (srl x, 1), 0x22222222)
5391 //   (shl (and x, 0x22222222), 1)
5392 //   (srl (and x, (0x22222222 << 1)), 1)
5393 // where the shift amount and mask may vary thus:
5394 //   [1]  = 0x22222222 / 0x44444444
//   [2]  = 0x0C0C0C0C / 0x30303030
5396 //   [4]  = 0x00F000F0 / 0x0F000F00
5397 //   [8]  = 0x0000FF00 / 0x00FF0000
5398 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
5399 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
5400   // These are the unshifted masks which we use to match bit-manipulation
5401   // patterns. They may be shifted left in certain circumstances.
5402   static const uint64_t BitmanipMasks[] = {
5403       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
5404       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
5405 
5406   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5407 }
5408 
// Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
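// For example, on RV32
//   (or (or (and (shl x, 8), 0x00FF0000), (and (srl x, 8), 0x0000FF00)),
//       (and x, 0xFF0000FF))
// swaps the two middle bytes and is matched as (SHFL x, 8).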
5410 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
5411                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5413   EVT VT = Op.getValueType();
5414 
5415   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
5416     return SDValue();
5417 
5418   SDValue Op0 = Op.getOperand(0);
5419   SDValue Op1 = Op.getOperand(1);
5420 
  // OR is commutable, so canonicalize the inner OR node to the LHS.
5422   if (Op0.getOpcode() != ISD::OR)
5423     std::swap(Op0, Op1);
5424   if (Op0.getOpcode() != ISD::OR)
5425     return SDValue();
5426 
5427   // We found an inner OR, so our operands are the operands of the inner OR
5428   // and the other operand of the outer OR.
5429   SDValue A = Op0.getOperand(0);
5430   SDValue B = Op0.getOperand(1);
5431   SDValue C = Op1;
5432 
5433   auto Match1 = matchSHFLPat(A);
5434   auto Match2 = matchSHFLPat(B);
5435 
5436   // If neither matched, we failed.
5437   if (!Match1 && !Match2)
5438     return SDValue();
5439 
  // We had at least one match. If one failed, try the remaining C operand.
5441   if (!Match1) {
5442     std::swap(A, C);
5443     Match1 = matchSHFLPat(A);
5444     if (!Match1)
5445       return SDValue();
5446   } else if (!Match2) {
5447     std::swap(B, C);
5448     Match2 = matchSHFLPat(B);
5449     if (!Match2)
5450       return SDValue();
5451   }
5452   assert(Match1 && Match2);
5453 
5454   // Make sure our matches pair up.
5455   if (!Match1->formsPairWith(*Match2))
5456     return SDValue();
5457 
  // All that remains is to make sure C is an AND with the same input, one that
  // masks out the bits that are being shuffled.
5460   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
5461       C.getOperand(0) != Match1->Op)
5462     return SDValue();
5463 
5464   uint64_t Mask = C.getConstantOperandVal(1);
5465 
5466   static const uint64_t BitmanipMasks[] = {
5467       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
5468       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
5469   };
5470 
5471   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5472   unsigned MaskIdx = Log2_32(Match1->ShAmt);
5473   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5474 
5475   if (Mask != ExpMask)
5476     return SDValue();
5477 
5478   SDLoc DL(Op);
5479   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
5480                      DAG.getConstant(Match1->ShAmt, DL, VT));
5481 }
5482 
5483 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
5484 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated GORCI
// stage does not undo itself, but it is redundant.
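// For example, (GREVI (GREVI x, 24), 8) becomes (GREVI x, 16),
// (GREVI (GREVI x, 2), 2) becomes x, and (GORCI (GORCI x, 1), 3) becomes
// (GORCI x, 3).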
5487 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
5488   SDValue Src = N->getOperand(0);
5489 
5490   if (Src.getOpcode() != N->getOpcode())
5491     return SDValue();
5492 
5493   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
5494       !isa<ConstantSDNode>(Src.getOperand(1)))
5495     return SDValue();
5496 
5497   unsigned ShAmt1 = N->getConstantOperandVal(1);
5498   unsigned ShAmt2 = Src.getConstantOperandVal(1);
5499   Src = Src.getOperand(0);
5500 
5501   unsigned CombinedShAmt;
5502   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
5503     CombinedShAmt = ShAmt1 | ShAmt2;
5504   else
5505     CombinedShAmt = ShAmt1 ^ ShAmt2;
5506 
5507   if (CombinedShAmt == 0)
5508     return Src;
5509 
5510   SDLoc DL(N);
5511   return DAG.getNode(
5512       N->getOpcode(), DL, N->getValueType(0), Src,
5513       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
5514 }
5515 
5516 // Combine a constant select operand into its use:
5517 //
5518 // (and (select_cc lhs, rhs, cc, -1, c), x)
5519 //   -> (select_cc lhs, rhs, cc, x, (and, x, c))  [AllOnes=1]
5520 // (or  (select_cc lhs, rhs, cc, 0, c), x)
5521 //   -> (select_cc lhs, rhs, cc, x, (or, x, c))  [AllOnes=0]
5522 // (xor (select_cc lhs, rhs, cc, 0, c), x)
5523 //   -> (select_cc lhs, rhs, cc, x, (xor, x, c))  [AllOnes=0]
5524 static SDValue combineSelectCCAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
5525                                      SelectionDAG &DAG, bool AllOnes) {
5526   EVT VT = N->getValueType(0);
5527 
5528   if (Slct.getOpcode() != RISCVISD::SELECT_CC || !Slct.hasOneUse())
5529     return SDValue();
5530 
5531   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
5532     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
5533   };
5534 
5535   bool SwapSelectOps;
5536   SDValue TrueVal = Slct.getOperand(3);
5537   SDValue FalseVal = Slct.getOperand(4);
5538   SDValue NonConstantVal;
5539   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
5540     SwapSelectOps = false;
5541     NonConstantVal = FalseVal;
5542   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
5543     SwapSelectOps = true;
5544     NonConstantVal = TrueVal;
5545   } else
5546     return SDValue();
5547 
  // Slct is now known to be the desired identity constant when CC is true.
5549   TrueVal = OtherOp;
5550   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
5551   // Unless SwapSelectOps says CC should be false.
5552   if (SwapSelectOps)
5553     std::swap(TrueVal, FalseVal);
5554 
5555   return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
5556                      {Slct.getOperand(0), Slct.getOperand(1),
5557                       Slct.getOperand(2), TrueVal, FalseVal});
5558 }
5559 
// Attempt combineSelectCCAndUse on each operand of a commutative operator N.
5561 static SDValue combineSelectCCAndUseCommutative(SDNode *N, SelectionDAG &DAG,
5562                                                 bool AllOnes) {
5563   SDValue N0 = N->getOperand(0);
5564   SDValue N1 = N->getOperand(1);
5565   if (SDValue Result = combineSelectCCAndUse(N, N0, N1, DAG, AllOnes))
5566     return Result;
5567   if (SDValue Result = combineSelectCCAndUse(N, N1, N0, DAG, AllOnes))
5568     return Result;
5569   return SDValue();
5570 }
5571 
5572 static SDValue performANDCombine(SDNode *N,
5573                                  TargetLowering::DAGCombinerInfo &DCI,
5574                                  const RISCVSubtarget &Subtarget) {
5575   SelectionDAG &DAG = DCI.DAG;
5576 
5577   // fold (and (select_cc lhs, rhs, cc, -1, y), x) ->
5578   //      (select lhs, rhs, cc, x, (and x, y))
5579   return combineSelectCCAndUseCommutative(N, DAG, true);
5580 }
5581 
5582 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5583                                 const RISCVSubtarget &Subtarget) {
5584   SelectionDAG &DAG = DCI.DAG;
5585   if (Subtarget.hasStdExtZbp()) {
5586     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
5587       return GREV;
5588     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
5589       return GORC;
5590     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
5591       return SHFL;
5592   }
5593 
5594   // fold (or (select_cc lhs, rhs, cc, 0, y), x) ->
5595   //      (select lhs, rhs, cc, x, (or x, y))
5596   return combineSelectCCAndUseCommutative(N, DAG, false);
5597 }
5598 
5599 static SDValue performXORCombine(SDNode *N,
5600                                  TargetLowering::DAGCombinerInfo &DCI,
5601                                  const RISCVSubtarget &Subtarget) {
5602   SelectionDAG &DAG = DCI.DAG;
5603 
5604   // fold (xor (select_cc lhs, rhs, cc, 0, y), x) ->
5605   //      (select lhs, rhs, cc, x, (xor x, y))
5606   return combineSelectCCAndUseCommutative(N, DAG, false);
5607 }
5608 
5609 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
5610                                                DAGCombinerInfo &DCI) const {
5611   SelectionDAG &DAG = DCI.DAG;
5612 
5613   switch (N->getOpcode()) {
5614   default:
5615     break;
5616   case RISCVISD::SplitF64: {
5617     SDValue Op0 = N->getOperand(0);
5618     // If the input to SplitF64 is just BuildPairF64 then the operation is
5619     // redundant. Instead, use BuildPairF64's operands directly.
5620     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
5621       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
5622 
5623     SDLoc DL(N);
5624 
5625     // It's cheaper to materialise two 32-bit integers than to load a double
5626     // from the constant pool and transfer it to integer registers through the
5627     // stack.
5628     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
5629       APInt V = C->getValueAPF().bitcastToAPInt();
5630       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
5631       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
5632       return DCI.CombineTo(N, Lo, Hi);
5633     }
5634 
5635     // This is a target-specific version of a DAGCombine performed in
5636     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5637     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5638     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5639     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5640         !Op0.getNode()->hasOneUse())
5641       break;
5642     SDValue NewSplitF64 =
5643         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
5644                     Op0.getOperand(0));
5645     SDValue Lo = NewSplitF64.getValue(0);
5646     SDValue Hi = NewSplitF64.getValue(1);
5647     APInt SignBit = APInt::getSignMask(32);
5648     if (Op0.getOpcode() == ISD::FNEG) {
5649       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
5650                                   DAG.getConstant(SignBit, DL, MVT::i32));
5651       return DCI.CombineTo(N, Lo, NewHi);
5652     }
5653     assert(Op0.getOpcode() == ISD::FABS);
5654     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
5655                                 DAG.getConstant(~SignBit, DL, MVT::i32));
5656     return DCI.CombineTo(N, Lo, NewHi);
5657   }
5658   case RISCVISD::SLLW:
5659   case RISCVISD::SRAW:
5660   case RISCVISD::SRLW:
5661   case RISCVISD::ROLW:
5662   case RISCVISD::RORW: {
5663     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5664     SDValue LHS = N->getOperand(0);
5665     SDValue RHS = N->getOperand(1);
5666     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5667     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5668     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
5669         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
5670       if (N->getOpcode() != ISD::DELETED_NODE)
5671         DCI.AddToWorklist(N);
5672       return SDValue(N, 0);
5673     }
5674     break;
5675   }
5676   case RISCVISD::CLZW:
5677   case RISCVISD::CTZW: {
5678     // Only the lower 32 bits of the first operand are read
5679     SDValue Op0 = N->getOperand(0);
5680     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5681     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
5682       if (N->getOpcode() != ISD::DELETED_NODE)
5683         DCI.AddToWorklist(N);
5684       return SDValue(N, 0);
5685     }
5686     break;
5687   }
5688   case RISCVISD::FSL:
5689   case RISCVISD::FSR: {
    // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
5691     SDValue ShAmt = N->getOperand(2);
5692     unsigned BitWidth = ShAmt.getValueSizeInBits();
5693     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5694     APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
5695     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5696       if (N->getOpcode() != ISD::DELETED_NODE)
5697         DCI.AddToWorklist(N);
5698       return SDValue(N, 0);
5699     }
5700     break;
5701   }
5702   case RISCVISD::FSLW:
5703   case RISCVISD::FSRW: {
    // Only the lower 32 bits of the value operands and the lower 6 bits of the
    // shift amount are read.
5706     SDValue Op0 = N->getOperand(0);
5707     SDValue Op1 = N->getOperand(1);
5708     SDValue ShAmt = N->getOperand(2);
5709     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5710     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
5711     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
5712         SimplifyDemandedBits(Op1, OpMask, DCI) ||
5713         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5714       if (N->getOpcode() != ISD::DELETED_NODE)
5715         DCI.AddToWorklist(N);
5716       return SDValue(N, 0);
5717     }
5718     break;
5719   }
5720   case RISCVISD::GREV:
5721   case RISCVISD::GORC: {
    // Only the lower log2(Bitwidth) bits of the shift amount are read.
5723     SDValue ShAmt = N->getOperand(1);
5724     unsigned BitWidth = ShAmt.getValueSizeInBits();
5725     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5726     APInt ShAmtMask(BitWidth, BitWidth - 1);
5727     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5728       if (N->getOpcode() != ISD::DELETED_NODE)
5729         DCI.AddToWorklist(N);
5730       return SDValue(N, 0);
5731     }
5732 
5733     return combineGREVI_GORCI(N, DCI.DAG);
5734   }
5735   case RISCVISD::GREVW:
5736   case RISCVISD::GORCW: {
5737     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5738     SDValue LHS = N->getOperand(0);
5739     SDValue RHS = N->getOperand(1);
5740     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5741     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5742     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5743         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5744       if (N->getOpcode() != ISD::DELETED_NODE)
5745         DCI.AddToWorklist(N);
5746       return SDValue(N, 0);
5747     }
5748 
5749     return combineGREVI_GORCI(N, DCI.DAG);
5750   }
5751   case RISCVISD::SHFL:
5752   case RISCVISD::UNSHFL: {
    // Only the lower log2(Bitwidth)-1 bits of the shift amount are read.
5754     SDValue ShAmt = N->getOperand(1);
5755     unsigned BitWidth = ShAmt.getValueSizeInBits();
5756     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5757     APInt ShAmtMask(BitWidth, (BitWidth / 2) - 1);
5758     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5759       if (N->getOpcode() != ISD::DELETED_NODE)
5760         DCI.AddToWorklist(N);
5761       return SDValue(N, 0);
5762     }
5763 
5764     break;
5765   }
5766   case RISCVISD::SHFLW:
5767   case RISCVISD::UNSHFLW: {
    // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
5769     SDValue LHS = N->getOperand(0);
5770     SDValue RHS = N->getOperand(1);
5771     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5772     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
5773     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5774         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5775       if (N->getOpcode() != ISD::DELETED_NODE)
5776         DCI.AddToWorklist(N);
5777       return SDValue(N, 0);
5778     }
5779 
5780     break;
5781   }
5782   case RISCVISD::BCOMPRESSW:
5783   case RISCVISD::BDECOMPRESSW: {
5784     // Only the lower 32 bits of LHS and RHS are read.
5785     SDValue LHS = N->getOperand(0);
5786     SDValue RHS = N->getOperand(1);
5787     APInt Mask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5788     if (SimplifyDemandedBits(LHS, Mask, DCI) ||
5789         SimplifyDemandedBits(RHS, Mask, DCI)) {
5790       if (N->getOpcode() != ISD::DELETED_NODE)
5791         DCI.AddToWorklist(N);
5792       return SDValue(N, 0);
5793     }
5794 
5795     break;
5796   }
5797   case RISCVISD::FMV_X_ANYEXTW_RV64: {
5798     SDLoc DL(N);
5799     SDValue Op0 = N->getOperand(0);
5800     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
5801     // conversion is unnecessary and can be replaced with an ANY_EXTEND
5802     // of the FMV_W_X_RV64 operand.
5803     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
5804       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
5805              "Unexpected value type!");
5806       return Op0.getOperand(0);
5807     }
5808 
5809     // This is a target-specific version of a DAGCombine performed in
5810     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5811     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5812     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5813     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5814         !Op0.getNode()->hasOneUse())
5815       break;
5816     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
5817                                  Op0.getOperand(0));
5818     APInt SignBit = APInt::getSignMask(32).sext(64);
5819     if (Op0.getOpcode() == ISD::FNEG)
5820       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
5821                          DAG.getConstant(SignBit, DL, MVT::i64));
5822 
5823     assert(Op0.getOpcode() == ISD::FABS);
5824     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
5825                        DAG.getConstant(~SignBit, DL, MVT::i64));
5826   }
5827   case ISD::AND:
5828     return performANDCombine(N, DCI, Subtarget);
5829   case ISD::OR:
5830     return performORCombine(N, DCI, Subtarget);
5831   case ISD::XOR:
5832     return performXORCombine(N, DCI, Subtarget);
5833   case RISCVISD::SELECT_CC: {
    // Try several folds that simplify the condition of this select_cc.
5835     SDValue LHS = N->getOperand(0);
5836     SDValue RHS = N->getOperand(1);
5837     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
5838     if (!ISD::isIntEqualitySetCC(CCVal))
5839       break;
5840 
5841     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
5842     //      (select_cc X, Y, lt, trueV, falseV)
5843     // Sometimes the setcc is introduced after select_cc has been formed.
5844     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5845         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5846       // If we're looking for eq 0 instead of ne 0, we need to invert the
5847       // condition.
5848       bool Invert = CCVal == ISD::SETEQ;
5849       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5850       if (Invert)
5851         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5852 
5853       SDLoc DL(N);
5854       RHS = LHS.getOperand(1);
5855       LHS = LHS.getOperand(0);
5856       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5857 
5858       SDValue TargetCC =
5859           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5860       return DAG.getNode(
5861           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5862           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5863     }
5864 
5865     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
5866     //      (select_cc X, Y, eq/ne, trueV, falseV)
5867     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5868       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
5869                          {LHS.getOperand(0), LHS.getOperand(1),
5870                           N->getOperand(2), N->getOperand(3),
5871                           N->getOperand(4)});
5872     // (select_cc X, 1, setne, trueV, falseV) ->
5873     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
5874     // This can occur when legalizing some floating point comparisons.
5875     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5876     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5877       SDLoc DL(N);
5878       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5879       SDValue TargetCC =
5880           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5881       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5882       return DAG.getNode(
5883           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5884           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5885     }
5886 
5887     break;
5888   }
5889   case RISCVISD::BR_CC: {
5890     SDValue LHS = N->getOperand(1);
5891     SDValue RHS = N->getOperand(2);
5892     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
5893     if (!ISD::isIntEqualitySetCC(CCVal))
5894       break;
5895 
5896     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
5897     //      (br_cc X, Y, lt, dest)
5898     // Sometimes the setcc is introduced after br_cc has been formed.
5899     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5900         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5901       // If we're looking for eq 0 instead of ne 0, we need to invert the
5902       // condition.
5903       bool Invert = CCVal == ISD::SETEQ;
5904       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5905       if (Invert)
5906         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5907 
5908       SDLoc DL(N);
5909       RHS = LHS.getOperand(1);
5910       LHS = LHS.getOperand(0);
5911       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5912 
5913       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5914                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
5915                          N->getOperand(4));
5916     }
5917 
5918     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
    //      (br_cc X, Y, eq/ne, dest)
5920     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5921       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
5922                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
5923                          N->getOperand(3), N->getOperand(4));
5924 
    // (br_cc X, 1, setne, dest) ->
    // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
5927     // This can occur when legalizing some floating point comparisons.
5928     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5929     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5930       SDLoc DL(N);
5931       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5932       SDValue TargetCC = DAG.getCondCode(CCVal);
5933       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5934       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5935                          N->getOperand(0), LHS, RHS, TargetCC,
5936                          N->getOperand(4));
5937     }
5938     break;
5939   }
5940   case ISD::FCOPYSIGN: {
5941     EVT VT = N->getValueType(0);
5942     if (!VT.isVector())
5943       break;
    // There is a form of VFSGNJ (vfsgnjn) which injects the negated sign of
    // its second operand. Try and bubble any FNEG up after the extend/round to
    // produce this optimized pattern. Avoid modifying cases where the FP_ROUND
    // is a truncating round (TRUNC=1).
5948     SDValue In2 = N->getOperand(1);
5949     // Avoid cases where the extend/round has multiple uses, as duplicating
5950     // those is typically more expensive than removing a fneg.
5951     if (!In2.hasOneUse())
5952       break;
5953     if (In2.getOpcode() != ISD::FP_EXTEND &&
5954         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
5955       break;
5956     In2 = In2.getOperand(0);
5957     if (In2.getOpcode() != ISD::FNEG)
5958       break;
5959     SDLoc DL(N);
5960     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
5961     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
5962                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
5963   }
5964   case ISD::MGATHER:
5965   case ISD::MSCATTER: {
5966     if (!DCI.isBeforeLegalize())
5967       break;
5968     MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
5969     SDValue Index = MGSN->getIndex();
5970     EVT IndexVT = Index.getValueType();
5971     MVT XLenVT = Subtarget.getXLenVT();
    // RISC-V indexed loads and stores only support the "unsigned unscaled"
    // addressing mode, so anything else must be manually legalized.
5974     bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
5975                                 (MGSN->isIndexSigned() &&
5976                                  IndexVT.getVectorElementType().bitsLT(XLenVT));
5977     if (!NeedsIdxLegalization)
5978       break;
5979 
5980     SDLoc DL(N);
5981 
5982     // Any index legalization should first promote to XLenVT, so we don't lose
5983     // bits when scaling. This may create an illegal index type so we let
5984     // LLVM's legalization take care of the splitting.
5985     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
5986       IndexVT = IndexVT.changeVectorElementType(XLenVT);
5987       Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
5988                                                 : ISD::ZERO_EXTEND,
5989                           DL, IndexVT, Index);
5990     }
5991 
5992     unsigned Scale = N->getConstantOperandVal(5);
5993     if (MGSN->isIndexScaled() && Scale != 1) {
5994       // Manually scale the indices by the element size.
5995       // TODO: Sanitize the scale operand here?
5996       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
5997       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
5998       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
5999     }
6000 
6001     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
6002     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
6003       return DAG.getMaskedGather(
6004           N->getVTList(), MGSN->getMemoryVT(), DL,
6005           {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
6006            MGSN->getBasePtr(), Index, MGN->getScale()},
6007           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
6008     }
6009     const auto *MSN = cast<MaskedScatterSDNode>(N);
6010     return DAG.getMaskedScatter(
6011         N->getVTList(), MGSN->getMemoryVT(), DL,
6012         {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
6013          Index, MGSN->getScale()},
6014         MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
6015   }
6016   case RISCVISD::SRA_VL:
6017   case RISCVISD::SRL_VL:
6018   case RISCVISD::SHL_VL: {
6019     SDValue ShAmt = N->getOperand(1);
6020     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6021       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6022       SDLoc DL(N);
6023       SDValue VL = N->getOperand(3);
6024       EVT VT = N->getValueType(0);
6025       ShAmt =
6026           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
6027       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
6028                          N->getOperand(2), N->getOperand(3));
6029     }
6030     break;
6031   }
6032   case ISD::SRA:
6033   case ISD::SRL:
6034   case ISD::SHL: {
6035     SDValue ShAmt = N->getOperand(1);
6036     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
6037       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
6038       SDLoc DL(N);
6039       EVT VT = N->getValueType(0);
6040       ShAmt =
6041           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
6042       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
6043     }
6044     break;
6045   }
6046   }
6047 
6048   return SDValue();
6049 }
6050 
6051 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
6052     const SDNode *N, CombineLevel Level) const {
6053   // The following folds are only desirable if `(OP _, c1 << c2)` can be
6054   // materialised in fewer instructions than `(OP _, c1)`:
6055   //
6056   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
6057   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
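  //
  // For example, (shl (add x, 2047), 4) is left alone: 2047 fits in an ADDI
  // immediate but 2047 << 4 does not, so the combine would make the constant
  // more expensive to materialise.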
6058   SDValue N0 = N->getOperand(0);
6059   EVT Ty = N0.getValueType();
6060   if (Ty.isScalarInteger() &&
6061       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
6062     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6063     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
6064     if (C1 && C2) {
6065       const APInt &C1Int = C1->getAPIntValue();
6066       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
6067 
6068       // We can materialise `c1 << c2` into an add immediate, so it's "free",
6069       // and the combine should happen, to potentially allow further combines
6070       // later.
6071       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
6072           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
6073         return true;
6074 
6075       // We can materialise `c1` in an add immediate, so it's "free", and the
6076       // combine should be prevented.
6077       if (C1Int.getMinSignedBits() <= 64 &&
6078           isLegalAddImmediate(C1Int.getSExtValue()))
6079         return false;
6080 
6081       // Neither constant will fit into an immediate, so find materialisation
6082       // costs.
6083       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
6084                                               Subtarget.is64Bit());
6085       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
6086           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
6087 
6088       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
6089       // combine should be prevented.
6090       if (C1Cost < ShiftedC1Cost)
6091         return false;
6092     }
6093   }
6094   return true;
6095 }
6096 
6097 bool RISCVTargetLowering::targetShrinkDemandedConstant(
6098     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
6099     TargetLoweringOpt &TLO) const {
6100   // Delay this optimization as late as possible.
6101   if (!TLO.LegalOps)
6102     return false;
6103 
6104   EVT VT = Op.getValueType();
6105   if (VT.isVector())
6106     return false;
6107 
6108   // Only handle AND for now.
6109   if (Op.getOpcode() != ISD::AND)
6110     return false;
6111 
6112   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6113   if (!C)
6114     return false;
6115 
6116   const APInt &Mask = C->getAPIntValue();
6117 
6118   // Clear all non-demanded bits initially.
6119   APInt ShrunkMask = Mask & DemandedBits;
6120 
6121   // Try to make a smaller immediate by setting undemanded bits.
6122 
6123   APInt ExpandedMask = Mask | ~DemandedBits;
6124 
6125   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
6126     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
6127   };
6128   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
6129     if (NewMask == Mask)
6130       return true;
6131     SDLoc DL(Op);
6132     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
6133     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
6134     return TLO.CombineTo(Op, NewOp);
6135   };
6136 
6137   // If the shrunk mask fits in sign extended 12 bits, let the target
6138   // independent code apply it.
6139   if (ShrunkMask.isSignedIntN(12))
6140     return false;
6141 
6142   // Preserve (and X, 0xffff) when zext.h is supported.
6143   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
6144     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
6145     if (IsLegalMask(NewMask))
6146       return UseMask(NewMask);
6147   }
6148 
6149   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
6150   if (VT == MVT::i64) {
6151     APInt NewMask = APInt(64, 0xffffffff);
6152     if (IsLegalMask(NewMask))
6153       return UseMask(NewMask);
6154   }
6155 
6156   // For the remaining optimizations, we need to be able to make a negative
6157   // number through a combination of mask and undemanded bits.
6158   if (!ExpandedMask.isNegative())
6159     return false;
6160 
  // The minimum number of bits needed to represent the negative number.
6162   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
6163 
6164   // Try to make a 12 bit negative immediate. If that fails try to make a 32
6165   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
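  // For example, if ShrunkMask is 0xFF0 and every higher bit is undemanded,
  // setting the bits from bit 11 upwards gives 0xFFFFFFFFFFFFFFF0 (-16), which
  // fits in ANDI's 12-bit immediate.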
6166   APInt NewMask = ShrunkMask;
6167   if (MinSignedBits <= 12)
6168     NewMask.setBitsFrom(11);
6169   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
6170     NewMask.setBitsFrom(31);
6171   else
6172     return false;
6173 
6174   // Sanity check that our new mask is a subset of the demanded mask.
6175   assert(IsLegalMask(NewMask));
6176   return UseMask(NewMask);
6177 }
6178 
6179 static void computeGREV(APInt &Src, unsigned ShAmt) {
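  // For each set bit i of ShAmt, swap adjacent 2^i-bit blocks of Src. This
  // mirrors the GREV/GREVI semantics and is used below to propagate known bits
  // through GREV/GREVW nodes.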
6180   ShAmt &= Src.getBitWidth() - 1;
6181   uint64_t x = Src.getZExtValue();
6182   if (ShAmt & 1)
6183     x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
6184   if (ShAmt & 2)
6185     x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
6186   if (ShAmt & 4)
6187     x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
6188   if (ShAmt & 8)
6189     x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
6190   if (ShAmt & 16)
6191     x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
6192   if (ShAmt & 32)
6193     x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
6194   Src = x;
6195 }
6196 
void RISCVTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
    const SelectionDAG &DAG, unsigned Depth) const {
6202   unsigned BitWidth = Known.getBitWidth();
6203   unsigned Opc = Op.getOpcode();
6204   assert((Opc >= ISD::BUILTIN_OP_END ||
6205           Opc == ISD::INTRINSIC_WO_CHAIN ||
6206           Opc == ISD::INTRINSIC_W_CHAIN ||
6207           Opc == ISD::INTRINSIC_VOID) &&
6208          "Should use MaskedValueIsZero if you don't know whether Op"
6209          " is a target node!");
6210 
6211   Known.resetAll();
6212   switch (Opc) {
6213   default: break;
6214   case RISCVISD::SELECT_CC: {
6215     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
6216     // If we don't know any bits, early out.
6217     if (Known.isUnknown())
6218       break;
6219     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
6220 
6221     // Only known if known in both the LHS and RHS.
6222     Known = KnownBits::commonBits(Known, Known2);
6223     break;
6224   }
6225   case RISCVISD::REMUW: {
6226     KnownBits Known2;
6227     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6228     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6229     // We only care about the lower 32 bits.
6230     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
6231     // Restore the original width by sign extending.
6232     Known = Known.sext(BitWidth);
6233     break;
6234   }
6235   case RISCVISD::DIVUW: {
6236     KnownBits Known2;
6237     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6238     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6239     // We only care about the lower 32 bits.
6240     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
6241     // Restore the original width by sign extending.
6242     Known = Known.sext(BitWidth);
6243     break;
6244   }
6245   case RISCVISD::CTZW: {
6246     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6247     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
6248     unsigned LowBits = Log2_32(PossibleTZ) + 1;
6249     Known.Zero.setBitsFrom(LowBits);
6250     break;
6251   }
6252   case RISCVISD::CLZW: {
6253     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6254     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
6255     unsigned LowBits = Log2_32(PossibleLZ) + 1;
6256     Known.Zero.setBitsFrom(LowBits);
6257     break;
6258   }
6259   case RISCVISD::GREV:
6260   case RISCVISD::GREVW: {
6261     if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
6262       Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6263       if (Opc == RISCVISD::GREVW)
6264         Known = Known.trunc(32);
6265       unsigned ShAmt = C->getZExtValue();
6266       computeGREV(Known.Zero, ShAmt);
6267       computeGREV(Known.One, ShAmt);
6268       if (Opc == RISCVISD::GREVW)
6269         Known = Known.sext(BitWidth);
6270     }
6271     break;
6272   }
6273   case RISCVISD::READ_VLENB:
6274     // We assume VLENB is a power of two of at least 16 bytes, so its low 4 bits are zero.
6275     Known.Zero.setLowBits(4);
6276     break;
6277   case ISD::INTRINSIC_W_CHAIN: {
6278     unsigned IntNo = Op.getConstantOperandVal(1);
6279     switch (IntNo) {
6280     default:
6281       // We can't do anything for most intrinsics.
6282       break;
6283     case Intrinsic::riscv_vsetvli:
6284     case Intrinsic::riscv_vsetvlimax:
6285       // Assume that VL output is positive and would fit in an int32_t.
6286       // TODO: VLEN might be capped at 16 bits in a future V spec update.
6287       if (BitWidth >= 32)
6288         Known.Zero.setBitsFrom(31);
6289       break;
6290     }
6291     break;
6292   }
6293   }
6294 }
6295 
6296 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
6297     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
6298     unsigned Depth) const {
6299   switch (Op.getOpcode()) {
6300   default:
6301     break;
6302   case RISCVISD::SLLW:
6303   case RISCVISD::SRAW:
6304   case RISCVISD::SRLW:
6305   case RISCVISD::DIVW:
6306   case RISCVISD::DIVUW:
6307   case RISCVISD::REMUW:
6308   case RISCVISD::ROLW:
6309   case RISCVISD::RORW:
6310   case RISCVISD::GREVW:
6311   case RISCVISD::GORCW:
6312   case RISCVISD::FSLW:
6313   case RISCVISD::FSRW:
6314   case RISCVISD::SHFLW:
6315   case RISCVISD::UNSHFLW:
6316   case RISCVISD::BCOMPRESSW:
6317   case RISCVISD::BDECOMPRESSW:
6318     // TODO: As the result is sign-extended, this is conservatively correct. A
6319     // more precise answer could be calculated for SRAW depending on known
6320     // bits in the shift amount.
6321     return 33;
6322   case RISCVISD::SHFL:
6323   case RISCVISD::UNSHFL: {
6324     // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word
6325     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
6326     // will stay within the upper 32 bits. If there were more than 32 sign bits
6327     // before, there will be at least 33 sign bits after.
6328     if (Op.getValueType() == MVT::i64 &&
6329         isa<ConstantSDNode>(Op.getOperand(1)) &&
6330         (Op.getConstantOperandVal(1) & 0x10) == 0) {
6331       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
6332       if (Tmp > 32)
6333         return 33;
6334     }
6335     break;
6336   }
6337   case RISCVISD::VMV_X_S:
6338     // The number of sign bits of the scalar result is computed by obtaining the
6339     // element type of the input vector operand, subtracting its width from the
6340     // XLEN, and then adding one (sign bit within the element type). If the
6341     // element type is wider than XLen, the least-significant XLEN bits are
6342     // taken.
6343     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
6344       return 1;
6345     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
6346   }
6347 
6348   return 1;
6349 }
6350 
6351 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
6352                                                   MachineBasicBlock *BB) {
6353   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
6354 
6355   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
6356   // Should the count have wrapped while it was being read, we need to try
6357   // again.
6358   // ...
6359   // read:
6360   // rdcycleh x3 # load high word of cycle
6361   // rdcycle  x2 # load low word of cycle
6362   // rdcycleh x4 # load high word of cycle
6363   // bne x3, x4, read # check if high word reads match, otherwise try again
6364   // ...
6365 
6366   MachineFunction &MF = *BB->getParent();
6367   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6368   MachineFunction::iterator It = ++BB->getIterator();
6369 
6370   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6371   MF.insert(It, LoopMBB);
6372 
6373   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6374   MF.insert(It, DoneMBB);
6375 
6376   // Transfer the remainder of BB and its successor edges to DoneMBB.
6377   DoneMBB->splice(DoneMBB->begin(), BB,
6378                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
6379   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
6380 
6381   BB->addSuccessor(LoopMBB);
6382 
6383   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6384   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6385   Register LoReg = MI.getOperand(0).getReg();
6386   Register HiReg = MI.getOperand(1).getReg();
6387   DebugLoc DL = MI.getDebugLoc();
6388 
6389   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
6390   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
6391       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6392       .addReg(RISCV::X0);
6393   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
6394       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
6395       .addReg(RISCV::X0);
6396   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
6397       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6398       .addReg(RISCV::X0);
6399 
6400   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
6401       .addReg(HiReg)
6402       .addReg(ReadAgainReg)
6403       .addMBB(LoopMBB);
6404 
6405   LoopMBB->addSuccessor(LoopMBB);
6406   LoopMBB->addSuccessor(DoneMBB);
6407 
6408   MI.eraseFromParent();
6409 
6410   return DoneMBB;
6411 }
6412 
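// Lower SplitF64Pseudo: spill the f64 source register to a stack slot, then
// reload its low and high 32-bit halves into the two destination GPRs.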
6413 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
6414                                              MachineBasicBlock *BB) {
6415   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
6416 
6417   MachineFunction &MF = *BB->getParent();
6418   DebugLoc DL = MI.getDebugLoc();
6419   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6420   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6421   Register LoReg = MI.getOperand(0).getReg();
6422   Register HiReg = MI.getOperand(1).getReg();
6423   Register SrcReg = MI.getOperand(2).getReg();
6424   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
6425   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6426 
6427   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
6428                           RI);
6429   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6430   MachineMemOperand *MMOLo =
6431       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
6432   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6433       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
6434   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
6435       .addFrameIndex(FI)
6436       .addImm(0)
6437       .addMemOperand(MMOLo);
6438   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
6439       .addFrameIndex(FI)
6440       .addImm(4)
6441       .addMemOperand(MMOHi);
6442   MI.eraseFromParent(); // The pseudo instruction is gone now.
6443   return BB;
6444 }
6445 
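// Lower BuildPairF64Pseudo: store the two 32-bit halves from GPRs to a stack
// slot, then reload them as a single f64 into the destination FPR.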
6446 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
6447                                                  MachineBasicBlock *BB) {
6448   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
6449          "Unexpected instruction");
6450 
6451   MachineFunction &MF = *BB->getParent();
6452   DebugLoc DL = MI.getDebugLoc();
6453   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6454   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6455   Register DstReg = MI.getOperand(0).getReg();
6456   Register LoReg = MI.getOperand(1).getReg();
6457   Register HiReg = MI.getOperand(2).getReg();
6458   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
6459   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6460 
6461   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6462   MachineMemOperand *MMOLo =
6463       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
6464   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6465       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
6466   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6467       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
6468       .addFrameIndex(FI)
6469       .addImm(0)
6470       .addMemOperand(MMOLo);
6471   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6472       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
6473       .addFrameIndex(FI)
6474       .addImm(4)
6475       .addMemOperand(MMOHi);
6476   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
6477   MI.eraseFromParent(); // The pseudo instruction is gone now.
6478   return BB;
6479 }
6480 
6481 static bool isSelectPseudo(MachineInstr &MI) {
6482   switch (MI.getOpcode()) {
6483   default:
6484     return false;
6485   case RISCV::Select_GPR_Using_CC_GPR:
6486   case RISCV::Select_FPR16_Using_CC_GPR:
6487   case RISCV::Select_FPR32_Using_CC_GPR:
6488   case RISCV::Select_FPR64_Using_CC_GPR:
6489     return true;
6490   }
6491 }
6492 
6493 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
6494                                            MachineBasicBlock *BB) {
6495   // To "insert" Select_* instructions, we actually have to insert the triangle
6496   // control-flow pattern.  The incoming instructions know the destination vreg
6497   // to set, the condition code register to branch on, the true/false values to
6498   // select between, and the condcode to use to select the appropriate branch.
6499   //
6500   // We produce the following control flow:
6501   //     HeadMBB
6502   //     |  \
6503   //     |  IfFalseMBB
6504   //     | /
6505   //    TailMBB
6506   //
6507   // When we find a sequence of selects we attempt to optimize their emission
6508   // by sharing the control flow. Currently we only handle cases where we have
6509   // multiple selects with the exact same condition (same LHS, RHS and CC).
6510   // The selects may be interleaved with other instructions if the other
6511   // instructions meet some requirements we deem safe:
6512   // - They are debug instructions. Otherwise,
6513   // - They do not have side-effects, do not access memory and their inputs do
6514   //   not depend on the results of the select pseudo-instructions.
6515   // The TrueV/FalseV operands of the selects cannot depend on the result of
6516   // previous selects in the sequence.
6517   // These conditions could be further relaxed. See the X86 target for a
6518   // related approach and more information.
6519   Register LHS = MI.getOperand(1).getReg();
6520   Register RHS = MI.getOperand(2).getReg();
6521   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
6522 
6523   SmallVector<MachineInstr *, 4> SelectDebugValues;
6524   SmallSet<Register, 4> SelectDests;
6525   SelectDests.insert(MI.getOperand(0).getReg());
6526 
6527   MachineInstr *LastSelectPseudo = &MI;
6528 
6529   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
6530        SequenceMBBI != E; ++SequenceMBBI) {
6531     if (SequenceMBBI->isDebugInstr())
6532       continue;
6533     else if (isSelectPseudo(*SequenceMBBI)) {
6534       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
6535           SequenceMBBI->getOperand(2).getReg() != RHS ||
6536           SequenceMBBI->getOperand(3).getImm() != CC ||
6537           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
6538           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
6539         break;
6540       LastSelectPseudo = &*SequenceMBBI;
6541       SequenceMBBI->collectDebugValues(SelectDebugValues);
6542       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
6543     } else {
6544       if (SequenceMBBI->hasUnmodeledSideEffects() ||
6545           SequenceMBBI->mayLoadOrStore())
6546         break;
6547       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
6548             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
6549           }))
6550         break;
6551     }
6552   }
6553 
6554   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
6555   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6556   DebugLoc DL = MI.getDebugLoc();
6557   MachineFunction::iterator I = ++BB->getIterator();
6558 
6559   MachineBasicBlock *HeadMBB = BB;
6560   MachineFunction *F = BB->getParent();
6561   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
6562   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
6563 
6564   F->insert(I, IfFalseMBB);
6565   F->insert(I, TailMBB);
6566 
6567   // Transfer debug instructions associated with the selects to TailMBB.
6568   for (MachineInstr *DebugInstr : SelectDebugValues) {
6569     TailMBB->push_back(DebugInstr->removeFromParent());
6570   }
6571 
6572   // Move all instructions after the sequence to TailMBB.
6573   TailMBB->splice(TailMBB->end(), HeadMBB,
6574                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
6575   // Update machine-CFG edges by transferring all successors of the current
6576   // block to the new block which will contain the Phi nodes for the selects.
6577   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
6578   // Set the successors for HeadMBB.
6579   HeadMBB->addSuccessor(IfFalseMBB);
6580   HeadMBB->addSuccessor(TailMBB);
6581 
6582   // Insert appropriate branch.
6583   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
6584 
6585   BuildMI(HeadMBB, DL, TII.get(Opcode))
6586     .addReg(LHS)
6587     .addReg(RHS)
6588     .addMBB(TailMBB);
6589 
6590   // IfFalseMBB just falls through to TailMBB.
6591   IfFalseMBB->addSuccessor(TailMBB);
6592 
6593   // Create PHIs for all of the select pseudo-instructions.
6594   auto SelectMBBI = MI.getIterator();
6595   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
6596   auto InsertionPoint = TailMBB->begin();
6597   while (SelectMBBI != SelectEnd) {
6598     auto Next = std::next(SelectMBBI);
6599     if (isSelectPseudo(*SelectMBBI)) {
6600       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
6601       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
6602               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
6603           .addReg(SelectMBBI->getOperand(4).getReg())
6604           .addMBB(HeadMBB)
6605           .addReg(SelectMBBI->getOperand(5).getReg())
6606           .addMBB(IfFalseMBB);
6607       SelectMBBI->eraseFromParent();
6608     }
6609     SelectMBBI = Next;
6610   }
6611 
6612   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
6613   return TailMBB;
6614 }
6615 
6616 MachineBasicBlock *
6617 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
6618                                                  MachineBasicBlock *BB) const {
6619   switch (MI.getOpcode()) {
6620   default:
6621     llvm_unreachable("Unexpected instr type to insert");
6622   case RISCV::ReadCycleWide:
6623     assert(!Subtarget.is64Bit() &&
6624            "ReadCycleWide is only to be used on riscv32");
6625     return emitReadCycleWidePseudo(MI, BB);
6626   case RISCV::Select_GPR_Using_CC_GPR:
6627   case RISCV::Select_FPR16_Using_CC_GPR:
6628   case RISCV::Select_FPR32_Using_CC_GPR:
6629   case RISCV::Select_FPR64_Using_CC_GPR:
6630     return emitSelectPseudo(MI, BB);
6631   case RISCV::BuildPairF64Pseudo:
6632     return emitBuildPairF64Pseudo(MI, BB);
6633   case RISCV::SplitF64Pseudo:
6634     return emitSplitF64Pseudo(MI, BB);
6635   }
6636 }
6637 
6638 // Calling Convention Implementation.
6639 // The expectations for frontend ABI lowering vary from target to target.
6640 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
6641 // details, but this is a longer term goal. For now, we simply try to keep the
6642 // role of the frontend as simple and well-defined as possible. The rules can
6643 // be summarised as:
6644 // * Never split up large scalar arguments. We handle them here.
6645 // * If a hardfloat calling convention is being used, and the struct may be
6646 // passed in a pair of registers (fp+fp, int+fp), and both registers are
6647 // available, then pass as two separate arguments. If either the GPRs or FPRs
6648 // are exhausted, then pass according to the rule below.
6649 // * If a struct could never be passed in registers or directly in a stack
6650 // slot (as it is larger than 2*XLEN and the floating point rules don't
6651 // apply), then pass it using a pointer with the byval attribute.
6652 // * If a struct is less than 2*XLEN, then coerce to either a two-element
6653 // word-sized array or a 2*XLEN scalar (depending on alignment).
6654 // * The frontend can determine whether a struct is returned by reference or
6655 // not based on its size and fields. If it will be returned by reference, the
6656 // frontend must modify the prototype so a pointer with the sret annotation is
6657 // passed as the first argument. This is not necessary for large scalar
6658 // returns.
6659 // * Struct return values and varargs should be coerced to structs containing
6660 // register-size fields in the same situations they would be for fixed
6661 // arguments.
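//
// For example (illustrative only): under the hard-float 'd' ABIs, a struct
// such as { double d; int32_t i; } may be lowered by the frontend into two
// separate arguments, passed as a double in an FPR and an int in a GPR,
// provided both register classes still have free argument registers.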
6662 
6663 static const MCPhysReg ArgGPRs[] = {
6664   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
6665   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
6666 };
6667 static const MCPhysReg ArgFPR16s[] = {
6668   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
6669   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
6670 };
6671 static const MCPhysReg ArgFPR32s[] = {
6672   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
6673   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
6674 };
6675 static const MCPhysReg ArgFPR64s[] = {
6676   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
6677   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
6678 };
6679 // This is an interim calling convention and it may be changed in the future.
6680 static const MCPhysReg ArgVRs[] = {
6681     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
6682     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
6683     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
6684 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
6685                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
6686                                      RISCV::V20M2, RISCV::V22M2};
6687 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
6688                                      RISCV::V20M4};
6689 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
6690 
6691 // Pass a 2*XLEN argument that has been split into two XLEN values through
6692 // registers or the stack as necessary.
6693 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
6694                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
6695                                 MVT ValVT2, MVT LocVT2,
6696                                 ISD::ArgFlagsTy ArgFlags2) {
6697   unsigned XLenInBytes = XLen / 8;
6698   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6699     // At least one half can be passed via register.
6700     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
6701                                      VA1.getLocVT(), CCValAssign::Full));
6702   } else {
6703     // Both halves must be passed on the stack, with proper alignment.
6704     Align StackAlign =
6705         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
6706     State.addLoc(
6707         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
6708                             State.AllocateStack(XLenInBytes, StackAlign),
6709                             VA1.getLocVT(), CCValAssign::Full));
6710     State.addLoc(CCValAssign::getMem(
6711         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6712         LocVT2, CCValAssign::Full));
6713     return false;
6714   }
6715 
6716   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6717     // The second half can also be passed via register.
6718     State.addLoc(
6719         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
6720   } else {
6721     // The second half is passed via the stack, without additional alignment.
6722     State.addLoc(CCValAssign::getMem(
6723         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6724         LocVT2, CCValAssign::Full));
6725   }
6726 
6727   return false;
6728 }
6729 
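// Allocate an argument register for a vector value of type ValVT under the
// interim RVV calling convention, choosing among the VR, VRM2, VRM4 and VRM8
// register classes as selected by the target lowering. Returns 0 (no
// register) if the relevant class has been exhausted.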
6730 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
6731                                Optional<unsigned> FirstMaskArgument,
6732                                CCState &State, const RISCVTargetLowering &TLI) {
6733   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
6734   if (RC == &RISCV::VRRegClass) {
6735     // Assign the first mask argument to V0.
6736     // This is an interim calling convention and it may be changed in the
6737     // future.
6738     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
6739       return State.AllocateReg(RISCV::V0);
6740     return State.AllocateReg(ArgVRs);
6741   }
6742   if (RC == &RISCV::VRM2RegClass)
6743     return State.AllocateReg(ArgVRM2s);
6744   if (RC == &RISCV::VRM4RegClass)
6745     return State.AllocateReg(ArgVRM4s);
6746   if (RC == &RISCV::VRM8RegClass)
6747     return State.AllocateReg(ArgVRM8s);
6748   llvm_unreachable("Unhandled register class for ValueType");
6749 }
6750 
6751 // Implements the RISC-V calling convention. Returns true upon failure.
6752 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
6753                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
6754                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
6755                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
6756                      Optional<unsigned> FirstMaskArgument) {
6757   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
6758   assert(XLen == 32 || XLen == 64);
6759   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
6760 
6761   // Any return value split into more than two values can't be returned
6762   // directly. Vectors are returned via the available vector registers.
6763   if (!LocVT.isVector() && IsRet && ValNo > 1)
6764     return true;
6765 
6766   // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
6767   // passing a variadic argument, or if no F16/F32 argument registers are available.
6768   bool UseGPRForF16_F32 = true;
6769   // UseGPRForF64 is true if targeting soft-float ABIs or an FLEN=32 ABI, if
6770   // passing a variadic argument, or if no F64 argument registers are available.
6771   bool UseGPRForF64 = true;
6772 
6773   switch (ABI) {
6774   default:
6775     llvm_unreachable("Unexpected ABI");
6776   case RISCVABI::ABI_ILP32:
6777   case RISCVABI::ABI_LP64:
6778     break;
6779   case RISCVABI::ABI_ILP32F:
6780   case RISCVABI::ABI_LP64F:
6781     UseGPRForF16_F32 = !IsFixed;
6782     break;
6783   case RISCVABI::ABI_ILP32D:
6784   case RISCVABI::ABI_LP64D:
6785     UseGPRForF16_F32 = !IsFixed;
6786     UseGPRForF64 = !IsFixed;
6787     break;
6788   }
6789 
6790   // FPR16, FPR32, and FPR64 alias each other.
6791   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
6792     UseGPRForF16_F32 = true;
6793     UseGPRForF64 = true;
6794   }
6795 
6796   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
6797   // similar local variables rather than directly checking against the target
6798   // ABI.
6799 
6800   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
6801     LocVT = XLenVT;
6802     LocInfo = CCValAssign::BCvt;
6803   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
6804     LocVT = MVT::i64;
6805     LocInfo = CCValAssign::BCvt;
6806   }
6807 
6808   // If this is a variadic argument, the RISC-V calling convention requires
6809   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
6810   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
6811   // be used regardless of whether the original argument was split during
6812   // legalisation or not. The argument will not be passed by registers if the
6813   // original type is larger than 2*XLEN, so the register alignment rule does
6814   // not apply.
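  // For example (illustrative): a variadic double on RV32 is passed in an
  // aligned register pair such as a2/a3 rather than a1/a2, with the odd
  // register skipped (left unused) if it would otherwise be the next one.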
6815   unsigned TwoXLenInBytes = (2 * XLen) / 8;
6816   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
6817       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
6818     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
6819     // Skip 'odd' register if necessary.
6820     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
6821       State.AllocateReg(ArgGPRs);
6822   }
6823 
6824   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
6825   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
6826       State.getPendingArgFlags();
6827 
6828   assert(PendingLocs.size() == PendingArgFlags.size() &&
6829          "PendingLocs and PendingArgFlags out of sync");
6830 
6831   // Handle passing f64 on RV32D with a soft float ABI or when floating point
6832   // registers are exhausted.
6833   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
6834     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
6835            "Can't lower f64 if it is split");
6836     // Depending on available argument GPRs, f64 may be passed in a pair of
6837     // GPRs, split between a GPR and the stack, or passed completely on the
6838     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
6839     // cases.
6840     Register Reg = State.AllocateReg(ArgGPRs);
6841     LocVT = MVT::i32;
6842     if (!Reg) {
6843       unsigned StackOffset = State.AllocateStack(8, Align(8));
6844       State.addLoc(
6845           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
6846       return false;
6847     }
6848     if (!State.AllocateReg(ArgGPRs))
6849       State.AllocateStack(4, Align(4));
6850     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6851     return false;
6852   }
6853 
6854   // Fixed-length vectors are located in the corresponding scalable-vector
6855   // container types.
6856   if (ValVT.isFixedLengthVector())
6857     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
6858 
6859   // Split arguments might be passed indirectly, so keep track of the pending
6860   // values. Split vectors are passed via a mix of registers and indirectly, so
6861   // treat them as we would any other argument.
6862   if (!LocVT.isVector() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
6863     LocVT = XLenVT;
6864     LocInfo = CCValAssign::Indirect;
6865     PendingLocs.push_back(
6866         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
6867     PendingArgFlags.push_back(ArgFlags);
6868     if (!ArgFlags.isSplitEnd()) {
6869       return false;
6870     }
6871   }
6872 
6873   // If the split argument only had two elements, it should be passed directly
6874   // in registers or on the stack.
6875   if (!LocVT.isVector() && ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
6876     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
6877     // Apply the normal calling convention rules to the first half of the
6878     // split argument.
6879     CCValAssign VA = PendingLocs[0];
6880     ISD::ArgFlagsTy AF = PendingArgFlags[0];
6881     PendingLocs.clear();
6882     PendingArgFlags.clear();
6883     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
6884                                ArgFlags);
6885   }
6886 
6887   // Allocate to a register if possible, or else a stack slot.
6888   Register Reg;
6889   unsigned StoreSizeBytes = XLen / 8;
6890   Align StackAlign = Align(XLen / 8);
6891 
6892   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
6893     Reg = State.AllocateReg(ArgFPR16s);
6894   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
6895     Reg = State.AllocateReg(ArgFPR32s);
6896   else if (ValVT == MVT::f64 && !UseGPRForF64)
6897     Reg = State.AllocateReg(ArgFPR64s);
6898   else if (ValVT.isVector()) {
6899     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
6900     if (!Reg) {
6901       // For return values, the vector must be passed fully via registers or
6902       // via the stack.
6903       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
6904       // but we're using all of them.
6905       if (IsRet)
6906         return true;
6907       // Try using a GPR to pass the address.
6908       if ((Reg = State.AllocateReg(ArgGPRs))) {
6909         LocVT = XLenVT;
6910         LocInfo = CCValAssign::Indirect;
6911       } else if (ValVT.isScalableVector()) {
6912         report_fatal_error("Unable to pass scalable vector types on the stack");
6913       } else {
6914         // Pass fixed-length vectors on the stack.
6915         LocVT = ValVT;
6916         StoreSizeBytes = ValVT.getStoreSize();
6917         // Align vectors to their element sizes, being careful for vXi1
6918         // vectors.
6919         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
6920       }
6921     }
6922   } else {
6923     Reg = State.AllocateReg(ArgGPRs);
6924   }
6925 
6926   unsigned StackOffset =
6927       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
6928 
6929   // If we reach this point and PendingLocs is non-empty, we must be at the
6930   // end of a split argument that must be passed indirectly.
6931   if (!PendingLocs.empty()) {
6932     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
6933     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
6934 
6935     for (auto &It : PendingLocs) {
6936       if (Reg)
6937         It.convertToReg(Reg);
6938       else
6939         It.convertToMem(StackOffset);
6940       State.addLoc(It);
6941     }
6942     PendingLocs.clear();
6943     PendingArgFlags.clear();
6944     return false;
6945   }
6946 
6947   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
6948           (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
6949          "Expected an XLenVT or vector types at this stage");
6950 
6951   if (Reg) {
6952     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6953     return false;
6954   }
6955 
6956   // When a floating-point value is passed on the stack, no bit-conversion is
6957   // needed.
6958   if (ValVT.isFloatingPoint()) {
6959     LocVT = ValVT;
6960     LocInfo = CCValAssign::Full;
6961   }
6962   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
6963   return false;
6964 }
6965 
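// Return the index of the first vector mask argument (a vector with an i1
// element type), if any. The interim vector calling convention pre-assigns
// this argument to V0 (see allocateRVVReg).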
6966 template <typename ArgTy>
6967 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
6968   for (const auto &ArgIdx : enumerate(Args)) {
6969     MVT ArgVT = ArgIdx.value().VT;
6970     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
6971       return ArgIdx.index();
6972   }
6973   return None;
6974 }
6975 
6976 void RISCVTargetLowering::analyzeInputArgs(
6977     MachineFunction &MF, CCState &CCInfo,
6978     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
6979     RISCVCCAssignFn Fn) const {
6980   unsigned NumArgs = Ins.size();
6981   FunctionType *FType = MF.getFunction().getFunctionType();
6982 
6983   Optional<unsigned> FirstMaskArgument;
6984   if (Subtarget.hasStdExtV())
6985     FirstMaskArgument = preAssignMask(Ins);
6986 
6987   for (unsigned i = 0; i != NumArgs; ++i) {
6988     MVT ArgVT = Ins[i].VT;
6989     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
6990 
6991     Type *ArgTy = nullptr;
6992     if (IsRet)
6993       ArgTy = FType->getReturnType();
6994     else if (Ins[i].isOrigArg())
6995       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
6996 
6997     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
6998     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
6999            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
7000            FirstMaskArgument)) {
7001       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
7002                         << EVT(ArgVT).getEVTString() << '\n');
7003       llvm_unreachable(nullptr);
7004     }
7005   }
7006 }
7007 
7008 void RISCVTargetLowering::analyzeOutputArgs(
7009     MachineFunction &MF, CCState &CCInfo,
7010     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
7011     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
7012   unsigned NumArgs = Outs.size();
7013 
7014   Optional<unsigned> FirstMaskArgument;
7015   if (Subtarget.hasStdExtV())
7016     FirstMaskArgument = preAssignMask(Outs);
7017 
7018   for (unsigned i = 0; i != NumArgs; i++) {
7019     MVT ArgVT = Outs[i].VT;
7020     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
7021     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
7022 
7023     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7024     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
7025            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
7026            FirstMaskArgument)) {
7027       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
7028                         << EVT(ArgVT).getEVTString() << "\n");
7029       llvm_unreachable(nullptr);
7030     }
7031   }
7032 }
7033 
7034 // Convert Val to the value type VA.getValVT(). Should not be called for
7035 // CCValAssign::Indirect values.
7036 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
7037                                    const CCValAssign &VA, const SDLoc &DL,
7038                                    const RISCVSubtarget &Subtarget) {
7039   switch (VA.getLocInfo()) {
7040   default:
7041     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7042   case CCValAssign::Full:
7043     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
7044       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
7045     break;
7046   case CCValAssign::BCvt:
7047     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7048       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
7049     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7050       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
7051     else
7052       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
7053     break;
7054   }
7055   return Val;
7056 }
7057 
7058 // The caller is responsible for loading the full value if the argument is
7059 // passed with CCValAssign::Indirect.
7060 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
7061                                 const CCValAssign &VA, const SDLoc &DL,
7062                                 const RISCVTargetLowering &TLI) {
7063   MachineFunction &MF = DAG.getMachineFunction();
7064   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7065   EVT LocVT = VA.getLocVT();
7066   SDValue Val;
7067   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
7068   Register VReg = RegInfo.createVirtualRegister(RC);
7069   RegInfo.addLiveIn(VA.getLocReg(), VReg);
7070   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
7071 
7072   if (VA.getLocInfo() == CCValAssign::Indirect)
7073     return Val;
7074 
7075   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
7076 }
7077 
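// Convert Val from VA.getValVT() to the location type expected by the calling
// convention; this is the inverse of convertLocVTToValVT above.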
7078 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
7079                                    const CCValAssign &VA, const SDLoc &DL,
7080                                    const RISCVSubtarget &Subtarget) {
7081   EVT LocVT = VA.getLocVT();
7082 
7083   switch (VA.getLocInfo()) {
7084   default:
7085     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7086   case CCValAssign::Full:
7087     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
7088       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
7089     break;
7090   case CCValAssign::BCvt:
7091     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7092       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
7093     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7094       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
7095     else
7096       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
7097     break;
7098   }
7099   return Val;
7100 }
7101 
7102 // The caller is responsible for loading the full value if the argument is
7103 // passed with CCValAssign::Indirect.
7104 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
7105                                 const CCValAssign &VA, const SDLoc &DL) {
7106   MachineFunction &MF = DAG.getMachineFunction();
7107   MachineFrameInfo &MFI = MF.getFrameInfo();
7108   EVT LocVT = VA.getLocVT();
7109   EVT ValVT = VA.getValVT();
7110   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
7111   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
7112                                  /*Immutable=*/true);
7113   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7114   SDValue Val;
7115 
7116   ISD::LoadExtType ExtType;
7117   switch (VA.getLocInfo()) {
7118   default:
7119     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7120   case CCValAssign::Full:
7121   case CCValAssign::Indirect:
7122   case CCValAssign::BCvt:
7123     ExtType = ISD::NON_EXTLOAD;
7124     break;
7125   }
7126   Val = DAG.getExtLoad(
7127       ExtType, DL, LocVT, Chain, FIN,
7128       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
7129   return Val;
7130 }
7131 
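// Reassemble an f64 argument that, on RV32 with a soft-float ABI (or with the
// FPR argument registers exhausted), was passed entirely on the stack, in a
// pair of GPRs, or split between a GPR and a stack slot.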
7132 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
7133                                        const CCValAssign &VA, const SDLoc &DL) {
7134   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
7135          "Unexpected VA");
7136   MachineFunction &MF = DAG.getMachineFunction();
7137   MachineFrameInfo &MFI = MF.getFrameInfo();
7138   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7139 
7140   if (VA.isMemLoc()) {
7141     // f64 is passed on the stack.
7142     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
7143     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7144     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
7145                        MachinePointerInfo::getFixedStack(MF, FI));
7146   }
7147 
7148   assert(VA.isRegLoc() && "Expected register VA assignment");
7149 
7150   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7151   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
7152   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
7153   SDValue Hi;
7154   if (VA.getLocReg() == RISCV::X17) {
7155     // Second half of f64 is passed on the stack.
7156     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
7157     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7158     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
7159                      MachinePointerInfo::getFixedStack(MF, FI));
7160   } else {
7161     // Second half of f64 is passed in another GPR.
7162     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7163     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
7164     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
7165   }
7166   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
7167 }
7168 
7169 // FastCC gives less than a 1% performance improvement on some particular
7170 // benchmarks, but it may theoretically benefit other cases.
7171 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
7172                             unsigned ValNo, MVT ValVT, MVT LocVT,
7173                             CCValAssign::LocInfo LocInfo,
7174                             ISD::ArgFlagsTy ArgFlags, CCState &State,
7175                             bool IsFixed, bool IsRet, Type *OrigTy,
7176                             const RISCVTargetLowering &TLI,
7177                             Optional<unsigned> FirstMaskArgument) {
7178 
7179   // X5 and X6 might be used by the save/restore libcalls.
7180   static const MCPhysReg GPRList[] = {
7181       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
7182       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
7183       RISCV::X29, RISCV::X30, RISCV::X31};
7184 
7185   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7186     if (unsigned Reg = State.AllocateReg(GPRList)) {
7187       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7188       return false;
7189     }
7190   }
7191 
7192   if (LocVT == MVT::f16) {
7193     static const MCPhysReg FPR16List[] = {
7194         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
7195         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
7196         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
7197         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
7198     if (unsigned Reg = State.AllocateReg(FPR16List)) {
7199       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7200       return false;
7201     }
7202   }
7203 
7204   if (LocVT == MVT::f32) {
7205     static const MCPhysReg FPR32List[] = {
7206         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
7207         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
7208         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
7209         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
7210     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7211       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7212       return false;
7213     }
7214   }
7215 
7216   if (LocVT == MVT::f64) {
7217     static const MCPhysReg FPR64List[] = {
7218         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
7219         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
7220         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
7221         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
7222     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7223       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7224       return false;
7225     }
7226   }
7227 
7228   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
7229     unsigned Offset4 = State.AllocateStack(4, Align(4));
7230     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
7231     return false;
7232   }
7233 
7234   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
7235     unsigned Offset5 = State.AllocateStack(8, Align(8));
7236     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
7237     return false;
7238   }
7239 
7240   if (LocVT.isVector()) {
7241     if (unsigned Reg =
7242             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
7243       // Fixed-length vectors are located in the corresponding scalable-vector
7244       // container types.
7245       if (ValVT.isFixedLengthVector())
7246         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7247       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7248     } else {
7249       // Try to pass the address via a "fast" GPR.
7250       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
7251         LocInfo = CCValAssign::Indirect;
7252         LocVT = TLI.getSubtarget().getXLenVT();
7253         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
7254       } else if (ValVT.isFixedLengthVector()) {
7255         auto StackAlign =
7256             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7257         unsigned StackOffset =
7258             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
7259         State.addLoc(
7260             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7261       } else {
7262         // Can't pass scalable vectors on the stack.
7263         return true;
7264       }
7265     }
7266 
7267     return false;
7268   }
7269 
7270   return true; // CC didn't match.
7271 }
7272 
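// Calling convention for functions using the GHC (Glasgow Haskell Compiler)
// calling convention: all arguments must fit in a fixed set of callee-saved
// registers; there is no stack fallback, so running out of registers is a
// fatal error.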
7273 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
7274                          CCValAssign::LocInfo LocInfo,
7275                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
7276 
7277   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7278     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
7279     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
7280     static const MCPhysReg GPRList[] = {
7281         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
7282         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
7283     if (unsigned Reg = State.AllocateReg(GPRList)) {
7284       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7285       return false;
7286     }
7287   }
7288 
7289   if (LocVT == MVT::f32) {
7290     // Pass in STG registers: F1, ..., F6
7291     //                        fs0 ... fs5
7292     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
7293                                           RISCV::F18_F, RISCV::F19_F,
7294                                           RISCV::F20_F, RISCV::F21_F};
7295     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7296       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7297       return false;
7298     }
7299   }
7300 
7301   if (LocVT == MVT::f64) {
7302     // Pass in STG registers: D1, ..., D6
7303     //                        fs6 ... fs11
7304     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
7305                                           RISCV::F24_D, RISCV::F25_D,
7306                                           RISCV::F26_D, RISCV::F27_D};
7307     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7308       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7309       return false;
7310     }
7311   }
7312 
7313   report_fatal_error("No registers left in GHC calling convention");
7314   return true;
7315 }
7316 
7317 // Transform physical registers into virtual registers.
7318 SDValue RISCVTargetLowering::LowerFormalArguments(
7319     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
7320     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
7321     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7322 
7323   MachineFunction &MF = DAG.getMachineFunction();
7324 
7325   switch (CallConv) {
7326   default:
7327     report_fatal_error("Unsupported calling convention");
7328   case CallingConv::C:
7329   case CallingConv::Fast:
7330     break;
7331   case CallingConv::GHC:
7332     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
7333         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
7334       report_fatal_error(
7335         "GHC calling convention requires the F and D instruction set extensions");
7336   }
7337 
7338   const Function &Func = MF.getFunction();
7339   if (Func.hasFnAttribute("interrupt")) {
7340     if (!Func.arg_empty())
7341       report_fatal_error(
7342         "Functions with the interrupt attribute cannot have arguments!");
7343 
7344     StringRef Kind =
7345       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7346 
7347     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
7348       report_fatal_error(
7349         "Function interrupt attribute argument not supported!");
7350   }
7351 
7352   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7353   MVT XLenVT = Subtarget.getXLenVT();
7354   unsigned XLenInBytes = Subtarget.getXLen() / 8;
7355   // Used with varargs to accumulate store chains.
7356   std::vector<SDValue> OutChains;
7357 
7358   // Assign locations to all of the incoming arguments.
7359   SmallVector<CCValAssign, 16> ArgLocs;
7360   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7361 
7362   if (CallConv == CallingConv::GHC)
7363     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
7364   else
7365     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
7366                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7367                                                    : CC_RISCV);
7368 
7369   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7370     CCValAssign &VA = ArgLocs[i];
7371     SDValue ArgValue;
7372     // Passing f64 on RV32D with a soft float ABI must be handled as a special
7373     // case.
7374     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
7375       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
7376     else if (VA.isRegLoc())
7377       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
7378     else
7379       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
7380 
7381     if (VA.getLocInfo() == CCValAssign::Indirect) {
7382       // If the original argument was split and passed by reference (e.g. i128
7383       // on RV32), we need to load all parts of it here (using the same
7384       // address). Vectors may be partly split to registers and partly to the
7385       // stack, in which case the base address is partly offset and subsequent
7386       // stores are relative to that.
7387       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
7388                                    MachinePointerInfo()));
7389       unsigned ArgIndex = Ins[i].OrigArgIndex;
7390       unsigned ArgPartOffset = Ins[i].PartOffset;
7391       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7392       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
7393         CCValAssign &PartVA = ArgLocs[i + 1];
7394         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
7395         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7396         if (PartVA.getValVT().isScalableVector())
7397           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7398         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
7399         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
7400                                      MachinePointerInfo()));
7401         ++i;
7402       }
7403       continue;
7404     }
7405     InVals.push_back(ArgValue);
7406   }
7407 
7408   if (IsVarArg) {
7409     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
7410     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
7411     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
7412     MachineFrameInfo &MFI = MF.getFrameInfo();
7413     MachineRegisterInfo &RegInfo = MF.getRegInfo();
7414     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
7415 
7416     // Offset of the first variable argument from stack pointer, and size of
7417     // the vararg save area. For now, the varargs save area is either zero or
7418     // large enough to hold a0-a7.
7419     int VaArgOffset, VarArgsSaveSize;
7420 
7421     // If all registers are allocated, then all varargs must be passed on the
7422     // stack and we don't need to save any argregs.
7423     if (ArgRegs.size() == Idx) {
7424       VaArgOffset = CCInfo.getNextStackOffset();
7425       VarArgsSaveSize = 0;
7426     } else {
7427       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
7428       VaArgOffset = -VarArgsSaveSize;
7429     }
7430 
7431     // Record the frame index of the first variable argument,
7432     // which is needed by the VASTART node.
7433     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7434     RVFI->setVarArgsFrameIndex(FI);
7435 
7436     // If saving an odd number of registers, create an extra stack slot to
7437     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
7438     // that offsets to even-numbered registers remain 2*XLEN-aligned.
7439     if (Idx % 2) {
7440       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
7441       VarArgsSaveSize += XLenInBytes;
7442     }
7443 
7444     // Copy the integer registers that may have been used for passing varargs
7445     // to the vararg save area.
7446     for (unsigned I = Idx; I < ArgRegs.size();
7447          ++I, VaArgOffset += XLenInBytes) {
7448       const Register Reg = RegInfo.createVirtualRegister(RC);
7449       RegInfo.addLiveIn(ArgRegs[I], Reg);
7450       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
7451       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7452       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7453       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
7454                                    MachinePointerInfo::getFixedStack(MF, FI));
7455       cast<StoreSDNode>(Store.getNode())
7456           ->getMemOperand()
7457           ->setValue((Value *)nullptr);
7458       OutChains.push_back(Store);
7459     }
7460     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
7461   }
7462 
7463   // All stores are grouped into one node to allow matching between the sizes
7464   // of Ins and InVals. This only happens for vararg functions.
7465   if (!OutChains.empty()) {
7466     OutChains.push_back(Chain);
7467     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
7468   }
7469 
7470   return Chain;
7471 }
7472 
7473 /// isEligibleForTailCallOptimization - Check whether the call is eligible
7474 /// for tail call optimization.
7475 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
7476 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
7477     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
7478     const SmallVector<CCValAssign, 16> &ArgLocs) const {
7479 
7480   auto &Callee = CLI.Callee;
7481   auto CalleeCC = CLI.CallConv;
7482   auto &Outs = CLI.Outs;
7483   auto &Caller = MF.getFunction();
7484   auto CallerCC = Caller.getCallingConv();
7485 
7486   // Exception-handling functions need a special set of instructions to
7487   // indicate a return to the hardware. Tail-calling another function would
7488   // probably break this.
7489   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
7490   // should be expanded as new function attributes are introduced.
7491   if (Caller.hasFnAttribute("interrupt"))
7492     return false;
7493 
7494   // Do not tail call opt if the stack is used to pass parameters.
7495   if (CCInfo.getNextStackOffset() != 0)
7496     return false;
7497 
7498   // Do not tail call opt if any parameters need to be passed indirectly.
7499   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
7500   // passed indirectly. So the address of the value will be passed in a
7501   // register, or if not available, then the address is put on the stack.
7502   // Passing indirectly often requires allocating stack space to hold the
7503   // value, so the CCInfo.getNextStackOffset() != 0 check alone is not
7504   // enough; we also need to check whether any of the CCValAssigns in
7505   // ArgLocs are passed CCValAssign::Indirect.
7506   for (auto &VA : ArgLocs)
7507     if (VA.getLocInfo() == CCValAssign::Indirect)
7508       return false;
7509 
7510   // Do not tail call opt if either caller or callee uses struct return
7511   // semantics.
7512   auto IsCallerStructRet = Caller.hasStructRetAttr();
7513   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
7514   if (IsCallerStructRet || IsCalleeStructRet)
7515     return false;
7516 
7517   // Externally-defined functions with weak linkage should not be
7518   // tail-called. The behaviour of branch instructions in this situation (as
7519   // used for tail calls) is implementation-defined, so we cannot rely on the
7520   // linker replacing the tail call with a return.
7521   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
7522     const GlobalValue *GV = G->getGlobal();
7523     if (GV->hasExternalWeakLinkage())
7524       return false;
7525   }
7526 
7527   // The callee has to preserve all registers the caller needs to preserve.
7528   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
7529   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
7530   if (CalleeCC != CallerCC) {
7531     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
7532     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
7533       return false;
7534   }
7535 
7536   // Byval parameters hand the function a pointer directly into the stack area
7537   // we want to reuse during a tail call. Working around this *is* possible
7538   // but less efficient and uglier in LowerCall.
7539   for (auto &Arg : Outs)
7540     if (Arg.Flags.isByVal())
7541       return false;
7542 
7543   return true;
7544 }
7545 
7546 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
7547   return DAG.getDataLayout().getPrefTypeAlign(
7548       VT.getTypeForEVT(*DAG.getContext()));
7549 }
7550 
7551 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
7552 // and output parameter nodes.
7553 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
7554                                        SmallVectorImpl<SDValue> &InVals) const {
7555   SelectionDAG &DAG = CLI.DAG;
7556   SDLoc &DL = CLI.DL;
7557   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
7558   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
7559   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
7560   SDValue Chain = CLI.Chain;
7561   SDValue Callee = CLI.Callee;
7562   bool &IsTailCall = CLI.IsTailCall;
7563   CallingConv::ID CallConv = CLI.CallConv;
7564   bool IsVarArg = CLI.IsVarArg;
7565   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7566   MVT XLenVT = Subtarget.getXLenVT();
7567 
7568   MachineFunction &MF = DAG.getMachineFunction();
7569 
7570   // Analyze the operands of the call, assigning locations to each operand.
7571   SmallVector<CCValAssign, 16> ArgLocs;
7572   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7573 
7574   if (CallConv == CallingConv::GHC)
7575     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
7576   else
7577     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
7578                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7579                                                     : CC_RISCV);
7580 
7581   // Check if it's really possible to do a tail call.
7582   if (IsTailCall)
7583     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
7584 
7585   if (IsTailCall)
7586     ++NumTailCalls;
7587   else if (CLI.CB && CLI.CB->isMustTailCall())
7588     report_fatal_error("failed to perform tail call elimination on a call "
7589                        "site marked musttail");
7590 
7591   // Get a count of how many bytes are to be pushed on the stack.
7592   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
7593 
7594   // Create local copies for byval args
7595   SmallVector<SDValue, 8> ByValArgs;
7596   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7597     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7598     if (!Flags.isByVal())
7599       continue;
7600 
7601     SDValue Arg = OutVals[i];
7602     unsigned Size = Flags.getByValSize();
7603     Align Alignment = Flags.getNonZeroByValAlign();
7604 
7605     int FI =
7606         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
7607     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7608     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
7609 
7610     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
7611                           /*IsVolatile=*/false,
7612                           /*AlwaysInline=*/false, IsTailCall,
7613                           MachinePointerInfo(), MachinePointerInfo());
7614     ByValArgs.push_back(FIPtr);
7615   }
7616 
7617   if (!IsTailCall)
7618     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
7619 
7620   // Copy argument values to their designated locations.
7621   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
7622   SmallVector<SDValue, 8> MemOpChains;
7623   SDValue StackPtr;
7624   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
7625     CCValAssign &VA = ArgLocs[i];
7626     SDValue ArgValue = OutVals[i];
7627     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7628 
7629     // Handle passing f64 on RV32D with a soft float ABI as a special case.
7630     bool IsF64OnRV32DSoftABI =
7631         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
7632     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
7633       SDValue SplitF64 = DAG.getNode(
7634           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
7635       SDValue Lo = SplitF64.getValue(0);
7636       SDValue Hi = SplitF64.getValue(1);
7637 
7638       Register RegLo = VA.getLocReg();
7639       RegsToPass.push_back(std::make_pair(RegLo, Lo));
7640 
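      // X17 is a7, the last GPR argument register. If the low half of the f64
      // landed there, no GPR remains for the high half, so it is passed on the
      // stack instead.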
7641       if (RegLo == RISCV::X17) {
7642         // Second half of f64 is passed on the stack.
7643         // Work out the address of the stack slot.
7644         if (!StackPtr.getNode())
7645           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7646         // Emit the store.
7647         MemOpChains.push_back(
7648             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
7649       } else {
7650         // Second half of f64 is passed in another GPR.
7651         assert(RegLo < RISCV::X31 && "Invalid register pair");
7652         Register RegHigh = RegLo + 1;
7653         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
7654       }
7655       continue;
7656     }
7657 
7658     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
7659     // as any other MemLoc.
7660 
7661     // Promote the value if needed.
7662     // For now, only handle fully promoted and indirect arguments.
7663     if (VA.getLocInfo() == CCValAssign::Indirect) {
7664       // Store the argument in a stack slot and pass its address.
7665       Align StackAlign =
7666           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
7667                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
7668       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
7669       // If the original argument was split (e.g. i128), we need
7670       // to store the required parts of it here (and pass just one address).
7671       // Vectors may be partly split to registers and partly to the stack, in
7672       // which case the base address is partly offset and subsequent stores are
7673       // relative to that.
7674       unsigned ArgIndex = Outs[i].OrigArgIndex;
7675       unsigned ArgPartOffset = Outs[i].PartOffset;
7676       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7677       // Calculate the total size to store. We can only determine it by
7678       // walking the remaining parts in the loop below and accumulating
7679       // their sizes.
7680       SmallVector<std::pair<SDValue, SDValue>> Parts;
7681       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
7682         SDValue PartValue = OutVals[i + 1];
7683         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
7684         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7685         EVT PartVT = PartValue.getValueType();
7686         if (PartVT.isScalableVector())
7687           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7688         StoredSize += PartVT.getStoreSize();
7689         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
7690         Parts.push_back(std::make_pair(PartValue, Offset));
7691         ++i;
7692       }
7693       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
7694       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
7695       MemOpChains.push_back(
7696           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
7697                        MachinePointerInfo::getFixedStack(MF, FI)));
7698       for (const auto &Part : Parts) {
7699         SDValue PartValue = Part.first;
7700         SDValue PartOffset = Part.second;
7701         SDValue Address =
7702             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
7703         MemOpChains.push_back(
7704             DAG.getStore(Chain, DL, PartValue, Address,
7705                          MachinePointerInfo::getFixedStack(MF, FI)));
7706       }
7707       ArgValue = SpillSlot;
7708     } else {
7709       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
7710     }
7711 
7712     // Use local copy if it is a byval arg.
7713     if (Flags.isByVal())
7714       ArgValue = ByValArgs[j++];
7715 
7716     if (VA.isRegLoc()) {
7717       // Queue up the argument copies and emit them at the end.
7718       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
7719     } else {
7720       assert(VA.isMemLoc() && "Argument not register or memory");
7721       assert(!IsTailCall && "Tail call not allowed if stack is used "
7722                             "for passing parameters");
7723 
7724       // Work out the address of the stack slot.
7725       if (!StackPtr.getNode())
7726         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7727       SDValue Address =
7728           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
7729                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
7730 
7731       // Emit the store.
7732       MemOpChains.push_back(
7733           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
7734     }
7735   }
7736 
7737   // Join the stores, which are independent of one another.
7738   if (!MemOpChains.empty())
7739     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
7740 
7741   SDValue Glue;
7742 
7743   // Build a sequence of copy-to-reg nodes, chained and glued together.
7744   for (auto &Reg : RegsToPass) {
7745     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
7746     Glue = Chain.getValue(1);
7747   }
7748 
7749   // Validate that none of the argument registers have been marked as
7750   // reserved; if any have, report an error. Do the same for the return
7751   // address if this is not a tail call.
7752   validateCCReservedRegs(RegsToPass, MF);
7753   if (!IsTailCall &&
7754       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
7755     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7756         MF.getFunction(),
7757         "Return address register required, but has been reserved."});
7758 
7759   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
7760   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
7761   // split it and then direct call can be matched by PseudoCALL.
7762   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
7763     const GlobalValue *GV = S->getGlobal();
7764 
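    // Use a PLT-relative call (MO_PLT) when the callee cannot be assumed to be
    // DSO-local; otherwise a direct call (MO_CALL) is sufficient.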
7765     unsigned OpFlags = RISCVII::MO_CALL;
7766     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
7767       OpFlags = RISCVII::MO_PLT;
7768 
7769     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
7770   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
7771     unsigned OpFlags = RISCVII::MO_CALL;
7772 
7773     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
7774                                                  nullptr))
7775       OpFlags = RISCVII::MO_PLT;
7776 
7777     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
7778   }
7779 
7780   // The first call operand is the chain and the second is the target address.
7781   SmallVector<SDValue, 8> Ops;
7782   Ops.push_back(Chain);
7783   Ops.push_back(Callee);
7784 
7785   // Add argument registers to the end of the list so that they are
7786   // known live into the call.
7787   for (auto &Reg : RegsToPass)
7788     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
7789 
7790   if (!IsTailCall) {
7791     // Add a register mask operand representing the call-preserved registers.
7792     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
7793     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
7794     assert(Mask && "Missing call preserved mask for calling convention");
7795     Ops.push_back(DAG.getRegisterMask(Mask));
7796   }
7797 
7798   // Glue the call to the argument copies, if any.
7799   if (Glue.getNode())
7800     Ops.push_back(Glue);
7801 
7802   // Emit the call.
7803   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7804 
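  // A tail call is emitted as a TAIL node and ends lowering here: the callee
  // reuses this function's stack frame, so no CALLSEQ_END and no return-value
  // copies are needed.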
7805   if (IsTailCall) {
7806     MF.getFrameInfo().setHasTailCall();
7807     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
7808   }
7809 
7810   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
7811   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
7812   Glue = Chain.getValue(1);
7813 
7814   // Mark the end of the call, which is glued to the call itself.
7815   Chain = DAG.getCALLSEQ_END(Chain,
7816                              DAG.getConstant(NumBytes, DL, PtrVT, true),
7817                              DAG.getConstant(0, DL, PtrVT, true),
7818                              Glue, DL);
7819   Glue = Chain.getValue(1);
7820 
7821   // Assign locations to each value returned by this call.
7822   SmallVector<CCValAssign, 16> RVLocs;
7823   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
7824   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
7825 
7826   // Copy all of the result registers out of their specified physreg.
7827   for (auto &VA : RVLocs) {
7828     // Copy the value out
7829     SDValue RetValue =
7830         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
7831     // Glue the RetValue to the end of the call sequence
7832     Chain = RetValue.getValue(1);
7833     Glue = RetValue.getValue(2);
7834 
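    // On RV32 with a soft-float ABI, an f64 result is returned in a pair of
    // GPRs (a0/a1); fetch the high half from the second register and rebuild
    // the f64 value.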
7835     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
7836       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
7837       SDValue RetValue2 =
7838           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
7839       Chain = RetValue2.getValue(1);
7840       Glue = RetValue2.getValue(2);
7841       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
7842                              RetValue2);
7843     }
7844 
7845     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
7846 
7847     InVals.push_back(RetValue);
7848   }
7849 
7850   return Chain;
7851 }
7852 
7853 bool RISCVTargetLowering::CanLowerReturn(
7854     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
7855     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
7856   SmallVector<CCValAssign, 16> RVLocs;
7857   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
7858 
7859   Optional<unsigned> FirstMaskArgument;
7860   if (Subtarget.hasStdExtV())
7861     FirstMaskArgument = preAssignMask(Outs);
7862 
7863   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7864     MVT VT = Outs[i].VT;
7865     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
7866     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7867     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
7868                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
7869                  *this, FirstMaskArgument))
7870       return false;
7871   }
7872   return true;
7873 }
7874 
7875 SDValue
7876 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7877                                  bool IsVarArg,
7878                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
7879                                  const SmallVectorImpl<SDValue> &OutVals,
7880                                  const SDLoc &DL, SelectionDAG &DAG) const {
7881   const MachineFunction &MF = DAG.getMachineFunction();
7882   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
7883 
7884   // Stores the assignment of the return value to a location.
7885   SmallVector<CCValAssign, 16> RVLocs;
7886 
7887   // Info about the registers and stack slot.
7888   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
7889                  *DAG.getContext());
7890 
7891   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
7892                     nullptr, CC_RISCV);
7893 
7894   if (CallConv == CallingConv::GHC && !RVLocs.empty())
7895     report_fatal_error("GHC functions return void only");
7896 
7897   SDValue Glue;
7898   SmallVector<SDValue, 4> RetOps(1, Chain);
7899 
7900   // Copy the result values into the output registers.
7901   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
7902     SDValue Val = OutVals[i];
7903     CCValAssign &VA = RVLocs[i];
7904     assert(VA.isRegLoc() && "Can only return in registers!");
7905 
7906     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
7907       // Handle returning f64 on RV32D with a soft float ABI.
7908       assert(VA.isRegLoc() && "Expected return via registers");
7909       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
7910                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
7911       SDValue Lo = SplitF64.getValue(0);
7912       SDValue Hi = SplitF64.getValue(1);
7913       Register RegLo = VA.getLocReg();
7914       assert(RegLo < RISCV::X31 && "Invalid register pair");
7915       Register RegHi = RegLo + 1;
7916 
7917       if (STI.isRegisterReservedByUser(RegLo) ||
7918           STI.isRegisterReservedByUser(RegHi))
7919         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7920             MF.getFunction(),
7921             "Return value register required, but has been reserved."});
7922 
7923       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
7924       Glue = Chain.getValue(1);
7925       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
7926       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
7927       Glue = Chain.getValue(1);
7928       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
7929     } else {
7930       // Handle a 'normal' return.
7931       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
7932       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
7933 
7934       if (STI.isRegisterReservedByUser(VA.getLocReg()))
7935         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7936             MF.getFunction(),
7937             "Return value register required, but has been reserved."});
7938 
7939       // Guarantee that all emitted copies are stuck together.
7940       Glue = Chain.getValue(1);
7941       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7942     }
7943   }
7944 
7945   RetOps[0] = Chain; // Update chain.
7946 
7947   // Add the glue node if we have it.
7948   if (Glue.getNode()) {
7949     RetOps.push_back(Glue);
7950   }
7951 
7952   // Interrupt service routines use different return instructions.
7953   const Function &Func = DAG.getMachineFunction().getFunction();
7954   if (Func.hasFnAttribute("interrupt")) {
7955     if (!Func.getReturnType()->isVoidTy())
7956       report_fatal_error(
7957           "Functions with the interrupt attribute must have void return type!");
7958 
7959     MachineFunction &MF = DAG.getMachineFunction();
7960     StringRef Kind =
7961       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7962 
7963     unsigned RetOpc;
7964     if (Kind == "user")
7965       RetOpc = RISCVISD::URET_FLAG;
7966     else if (Kind == "supervisor")
7967       RetOpc = RISCVISD::SRET_FLAG;
7968     else
7969       RetOpc = RISCVISD::MRET_FLAG;
7970 
7971     return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
7972   }
7973 
7974   return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
7975 }
7976 
7977 void RISCVTargetLowering::validateCCReservedRegs(
7978     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
7979     MachineFunction &MF) const {
7980   const Function &F = MF.getFunction();
7981   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
7982 
7983   if (llvm::any_of(Regs, [&STI](auto Reg) {
7984         return STI.isRegisterReservedByUser(Reg.first);
7985       }))
7986     F.getContext().diagnose(DiagnosticInfoUnsupported{
7987         F, "Argument register required, but has been reserved."});
7988 }
7989 
7990 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
7991   return CI->isTailCall();
7992 }
7993 
7994 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
7995 #define NODE_NAME_CASE(NODE)                                                   \
7996   case RISCVISD::NODE:                                                         \
7997     return "RISCVISD::" #NODE;
7998   // clang-format off
7999   switch ((RISCVISD::NodeType)Opcode) {
8000   case RISCVISD::FIRST_NUMBER:
8001     break;
8002   NODE_NAME_CASE(RET_FLAG)
8003   NODE_NAME_CASE(URET_FLAG)
8004   NODE_NAME_CASE(SRET_FLAG)
8005   NODE_NAME_CASE(MRET_FLAG)
8006   NODE_NAME_CASE(CALL)
8007   NODE_NAME_CASE(SELECT_CC)
8008   NODE_NAME_CASE(BR_CC)
8009   NODE_NAME_CASE(BuildPairF64)
8010   NODE_NAME_CASE(SplitF64)
8011   NODE_NAME_CASE(TAIL)
8012   NODE_NAME_CASE(MULHSU)
8013   NODE_NAME_CASE(SLLW)
8014   NODE_NAME_CASE(SRAW)
8015   NODE_NAME_CASE(SRLW)
8016   NODE_NAME_CASE(DIVW)
8017   NODE_NAME_CASE(DIVUW)
8018   NODE_NAME_CASE(REMUW)
8019   NODE_NAME_CASE(ROLW)
8020   NODE_NAME_CASE(RORW)
8021   NODE_NAME_CASE(CLZW)
8022   NODE_NAME_CASE(CTZW)
8023   NODE_NAME_CASE(FSLW)
8024   NODE_NAME_CASE(FSRW)
8025   NODE_NAME_CASE(FSL)
8026   NODE_NAME_CASE(FSR)
8027   NODE_NAME_CASE(FMV_H_X)
8028   NODE_NAME_CASE(FMV_X_ANYEXTH)
8029   NODE_NAME_CASE(FMV_W_X_RV64)
8030   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
8031   NODE_NAME_CASE(READ_CYCLE_WIDE)
8032   NODE_NAME_CASE(GREV)
8033   NODE_NAME_CASE(GREVW)
8034   NODE_NAME_CASE(GORC)
8035   NODE_NAME_CASE(GORCW)
8036   NODE_NAME_CASE(SHFL)
8037   NODE_NAME_CASE(SHFLW)
8038   NODE_NAME_CASE(UNSHFL)
8039   NODE_NAME_CASE(UNSHFLW)
8040   NODE_NAME_CASE(BCOMPRESS)
8041   NODE_NAME_CASE(BCOMPRESSW)
8042   NODE_NAME_CASE(BDECOMPRESS)
8043   NODE_NAME_CASE(BDECOMPRESSW)
8044   NODE_NAME_CASE(VMV_V_X_VL)
8045   NODE_NAME_CASE(VFMV_V_F_VL)
8046   NODE_NAME_CASE(VMV_X_S)
8047   NODE_NAME_CASE(VMV_S_X_VL)
8048   NODE_NAME_CASE(VFMV_S_F_VL)
8049   NODE_NAME_CASE(SPLAT_VECTOR_I64)
8050   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
8051   NODE_NAME_CASE(READ_VLENB)
8052   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
8053   NODE_NAME_CASE(VSLIDEUP_VL)
8054   NODE_NAME_CASE(VSLIDE1UP_VL)
8055   NODE_NAME_CASE(VSLIDEDOWN_VL)
8056   NODE_NAME_CASE(VSLIDE1DOWN_VL)
8057   NODE_NAME_CASE(VID_VL)
8058   NODE_NAME_CASE(VFNCVT_ROD_VL)
8059   NODE_NAME_CASE(VECREDUCE_ADD_VL)
8060   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
8061   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
8062   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
8063   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
8064   NODE_NAME_CASE(VECREDUCE_AND_VL)
8065   NODE_NAME_CASE(VECREDUCE_OR_VL)
8066   NODE_NAME_CASE(VECREDUCE_XOR_VL)
8067   NODE_NAME_CASE(VECREDUCE_FADD_VL)
8068   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
8069   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
8070   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
8071   NODE_NAME_CASE(ADD_VL)
8072   NODE_NAME_CASE(AND_VL)
8073   NODE_NAME_CASE(MUL_VL)
8074   NODE_NAME_CASE(OR_VL)
8075   NODE_NAME_CASE(SDIV_VL)
8076   NODE_NAME_CASE(SHL_VL)
8077   NODE_NAME_CASE(SREM_VL)
8078   NODE_NAME_CASE(SRA_VL)
8079   NODE_NAME_CASE(SRL_VL)
8080   NODE_NAME_CASE(SUB_VL)
8081   NODE_NAME_CASE(UDIV_VL)
8082   NODE_NAME_CASE(UREM_VL)
8083   NODE_NAME_CASE(XOR_VL)
8084   NODE_NAME_CASE(FADD_VL)
8085   NODE_NAME_CASE(FSUB_VL)
8086   NODE_NAME_CASE(FMUL_VL)
8087   NODE_NAME_CASE(FDIV_VL)
8088   NODE_NAME_CASE(FNEG_VL)
8089   NODE_NAME_CASE(FABS_VL)
8090   NODE_NAME_CASE(FSQRT_VL)
8091   NODE_NAME_CASE(FMA_VL)
8092   NODE_NAME_CASE(FCOPYSIGN_VL)
8093   NODE_NAME_CASE(SMIN_VL)
8094   NODE_NAME_CASE(SMAX_VL)
8095   NODE_NAME_CASE(UMIN_VL)
8096   NODE_NAME_CASE(UMAX_VL)
8097   NODE_NAME_CASE(FMINNUM_VL)
8098   NODE_NAME_CASE(FMAXNUM_VL)
8099   NODE_NAME_CASE(MULHS_VL)
8100   NODE_NAME_CASE(MULHU_VL)
8101   NODE_NAME_CASE(FP_TO_SINT_VL)
8102   NODE_NAME_CASE(FP_TO_UINT_VL)
8103   NODE_NAME_CASE(SINT_TO_FP_VL)
8104   NODE_NAME_CASE(UINT_TO_FP_VL)
8105   NODE_NAME_CASE(FP_EXTEND_VL)
8106   NODE_NAME_CASE(FP_ROUND_VL)
8107   NODE_NAME_CASE(SETCC_VL)
8108   NODE_NAME_CASE(VSELECT_VL)
8109   NODE_NAME_CASE(VMAND_VL)
8110   NODE_NAME_CASE(VMOR_VL)
8111   NODE_NAME_CASE(VMXOR_VL)
8112   NODE_NAME_CASE(VMCLR_VL)
8113   NODE_NAME_CASE(VMSET_VL)
8114   NODE_NAME_CASE(VRGATHER_VX_VL)
8115   NODE_NAME_CASE(VRGATHER_VV_VL)
8116   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
8117   NODE_NAME_CASE(VSEXT_VL)
8118   NODE_NAME_CASE(VZEXT_VL)
8119   NODE_NAME_CASE(VPOPC_VL)
8120   NODE_NAME_CASE(VLE_VL)
8121   NODE_NAME_CASE(VSE_VL)
8122   NODE_NAME_CASE(READ_CSR)
8123   NODE_NAME_CASE(WRITE_CSR)
8124   NODE_NAME_CASE(SWAP_CSR)
8125   }
8126   // clang-format on
8127   return nullptr;
8128 #undef NODE_NAME_CASE
8129 }
8130 
8131 /// getConstraintType - Given a constraint letter, return the type of
8132 /// constraint it is for this target.
8133 RISCVTargetLowering::ConstraintType
8134 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
8135   if (Constraint.size() == 1) {
8136     switch (Constraint[0]) {
8137     default:
8138       break;
8139     case 'f':
8140     case 'v':
8141       return C_RegisterClass;
8142     case 'I':
8143     case 'J':
8144     case 'K':
8145       return C_Immediate;
8146     case 'A':
8147       return C_Memory;
8148     }
8149   }
8150   return TargetLowering::getConstraintType(Constraint);
8151 }
8152 
8153 std::pair<unsigned, const TargetRegisterClass *>
8154 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
8155                                                   StringRef Constraint,
8156                                                   MVT VT) const {
8157   // First, see if this is a constraint that directly corresponds to a
8158   // RISCV register class.
8159   if (Constraint.size() == 1) {
8160     switch (Constraint[0]) {
8161     case 'r':
8162       return std::make_pair(0U, &RISCV::GPRRegClass);
8163     case 'f':
8164       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
8165         return std::make_pair(0U, &RISCV::FPR16RegClass);
8166       if (Subtarget.hasStdExtF() && VT == MVT::f32)
8167         return std::make_pair(0U, &RISCV::FPR32RegClass);
8168       if (Subtarget.hasStdExtD() && VT == MVT::f64)
8169         return std::make_pair(0U, &RISCV::FPR64RegClass);
8170       break;
8171     case 'v':
8172       for (const auto *RC :
8173            {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
8174             &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8175         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
8176           return std::make_pair(0U, RC);
8177       }
8178       break;
8179     default:
8180       break;
8181     }
8182   }
8183 
8184   // Clang will correctly decode the usage of register name aliases into their
8185   // official names. However, other frontends like `rustc` do not. This allows
8186   // users of these frontends to use the ABI names for registers in LLVM-style
8187   // register constraints.
8188   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
8189                                .Case("{zero}", RISCV::X0)
8190                                .Case("{ra}", RISCV::X1)
8191                                .Case("{sp}", RISCV::X2)
8192                                .Case("{gp}", RISCV::X3)
8193                                .Case("{tp}", RISCV::X4)
8194                                .Case("{t0}", RISCV::X5)
8195                                .Case("{t1}", RISCV::X6)
8196                                .Case("{t2}", RISCV::X7)
8197                                .Cases("{s0}", "{fp}", RISCV::X8)
8198                                .Case("{s1}", RISCV::X9)
8199                                .Case("{a0}", RISCV::X10)
8200                                .Case("{a1}", RISCV::X11)
8201                                .Case("{a2}", RISCV::X12)
8202                                .Case("{a3}", RISCV::X13)
8203                                .Case("{a4}", RISCV::X14)
8204                                .Case("{a5}", RISCV::X15)
8205                                .Case("{a6}", RISCV::X16)
8206                                .Case("{a7}", RISCV::X17)
8207                                .Case("{s2}", RISCV::X18)
8208                                .Case("{s3}", RISCV::X19)
8209                                .Case("{s4}", RISCV::X20)
8210                                .Case("{s5}", RISCV::X21)
8211                                .Case("{s6}", RISCV::X22)
8212                                .Case("{s7}", RISCV::X23)
8213                                .Case("{s8}", RISCV::X24)
8214                                .Case("{s9}", RISCV::X25)
8215                                .Case("{s10}", RISCV::X26)
8216                                .Case("{s11}", RISCV::X27)
8217                                .Case("{t3}", RISCV::X28)
8218                                .Case("{t4}", RISCV::X29)
8219                                .Case("{t5}", RISCV::X30)
8220                                .Case("{t6}", RISCV::X31)
8221                                .Default(RISCV::NoRegister);
8222   if (XRegFromAlias != RISCV::NoRegister)
8223     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
8224 
8225   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
8226   // TableGen record rather than the AsmName to choose registers for InlineAsm
8227   // constraints, plus we want to match those names to the widest floating point
8228   // register type available, manually select floating point registers here.
8229   //
8230   // The second case is the ABI name of the register, so that frontends can also
8231   // use the ABI names in register constraint lists.
8232   if (Subtarget.hasStdExtF()) {
8233     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
8234                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
8235                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
8236                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
8237                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
8238                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
8239                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
8240                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
8241                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
8242                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
8243                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
8244                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
8245                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
8246                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
8247                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
8248                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
8249                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
8250                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
8251                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
8252                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
8253                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
8254                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
8255                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
8256                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
8257                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
8258                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
8259                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
8260                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
8261                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
8262                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
8263                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
8264                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
8265                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
8266                         .Default(RISCV::NoRegister);
8267     if (FReg != RISCV::NoRegister) {
8268       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
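      // Prefer the widest floating point register class available: with the D
      // extension, map the 32-bit register name to the corresponding 64-bit
      // register.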
8269       if (Subtarget.hasStdExtD()) {
8270         unsigned RegNo = FReg - RISCV::F0_F;
8271         unsigned DReg = RISCV::F0_D + RegNo;
8272         return std::make_pair(DReg, &RISCV::FPR64RegClass);
8273       }
8274       return std::make_pair(FReg, &RISCV::FPR32RegClass);
8275     }
8276   }
8277 
8278   if (Subtarget.hasStdExtV()) {
8279     Register VReg = StringSwitch<Register>(Constraint.lower())
8280                         .Case("{v0}", RISCV::V0)
8281                         .Case("{v1}", RISCV::V1)
8282                         .Case("{v2}", RISCV::V2)
8283                         .Case("{v3}", RISCV::V3)
8284                         .Case("{v4}", RISCV::V4)
8285                         .Case("{v5}", RISCV::V5)
8286                         .Case("{v6}", RISCV::V6)
8287                         .Case("{v7}", RISCV::V7)
8288                         .Case("{v8}", RISCV::V8)
8289                         .Case("{v9}", RISCV::V9)
8290                         .Case("{v10}", RISCV::V10)
8291                         .Case("{v11}", RISCV::V11)
8292                         .Case("{v12}", RISCV::V12)
8293                         .Case("{v13}", RISCV::V13)
8294                         .Case("{v14}", RISCV::V14)
8295                         .Case("{v15}", RISCV::V15)
8296                         .Case("{v16}", RISCV::V16)
8297                         .Case("{v17}", RISCV::V17)
8298                         .Case("{v18}", RISCV::V18)
8299                         .Case("{v19}", RISCV::V19)
8300                         .Case("{v20}", RISCV::V20)
8301                         .Case("{v21}", RISCV::V21)
8302                         .Case("{v22}", RISCV::V22)
8303                         .Case("{v23}", RISCV::V23)
8304                         .Case("{v24}", RISCV::V24)
8305                         .Case("{v25}", RISCV::V25)
8306                         .Case("{v26}", RISCV::V26)
8307                         .Case("{v27}", RISCV::V27)
8308                         .Case("{v28}", RISCV::V28)
8309                         .Case("{v29}", RISCV::V29)
8310                         .Case("{v30}", RISCV::V30)
8311                         .Case("{v31}", RISCV::V31)
8312                         .Default(RISCV::NoRegister);
8313     if (VReg != RISCV::NoRegister) {
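      // Choose the narrowest vector register class that can legally hold VT;
      // for LMUL > 1 types, translate the single register into the register
      // group that starts at it.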
8314       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
8315         return std::make_pair(VReg, &RISCV::VMRegClass);
8316       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
8317         return std::make_pair(VReg, &RISCV::VRRegClass);
8318       for (const auto *RC :
8319            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8320         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
8321           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
8322           return std::make_pair(VReg, RC);
8323         }
8324       }
8325     }
8326   }
8327 
8328   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
8329 }
8330 
8331 unsigned
8332 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
8333   // Currently only support length 1 constraints.
8334   if (ConstraintCode.size() == 1) {
8335     switch (ConstraintCode[0]) {
8336     case 'A':
8337       return InlineAsm::Constraint_A;
8338     default:
8339       break;
8340     }
8341   }
8342 
8343   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
8344 }
8345 
8346 void RISCVTargetLowering::LowerAsmOperandForConstraint(
8347     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
8348     SelectionDAG &DAG) const {
8349   // Currently only support length 1 constraints.
8350   if (Constraint.length() == 1) {
8351     switch (Constraint[0]) {
8352     case 'I':
8353       // Validate & create a 12-bit signed immediate operand.
8354       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8355         uint64_t CVal = C->getSExtValue();
8356         if (isInt<12>(CVal))
8357           Ops.push_back(
8358               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8359       }
8360       return;
8361     case 'J':
8362       // Validate & create an integer zero operand.
8363       if (auto *C = dyn_cast<ConstantSDNode>(Op))
8364         if (C->getZExtValue() == 0)
8365           Ops.push_back(
8366               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
8367       return;
8368     case 'K':
8369       // Validate & create a 5-bit unsigned immediate operand.
8370       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8371         uint64_t CVal = C->getZExtValue();
8372         if (isUInt<5>(CVal))
8373           Ops.push_back(
8374               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8375       }
8376       return;
8377     default:
8378       break;
8379     }
8380   }
8381   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
8382 }
8383 
8384 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
8385                                                    Instruction *Inst,
8386                                                    AtomicOrdering Ord) const {
8387   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
8388     return Builder.CreateFence(Ord);
8389   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
8390     return Builder.CreateFence(AtomicOrdering::Release);
8391   return nullptr;
8392 }
8393 
8394 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
8395                                                     Instruction *Inst,
8396                                                     AtomicOrdering Ord) const {
8397   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
8398     return Builder.CreateFence(AtomicOrdering::Acquire);
8399   return nullptr;
8400 }
8401 
8402 TargetLowering::AtomicExpansionKind
8403 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
8404   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
8405   // point operations can't be used in an lr/sc sequence without breaking the
8406   // forward-progress guarantee.
8407   if (AI->isFloatingPointOperation())
8408     return AtomicExpansionKind::CmpXChg;
8409 
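  // 8- and 16-bit atomicrmw operations have no native AMO encodings, so expand
  // them to the masked intrinsics, which operate on the containing aligned
  // word with an LR/SC loop.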
8410   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
8411   if (Size == 8 || Size == 16)
8412     return AtomicExpansionKind::MaskedIntrinsic;
8413   return AtomicExpansionKind::None;
8414 }
8415 
8416 static Intrinsic::ID
8417 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
8418   if (XLen == 32) {
8419     switch (BinOp) {
8420     default:
8421       llvm_unreachable("Unexpected AtomicRMW BinOp");
8422     case AtomicRMWInst::Xchg:
8423       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
8424     case AtomicRMWInst::Add:
8425       return Intrinsic::riscv_masked_atomicrmw_add_i32;
8426     case AtomicRMWInst::Sub:
8427       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
8428     case AtomicRMWInst::Nand:
8429       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
8430     case AtomicRMWInst::Max:
8431       return Intrinsic::riscv_masked_atomicrmw_max_i32;
8432     case AtomicRMWInst::Min:
8433       return Intrinsic::riscv_masked_atomicrmw_min_i32;
8434     case AtomicRMWInst::UMax:
8435       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
8436     case AtomicRMWInst::UMin:
8437       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
8438     }
8439   }
8440 
8441   if (XLen == 64) {
8442     switch (BinOp) {
8443     default:
8444       llvm_unreachable("Unexpected AtomicRMW BinOp");
8445     case AtomicRMWInst::Xchg:
8446       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
8447     case AtomicRMWInst::Add:
8448       return Intrinsic::riscv_masked_atomicrmw_add_i64;
8449     case AtomicRMWInst::Sub:
8450       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
8451     case AtomicRMWInst::Nand:
8452       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
8453     case AtomicRMWInst::Max:
8454       return Intrinsic::riscv_masked_atomicrmw_max_i64;
8455     case AtomicRMWInst::Min:
8456       return Intrinsic::riscv_masked_atomicrmw_min_i64;
8457     case AtomicRMWInst::UMax:
8458       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
8459     case AtomicRMWInst::UMin:
8460       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
8461     }
8462   }
8463 
8464   llvm_unreachable("Unexpected XLen");
8465 }
8466 
8467 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
8468     IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
8469     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
8470   unsigned XLen = Subtarget.getXLen();
8471   Value *Ordering =
8472       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
8473   Type *Tys[] = {AlignedAddr->getType()};
8474   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
8475       AI->getModule(),
8476       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
8477 
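  // On RV64 the masked intrinsics take XLen-wide (i64) operands, so
  // sign-extend the i32 inputs to match.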
8478   if (XLen == 64) {
8479     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
8480     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8481     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
8482   }
8483 
8484   Value *Result;
8485 
8486   // Must pass the shift amount needed to sign extend the loaded value prior
8487   // to performing a signed comparison for min/max. ShiftAmt is the number of
8488   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
8489   // is the number of bits to left+right shift the value in order to
8490   // sign-extend.
8491   if (AI->getOperation() == AtomicRMWInst::Min ||
8492       AI->getOperation() == AtomicRMWInst::Max) {
8493     const DataLayout &DL = AI->getModule()->getDataLayout();
8494     unsigned ValWidth =
8495         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
8496     Value *SextShamt =
8497         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
8498     Result = Builder.CreateCall(LrwOpScwLoop,
8499                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
8500   } else {
8501     Result =
8502         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
8503   }
8504 
8505   if (XLen == 64)
8506     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8507   return Result;
8508 }
8509 
8510 TargetLowering::AtomicExpansionKind
8511 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
8512     AtomicCmpXchgInst *CI) const {
8513   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
8514   if (Size == 8 || Size == 16)
8515     return AtomicExpansionKind::MaskedIntrinsic;
8516   return AtomicExpansionKind::None;
8517 }
8518 
8519 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
8520     IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
8521     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
8522   unsigned XLen = Subtarget.getXLen();
8523   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
8524   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
8525   if (XLen == 64) {
8526     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
8527     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
8528     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8529     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
8530   }
8531   Type *Tys[] = {AlignedAddr->getType()};
8532   Function *MaskedCmpXchg =
8533       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
8534   Value *Result = Builder.CreateCall(
8535       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
8536   if (XLen == 64)
8537     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8538   return Result;
8539 }
8540 
8541 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
8542   return false;
8543 }
8544 
8545 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
8546                                                      EVT VT) const {
8547   VT = VT.getScalarType();
8548 
8549   if (!VT.isSimple())
8550     return false;
8551 
8552   switch (VT.getSimpleVT().SimpleTy) {
8553   case MVT::f16:
8554     return Subtarget.hasStdExtZfh();
8555   case MVT::f32:
8556     return Subtarget.hasStdExtF();
8557   case MVT::f64:
8558     return Subtarget.hasStdExtD();
8559   default:
8560     break;
8561   }
8562 
8563   return false;
8564 }
8565 
8566 Register RISCVTargetLowering::getExceptionPointerRegister(
8567     const Constant *PersonalityFn) const {
8568   return RISCV::X10;
8569 }
8570 
8571 Register RISCVTargetLowering::getExceptionSelectorRegister(
8572     const Constant *PersonalityFn) const {
8573   return RISCV::X11;
8574 }
8575 
8576 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
8577   // Return false to suppress the unnecessary extensions if the libcall
8578   // argument or return value is of f32 type for the LP64 ABI.
8579   RISCVABI::ABI ABI = Subtarget.getTargetABI();
8580   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
8581     return false;
8582 
8583   return true;
8584 }
8585 
8586 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
8587   if (Subtarget.is64Bit() && Type == MVT::i32)
8588     return true;
8589 
8590   return IsSigned;
8591 }
8592 
8593 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
8594                                                  SDValue C) const {
8595   // Check integral scalar types.
8596   if (VT.isScalarInteger()) {
8597     // Omit the optimization if the subtarget has the M extension and the data
8598     // size exceeds XLen.
8599     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
8600       return false;
8601     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
8602       // Break the MUL to a SLLI and an ADD/SUB.
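      // e.g. mul x, 9 -> (x << 3) + x, and mul x, -7 -> x - (x << 3).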
8603       const APInt &Imm = ConstNode->getAPIntValue();
8604       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
8605           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
8606         return true;
8607       // Omit the following optimization if the subtarget has the M extension
8608       // and the data size >= XLen.
8609       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
8610         return false;
8611       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
8612       // a pair of LUI/ADDI.
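      // e.g. mul x, 0x1200 (= 9 << 9) -> ((x << 3) + x) << 9.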
8613       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
8614         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
8615         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
8616             (1 - ImmS).isPowerOf2())
8617           return true;
8618       }
8619     }
8620   }
8621 
8622   return false;
8623 }
8624 
8625 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
8626     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
8627     bool *Fast) const {
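  // Only vector types are handled here. RVV loads and stores only require
  // element alignment, so treat element-aligned accesses as fast.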
8628   if (!VT.isVector())
8629     return false;
8630 
8631   EVT ElemVT = VT.getVectorElementType();
8632   if (Alignment >= ElemVT.getStoreSize()) {
8633     if (Fast)
8634       *Fast = true;
8635     return true;
8636   }
8637 
8638   return false;
8639 }
8640 
8641 bool RISCVTargetLowering::splitValueIntoRegisterParts(
8642     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
8643     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
8644   bool IsABIRegCopy = CC.hasValue();
8645   EVT ValueVT = Val.getValueType();
8646   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8647     // Cast the f16 to i16, extend to i32, pad the upper bits with ones so the
8648     // result is a NaN-boxed f32, and cast to f32.
8649     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
8650     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
8651     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
8652                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
8653     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
8654     Parts[0] = Val;
8655     return true;
8656   }
8657 
8658   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8659     LLVMContext &Context = *DAG.getContext();
8660     EVT ValueEltVT = ValueVT.getVectorElementType();
8661     EVT PartEltVT = PartVT.getVectorElementType();
8662     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8663     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8664     if (PartVTBitSize % ValueVTBitSize == 0) {
8665       // If the element types are different, bitcast to the same element type of
8666       // PartVT first.
8667       if (ValueEltVT != PartEltVT) {
8668         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
8669         assert(Count != 0 && "The number of elements should not be zero.");
8670         EVT SameEltTypeVT =
8671             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8672         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
8673       }
8674       Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
8675                         Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8676       Parts[0] = Val;
8677       return true;
8678     }
8679   }
8680   return false;
8681 }
8682 
8683 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
8684     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
8685     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
8686   bool IsABIRegCopy = CC.hasValue();
8687   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8688     SDValue Val = Parts[0];
8689 
8690     // Cast the f32 to i32, truncate to i16, and cast back to f16.
8691     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
8692     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
8693     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
8694     return Val;
8695   }
8696 
8697   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8698     LLVMContext &Context = *DAG.getContext();
8699     SDValue Val = Parts[0];
8700     EVT ValueEltVT = ValueVT.getVectorElementType();
8701     EVT PartEltVT = PartVT.getVectorElementType();
8702     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8703     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8704     if (PartVTBitSize % ValueVTBitSize == 0) {
8705       EVT SameEltTypeVT = ValueVT;
8706       // If the element types are different, convert it to the same element type
8707       // of PartVT.
8708       if (ValueEltVT != PartEltVT) {
8709         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
8710         assert(Count != 0 && "The number of elements should not be zero.");
8711         SameEltTypeVT =
8712             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8713       }
8714       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
8715                         DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8716       if (ValueEltVT != PartEltVT)
8717         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
8718       return Val;
8719     }
8720   }
8721   return SDValue();
8722 }
8723 
8724 #define GET_REGISTER_MATCHER
8725 #include "RISCVGenAsmMatcher.inc"
8726 
8727 Register
8728 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
8729                                        const MachineFunction &MF) const {
8730   Register Reg = MatchRegisterAltName(RegName);
8731   if (Reg == RISCV::NoRegister)
8732     Reg = MatchRegisterName(RegName);
8733   if (Reg == RISCV::NoRegister)
8734     report_fatal_error(
8735         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
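  // Registers read by name this way must be reserved (architecturally or by
  // the user); otherwise the register allocator would be free to clobber them.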
8736   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
8737   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
8738     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
8739                              StringRef(RegName) + "\"."));
8740   return Reg;
8741 }
8742 
8743 namespace llvm {
8744 namespace RISCVVIntrinsicsTable {
8745 
8746 #define GET_RISCVVIntrinsicsTable_IMPL
8747 #include "RISCVGenSearchableTables.inc"
8748 
8749 } // namespace RISCVVIntrinsicsTable
8750 
8751 } // namespace llvm
8752