1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/CodeGen/ValueTypes.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "llvm/IR/DiagnosticPrinter.h"
32 #include "llvm/IR/IntrinsicsRISCV.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/KnownBits.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "riscv-lower"
42 
43 STATISTIC(NumTailCalls, "Number of tail calls");
44 
45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
46                                          const RISCVSubtarget &STI)
47     : TargetLowering(TM), Subtarget(STI) {
48 
49   if (Subtarget.isRV32E())
50     report_fatal_error("Codegen not yet implemented for RV32E");
51 
52   RISCVABI::ABI ABI = Subtarget.getTargetABI();
53   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
54 
55   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
56       !Subtarget.hasStdExtF()) {
57     errs() << "Hard-float 'f' ABI can't be used for a target that "
58                 "doesn't support the F instruction set extension (ignoring "
59                           "target-abi)\n";
60     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
61   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
62              !Subtarget.hasStdExtD()) {
63     errs() << "Hard-float 'd' ABI can't be used for a target that "
64               "doesn't support the D instruction set extension (ignoring "
65               "target-abi)\n";
66     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
67   }
68 
69   switch (ABI) {
70   default:
71     report_fatal_error("Don't know how to lower this ABI");
72   case RISCVABI::ABI_ILP32:
73   case RISCVABI::ABI_ILP32F:
74   case RISCVABI::ABI_ILP32D:
75   case RISCVABI::ABI_LP64:
76   case RISCVABI::ABI_LP64F:
77   case RISCVABI::ABI_LP64D:
78     break;
79   }
80 
81   MVT XLenVT = Subtarget.getXLenVT();
82 
83   // Set up the register classes.
84   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
85 
86   if (Subtarget.hasStdExtZfh())
87     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
88   if (Subtarget.hasStdExtF())
89     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
90   if (Subtarget.hasStdExtD())
91     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
92 
93   static const MVT::SimpleValueType BoolVecVTs[] = {
94       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
95       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
96   static const MVT::SimpleValueType IntVecVTs[] = {
97       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
98       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
99       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
100       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
101       MVT::nxv4i64, MVT::nxv8i64};
102   static const MVT::SimpleValueType F16VecVTs[] = {
103       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
104       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
105   static const MVT::SimpleValueType F32VecVTs[] = {
106       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
107   static const MVT::SimpleValueType F64VecVTs[] = {
108       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
109 
110   if (Subtarget.hasStdExtV()) {
111     auto addRegClassForRVV = [this](MVT VT) {
112       unsigned Size = VT.getSizeInBits().getKnownMinValue();
113       assert(Size <= 512 && isPowerOf2_32(Size));
114       const TargetRegisterClass *RC;
115       if (Size <= 64)
116         RC = &RISCV::VRRegClass;
117       else if (Size == 128)
118         RC = &RISCV::VRM2RegClass;
119       else if (Size == 256)
120         RC = &RISCV::VRM4RegClass;
121       else
122         RC = &RISCV::VRM8RegClass;
123 
124       addRegisterClass(VT, RC);
125     };
126 
127     for (MVT VT : BoolVecVTs)
128       addRegClassForRVV(VT);
129     for (MVT VT : IntVecVTs)
130       addRegClassForRVV(VT);
131 
132     if (Subtarget.hasStdExtZfh())
133       for (MVT VT : F16VecVTs)
134         addRegClassForRVV(VT);
135 
136     if (Subtarget.hasStdExtF())
137       for (MVT VT : F32VecVTs)
138         addRegClassForRVV(VT);
139 
140     if (Subtarget.hasStdExtD())
141       for (MVT VT : F64VecVTs)
142         addRegClassForRVV(VT);
143 
144     if (Subtarget.useRVVForFixedLengthVectors()) {
145       auto addRegClassForFixedVectors = [this](MVT VT) {
146         MVT ContainerVT = getContainerForFixedLengthVector(VT);
147         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
148         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
149         addRegisterClass(VT, TRI.getRegClass(RCID));
150       };
151       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
152         if (useRVVForFixedLengthVectorVT(VT))
153           addRegClassForFixedVectors(VT);
154 
155       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
156         if (useRVVForFixedLengthVectorVT(VT))
157           addRegClassForFixedVectors(VT);
158     }
159   }
160 
161   // Compute derived properties from the register classes.
162   computeRegisterProperties(STI.getRegisterInfo());
163 
164   setStackPointerRegisterToSaveRestore(RISCV::X2);
165 
166   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
167     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
168 
169   // TODO: add all necessary setOperationAction calls.
170   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
171 
172   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
173   setOperationAction(ISD::BR_CC, XLenVT, Expand);
174   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
175   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
176 
177   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
178   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
179 
180   setOperationAction(ISD::VASTART, MVT::Other, Custom);
181   setOperationAction(ISD::VAARG, MVT::Other, Expand);
182   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
183   setOperationAction(ISD::VAEND, MVT::Other, Expand);
184 
185   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
186   if (!Subtarget.hasStdExtZbb()) {
187     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
188     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
189   }
190 
191   if (Subtarget.is64Bit()) {
192     setOperationAction(ISD::ADD, MVT::i32, Custom);
193     setOperationAction(ISD::SUB, MVT::i32, Custom);
194     setOperationAction(ISD::SHL, MVT::i32, Custom);
195     setOperationAction(ISD::SRA, MVT::i32, Custom);
196     setOperationAction(ISD::SRL, MVT::i32, Custom);
197 
198     setOperationAction(ISD::UADDO, MVT::i32, Custom);
199     setOperationAction(ISD::USUBO, MVT::i32, Custom);
200     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
201     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
202   }
203 
204   if (!Subtarget.hasStdExtM()) {
205     setOperationAction(ISD::MUL, XLenVT, Expand);
206     setOperationAction(ISD::MULHS, XLenVT, Expand);
207     setOperationAction(ISD::MULHU, XLenVT, Expand);
208     setOperationAction(ISD::SDIV, XLenVT, Expand);
209     setOperationAction(ISD::UDIV, XLenVT, Expand);
210     setOperationAction(ISD::SREM, XLenVT, Expand);
211     setOperationAction(ISD::UREM, XLenVT, Expand);
212   } else {
213     if (Subtarget.is64Bit()) {
214       setOperationAction(ISD::MUL, MVT::i32, Custom);
215       setOperationAction(ISD::MUL, MVT::i128, Custom);
216 
217       setOperationAction(ISD::SDIV, MVT::i8, Custom);
218       setOperationAction(ISD::UDIV, MVT::i8, Custom);
219       setOperationAction(ISD::UREM, MVT::i8, Custom);
220       setOperationAction(ISD::SDIV, MVT::i16, Custom);
221       setOperationAction(ISD::UDIV, MVT::i16, Custom);
222       setOperationAction(ISD::UREM, MVT::i16, Custom);
223       setOperationAction(ISD::SDIV, MVT::i32, Custom);
224       setOperationAction(ISD::UDIV, MVT::i32, Custom);
225       setOperationAction(ISD::UREM, MVT::i32, Custom);
226     } else {
227       setOperationAction(ISD::MUL, MVT::i64, Custom);
228     }
229   }
230 
231   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
232   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
233   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
234   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
235 
236   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
237   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
238   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
239 
240   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
241     if (Subtarget.is64Bit()) {
242       setOperationAction(ISD::ROTL, MVT::i32, Custom);
243       setOperationAction(ISD::ROTR, MVT::i32, Custom);
244     }
245   } else {
246     setOperationAction(ISD::ROTL, XLenVT, Expand);
247     setOperationAction(ISD::ROTR, XLenVT, Expand);
248   }
249 
250   if (Subtarget.hasStdExtZbp()) {
251     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
252     // more combining.
253     setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
254     setOperationAction(ISD::BSWAP, XLenVT, Custom);
255 
256     if (Subtarget.is64Bit()) {
257       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
258       setOperationAction(ISD::BSWAP, MVT::i32, Custom);
259     }
260   } else {
261     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
262     // pattern match it directly in isel.
263     setOperationAction(ISD::BSWAP, XLenVT,
264                        Subtarget.hasStdExtZbb() ? Legal : Expand);
265   }
266 
267   if (Subtarget.hasStdExtZbb()) {
268     setOperationAction(ISD::SMIN, XLenVT, Legal);
269     setOperationAction(ISD::SMAX, XLenVT, Legal);
270     setOperationAction(ISD::UMIN, XLenVT, Legal);
271     setOperationAction(ISD::UMAX, XLenVT, Legal);
272 
273     if (Subtarget.is64Bit()) {
274       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
275       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
276       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
277       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
278     }
279   } else {
280     setOperationAction(ISD::CTTZ, XLenVT, Expand);
281     setOperationAction(ISD::CTLZ, XLenVT, Expand);
282     setOperationAction(ISD::CTPOP, XLenVT, Expand);
283   }
284 
285   if (Subtarget.hasStdExtZbt()) {
286     setOperationAction(ISD::FSHL, XLenVT, Custom);
287     setOperationAction(ISD::FSHR, XLenVT, Custom);
288     setOperationAction(ISD::SELECT, XLenVT, Legal);
289 
290     if (Subtarget.is64Bit()) {
291       setOperationAction(ISD::FSHL, MVT::i32, Custom);
292       setOperationAction(ISD::FSHR, MVT::i32, Custom);
293     }
294   } else {
295     setOperationAction(ISD::SELECT, XLenVT, Custom);
296   }
297 
298   ISD::CondCode FPCCToExpand[] = {
299       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
300       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
301       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
302 
303   ISD::NodeType FPOpToExpand[] = {
304       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
305       ISD::FP_TO_FP16};
306 
307   if (Subtarget.hasStdExtZfh())
308     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
309 
310   if (Subtarget.hasStdExtZfh()) {
311     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
312     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
313     for (auto CC : FPCCToExpand)
314       setCondCodeAction(CC, MVT::f16, Expand);
315     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
316     setOperationAction(ISD::SELECT, MVT::f16, Custom);
317     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
318     for (auto Op : FPOpToExpand)
319       setOperationAction(Op, MVT::f16, Expand);
320   }
321 
322   if (Subtarget.hasStdExtF()) {
323     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
324     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
325     for (auto CC : FPCCToExpand)
326       setCondCodeAction(CC, MVT::f32, Expand);
327     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
328     setOperationAction(ISD::SELECT, MVT::f32, Custom);
329     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
330     for (auto Op : FPOpToExpand)
331       setOperationAction(Op, MVT::f32, Expand);
332     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
333     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
334   }
335 
336   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
337     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
338 
339   if (Subtarget.hasStdExtD()) {
340     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
341     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
342     for (auto CC : FPCCToExpand)
343       setCondCodeAction(CC, MVT::f64, Expand);
344     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
345     setOperationAction(ISD::SELECT, MVT::f64, Custom);
346     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
347     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
348     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
349     for (auto Op : FPOpToExpand)
350       setOperationAction(Op, MVT::f64, Expand);
351     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
352     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
353   }
354 
355   if (Subtarget.is64Bit()) {
356     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
357     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
358     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
359     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
360   }
361 
362   if (Subtarget.hasStdExtF()) {
363     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
364     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
365   }
366 
367   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
368   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
369   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
370   setOperationAction(ISD::JumpTable, XLenVT, Custom);
371 
372   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
373 
374   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
375   // Unfortunately this can't be determined just from the ISA naming string.
376   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
377                      Subtarget.is64Bit() ? Legal : Custom);
378 
379   setOperationAction(ISD::TRAP, MVT::Other, Legal);
380   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
381   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
382   if (Subtarget.is64Bit())
383     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
384 
385   if (Subtarget.hasStdExtA()) {
386     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
387     setMinCmpXchgSizeInBits(32);
388   } else {
389     setMaxAtomicSizeInBitsSupported(0);
390   }
391 
392   setBooleanContents(ZeroOrOneBooleanContent);
393 
394   if (Subtarget.hasStdExtV()) {
395     setBooleanVectorContents(ZeroOrOneBooleanContent);
396 
397     setOperationAction(ISD::VSCALE, XLenVT, Custom);
398 
399     // RVV intrinsics may have illegal operands.
400     // We also need to custom legalize vmv.x.s.
401     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
402     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
403     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
404     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
405     if (Subtarget.is64Bit()) {
406       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
407     } else {
408       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
409       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
410     }
411 
412     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
413 
414     static unsigned IntegerVPOps[] = {
415         ISD::VP_ADD,  ISD::VP_SUB,  ISD::VP_MUL, ISD::VP_SDIV, ISD::VP_UDIV,
416         ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,   ISD::VP_XOR,
417         ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL};
418 
419     if (!Subtarget.is64Bit()) {
420       // We must custom-lower certain vXi64 operations on RV32 due to the vector
421       // element type being illegal.
422       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
423       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
424 
425       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
426       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
427       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
428       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
429       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
430       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
431       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
432       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
433     }
434 
435     for (MVT VT : BoolVecVTs) {
436       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
437 
438       // Mask VTs are custom-expanded into a series of standard nodes
439       setOperationAction(ISD::TRUNCATE, VT, Custom);
440       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
441       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
442 
443       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
444 
445       setOperationAction(ISD::SELECT, VT, Expand);
446       setOperationAction(ISD::SELECT_CC, VT, Expand);
447 
448       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
449       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
450       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
451 
452       // Expand all extending loads to types larger than this, and truncating
453       // stores from types larger than this.
454       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
455         setTruncStoreAction(OtherVT, VT, Expand);
456         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
457         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
458         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
459       }
460     }
461 
462     for (MVT VT : IntVecVTs) {
463       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
464       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
465 
466       setOperationAction(ISD::SMIN, VT, Legal);
467       setOperationAction(ISD::SMAX, VT, Legal);
468       setOperationAction(ISD::UMIN, VT, Legal);
469       setOperationAction(ISD::UMAX, VT, Legal);
470 
471       setOperationAction(ISD::ROTL, VT, Expand);
472       setOperationAction(ISD::ROTR, VT, Expand);
473 
474       // Custom-lower extensions and truncations from/to mask types.
475       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
476       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
477       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
478 
479       // RVV has native int->float & float->int conversions where the
480       // element type sizes are within one power-of-two of each other. Any
481       // wider distances between type sizes have to be lowered as sequences
482       // which progressively narrow the gap in stages.
483       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
484       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
485       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
486       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
487 
488       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
489       // nodes which truncate by one power of two at a time.
490       setOperationAction(ISD::TRUNCATE, VT, Custom);
491 
492       // Custom-lower insert/extract operations to simplify patterns.
493       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
494       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
495 
496       // Custom-lower reduction operations to set up the corresponding custom
497       // nodes' operands.
498       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
499       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
500       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
501       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
502       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
503       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
504       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
505       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
506 
507       for (unsigned VPOpc : IntegerVPOps) {
508         setOperationAction(VPOpc, VT, Custom);
509         // RV64 must custom-legalize the i32 EVL parameter.
510         if (Subtarget.is64Bit())
511           setOperationAction(VPOpc, MVT::i32, Custom);
512       }
513 
514       setOperationAction(ISD::MLOAD, VT, Custom);
515       setOperationAction(ISD::MSTORE, VT, Custom);
516       setOperationAction(ISD::MGATHER, VT, Custom);
517       setOperationAction(ISD::MSCATTER, VT, Custom);
518 
519       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
520       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
521       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
522 
523       setOperationAction(ISD::SELECT, VT, Expand);
524       setOperationAction(ISD::SELECT_CC, VT, Expand);
525 
526       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
527       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
528 
529       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
530         setTruncStoreAction(VT, OtherVT, Expand);
531         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
532         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
533         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
534       }
535     }
536 
537     // Expand various CCs to best match the RVV ISA, which natively supports UNE
538     // but no other unordered comparisons, and supports all ordered comparisons
539     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
540     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
541     // and we pattern-match those back to the "original", swapping operands once
542     // more. This way we catch both operations and both "vf" and "fv" forms with
543     // fewer patterns.
544     ISD::CondCode VFPCCToExpand[] = {
545         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
546         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
547         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
548     };
549 
550     // Sets common operation actions on RVV floating-point vector types.
551     const auto SetCommonVFPActions = [&](MVT VT) {
552       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
553       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
554       // sizes are within one power-of-two of each other. Therefore conversions
555       // between vXf16 and vXf64 must be lowered as sequences which convert via
556       // vXf32.
557       setOperationAction(ISD::FP_ROUND, VT, Custom);
558       setOperationAction(ISD::FP_EXTEND, VT, Custom);
559       // Custom-lower insert/extract operations to simplify patterns.
560       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
561       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
562       // Expand various condition codes (explained above).
563       for (auto CC : VFPCCToExpand)
564         setCondCodeAction(CC, VT, Expand);
565 
566       setOperationAction(ISD::FMINNUM, VT, Legal);
567       setOperationAction(ISD::FMAXNUM, VT, Legal);
568 
569       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
570       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
571       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
572       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
573       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
574 
575       setOperationAction(ISD::MLOAD, VT, Custom);
576       setOperationAction(ISD::MSTORE, VT, Custom);
577       setOperationAction(ISD::MGATHER, VT, Custom);
578       setOperationAction(ISD::MSCATTER, VT, Custom);
579 
580       setOperationAction(ISD::SELECT, VT, Expand);
581       setOperationAction(ISD::SELECT_CC, VT, Expand);
582 
583       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
584       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
585       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
586 
587       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
588     };
589 
590     // Sets common extload/truncstore actions on RVV floating-point vector
591     // types.
592     const auto SetCommonVFPExtLoadTruncStoreActions =
593         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
594           for (auto SmallVT : SmallerVTs) {
595             setTruncStoreAction(VT, SmallVT, Expand);
596             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
597           }
598         };
599 
600     if (Subtarget.hasStdExtZfh())
601       for (MVT VT : F16VecVTs)
602         SetCommonVFPActions(VT);
603 
604     for (MVT VT : F32VecVTs) {
605       if (Subtarget.hasStdExtF())
606         SetCommonVFPActions(VT);
607       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
608     }
609 
610     for (MVT VT : F64VecVTs) {
611       if (Subtarget.hasStdExtD())
612         SetCommonVFPActions(VT);
613       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
614       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
615     }
616 
617     if (Subtarget.useRVVForFixedLengthVectors()) {
618       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
619         if (!useRVVForFixedLengthVectorVT(VT))
620           continue;
621 
622         // By default everything must be expanded.
623         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
624           setOperationAction(Op, VT, Expand);
625         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
626           setTruncStoreAction(VT, OtherVT, Expand);
627           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
628           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
629           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
630         }
631 
632         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
633         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
634         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
635 
636         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
637         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
638 
639         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
640 
641         setOperationAction(ISD::LOAD, VT, Custom);
642         setOperationAction(ISD::STORE, VT, Custom);
643 
644         setOperationAction(ISD::SETCC, VT, Custom);
645 
646         setOperationAction(ISD::TRUNCATE, VT, Custom);
647 
648         setOperationAction(ISD::BITCAST, VT, Custom);
649 
650         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
651         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
652         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
653 
654         // Operations below are different for between masks and other vectors.
655         if (VT.getVectorElementType() == MVT::i1) {
656           setOperationAction(ISD::AND, VT, Custom);
657           setOperationAction(ISD::OR, VT, Custom);
658           setOperationAction(ISD::XOR, VT, Custom);
659           continue;
660         }
661 
662         // Use SPLAT_VECTOR to prevent type legalization from destroying the
663         // splats when type legalizing i64 scalar on RV32.
664         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
665         // improvements first.
666         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
667           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
668           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
669         }
670 
671         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
672         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
673 
674         setOperationAction(ISD::MLOAD, VT, Custom);
675         setOperationAction(ISD::MSTORE, VT, Custom);
676         setOperationAction(ISD::MGATHER, VT, Custom);
677         setOperationAction(ISD::MSCATTER, VT, Custom);
678         setOperationAction(ISD::ADD, VT, Custom);
679         setOperationAction(ISD::MUL, VT, Custom);
680         setOperationAction(ISD::SUB, VT, Custom);
681         setOperationAction(ISD::AND, VT, Custom);
682         setOperationAction(ISD::OR, VT, Custom);
683         setOperationAction(ISD::XOR, VT, Custom);
684         setOperationAction(ISD::SDIV, VT, Custom);
685         setOperationAction(ISD::SREM, VT, Custom);
686         setOperationAction(ISD::UDIV, VT, Custom);
687         setOperationAction(ISD::UREM, VT, Custom);
688         setOperationAction(ISD::SHL, VT, Custom);
689         setOperationAction(ISD::SRA, VT, Custom);
690         setOperationAction(ISD::SRL, VT, Custom);
691 
692         setOperationAction(ISD::SMIN, VT, Custom);
693         setOperationAction(ISD::SMAX, VT, Custom);
694         setOperationAction(ISD::UMIN, VT, Custom);
695         setOperationAction(ISD::UMAX, VT, Custom);
696         setOperationAction(ISD::ABS,  VT, Custom);
697 
698         setOperationAction(ISD::MULHS, VT, Custom);
699         setOperationAction(ISD::MULHU, VT, Custom);
700 
701         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
702         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
703         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
704         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
705 
706         setOperationAction(ISD::VSELECT, VT, Custom);
707         setOperationAction(ISD::SELECT, VT, Expand);
708         setOperationAction(ISD::SELECT_CC, VT, Expand);
709 
710         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
711         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
712         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
713 
714         // Custom-lower reduction operations to set up the corresponding custom
715         // nodes' operands.
716         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
717         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
718         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
719         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
720         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
721 
722         for (unsigned VPOpc : IntegerVPOps) {
723           setOperationAction(VPOpc, VT, Custom);
724           // RV64 must custom-legalize the i32 EVL parameter.
725           if (Subtarget.is64Bit())
726             setOperationAction(VPOpc, MVT::i32, Custom);
727         }
728       }
729 
730       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
731         if (!useRVVForFixedLengthVectorVT(VT))
732           continue;
733 
734         // By default everything must be expanded.
735         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
736           setOperationAction(Op, VT, Expand);
737         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
738           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
739           setTruncStoreAction(VT, OtherVT, Expand);
740         }
741 
742         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
743         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
744         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
745 
746         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
747         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
748         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
749         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
750 
751         setOperationAction(ISD::LOAD, VT, Custom);
752         setOperationAction(ISD::STORE, VT, Custom);
753         setOperationAction(ISD::MLOAD, VT, Custom);
754         setOperationAction(ISD::MSTORE, VT, Custom);
755         setOperationAction(ISD::MGATHER, VT, Custom);
756         setOperationAction(ISD::MSCATTER, VT, Custom);
757         setOperationAction(ISD::FADD, VT, Custom);
758         setOperationAction(ISD::FSUB, VT, Custom);
759         setOperationAction(ISD::FMUL, VT, Custom);
760         setOperationAction(ISD::FDIV, VT, Custom);
761         setOperationAction(ISD::FNEG, VT, Custom);
762         setOperationAction(ISD::FABS, VT, Custom);
763         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
764         setOperationAction(ISD::FSQRT, VT, Custom);
765         setOperationAction(ISD::FMA, VT, Custom);
766         setOperationAction(ISD::FMINNUM, VT, Custom);
767         setOperationAction(ISD::FMAXNUM, VT, Custom);
768 
769         setOperationAction(ISD::FP_ROUND, VT, Custom);
770         setOperationAction(ISD::FP_EXTEND, VT, Custom);
771 
772         for (auto CC : VFPCCToExpand)
773           setCondCodeAction(CC, VT, Expand);
774 
775         setOperationAction(ISD::VSELECT, VT, Custom);
776         setOperationAction(ISD::SELECT, VT, Expand);
777         setOperationAction(ISD::SELECT_CC, VT, Expand);
778 
779         setOperationAction(ISD::BITCAST, VT, Custom);
780 
781         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
782         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
783         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
784         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
785       }
786 
787       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
788       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
789       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
790       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
791       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
792       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
793       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
794       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
795     }
796   }
797 
798   // Function alignments.
799   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
800   setMinFunctionAlignment(FunctionAlignment);
801   setPrefFunctionAlignment(FunctionAlignment);
802 
803   setMinimumJumpTableEntries(5);
804 
805   // Jumps are expensive, compared to logic
806   setJumpIsExpensive();
807 
808   // We can use any register for comparisons
809   setHasMultipleConditionRegisters();
810 
811   setTargetDAGCombine(ISD::AND);
812   setTargetDAGCombine(ISD::OR);
813   setTargetDAGCombine(ISD::XOR);
814   if (Subtarget.hasStdExtV()) {
815     setTargetDAGCombine(ISD::FCOPYSIGN);
816     setTargetDAGCombine(ISD::MGATHER);
817     setTargetDAGCombine(ISD::MSCATTER);
818   }
819 }
820 
821 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
822                                             LLVMContext &Context,
823                                             EVT VT) const {
824   if (!VT.isVector())
825     return getPointerTy(DL);
826   if (Subtarget.hasStdExtV() &&
827       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
828     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
829   return VT.changeVectorElementTypeToInteger();
830 }
831 
832 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
833                                              const CallInst &I,
834                                              MachineFunction &MF,
835                                              unsigned Intrinsic) const {
836   switch (Intrinsic) {
837   default:
838     return false;
839   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
840   case Intrinsic::riscv_masked_atomicrmw_add_i32:
841   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
842   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
843   case Intrinsic::riscv_masked_atomicrmw_max_i32:
844   case Intrinsic::riscv_masked_atomicrmw_min_i32:
845   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
846   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
847   case Intrinsic::riscv_masked_cmpxchg_i32:
848     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
849     Info.opc = ISD::INTRINSIC_W_CHAIN;
850     Info.memVT = MVT::getVT(PtrTy->getElementType());
851     Info.ptrVal = I.getArgOperand(0);
852     Info.offset = 0;
853     Info.align = Align(4);
854     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
855                  MachineMemOperand::MOVolatile;
856     return true;
857   }
858 }
859 
860 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
861                                                 const AddrMode &AM, Type *Ty,
862                                                 unsigned AS,
863                                                 Instruction *I) const {
864   // No global is ever allowed as a base.
865   if (AM.BaseGV)
866     return false;
867 
868   // Require a 12-bit signed offset.
869   if (!isInt<12>(AM.BaseOffs))
870     return false;
871 
872   switch (AM.Scale) {
873   case 0: // "r+i" or just "i", depending on HasBaseReg.
874     break;
875   case 1:
876     if (!AM.HasBaseReg) // allow "r+i".
877       break;
878     return false; // disallow "r+r" or "r+r+i".
879   default:
880     return false;
881   }
882 
883   return true;
884 }
885 
886 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
887   return isInt<12>(Imm);
888 }
889 
890 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
891   return isInt<12>(Imm);
892 }
893 
894 // On RV32, 64-bit integers are split into their high and low parts and held
895 // in two different registers, so the trunc is free since the low register can
896 // just be used.
897 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
898   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
899     return false;
900   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
901   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
902   return (SrcBits == 64 && DestBits == 32);
903 }
904 
905 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
906   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
907       !SrcVT.isInteger() || !DstVT.isInteger())
908     return false;
909   unsigned SrcBits = SrcVT.getSizeInBits();
910   unsigned DestBits = DstVT.getSizeInBits();
911   return (SrcBits == 64 && DestBits == 32);
912 }
913 
914 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
915   // Zexts are free if they can be combined with a load.
916   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
917     EVT MemVT = LD->getMemoryVT();
918     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
919          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
920         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
921          LD->getExtensionType() == ISD::ZEXTLOAD))
922       return true;
923   }
924 
925   return TargetLowering::isZExtFree(Val, VT2);
926 }
927 
928 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
929   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
930 }
931 
932 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
933   return Subtarget.hasStdExtZbb();
934 }
935 
936 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
937   return Subtarget.hasStdExtZbb();
938 }
939 
940 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
941                                        bool ForCodeSize) const {
942   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
943     return false;
944   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
945     return false;
946   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
947     return false;
948   if (Imm.isNegZero())
949     return false;
950   return Imm.isZero();
951 }
952 
953 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
954   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
955          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
956          (VT == MVT::f64 && Subtarget.hasStdExtD());
957 }
958 
959 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
960                                                       CallingConv::ID CC,
961                                                       EVT VT) const {
962   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
963   // end up using a GPR but that will be decided based on ABI.
964   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
965     return MVT::f32;
966 
967   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
968 }
969 
970 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
971                                                            CallingConv::ID CC,
972                                                            EVT VT) const {
973   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
974   // end up using a GPR but that will be decided based on ABI.
975   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
976     return 1;
977 
978   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
979 }
980 
981 // Changes the condition code and swaps operands if necessary, so the SetCC
982 // operation matches one of the comparisons supported directly by branches
983 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
984 // with 1/-1.
985 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
986                                     ISD::CondCode &CC, SelectionDAG &DAG) {
987   // Convert X > -1 to X >= 0.
988   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
989     RHS = DAG.getConstant(0, DL, RHS.getValueType());
990     CC = ISD::SETGE;
991     return;
992   }
993   // Convert X < 1 to 0 >= X.
994   if (CC == ISD::SETLT && isOneConstant(RHS)) {
995     RHS = LHS;
996     LHS = DAG.getConstant(0, DL, RHS.getValueType());
997     CC = ISD::SETGE;
998     return;
999   }
1000 
1001   switch (CC) {
1002   default:
1003     break;
1004   case ISD::SETGT:
1005   case ISD::SETLE:
1006   case ISD::SETUGT:
1007   case ISD::SETULE:
1008     CC = ISD::getSetCCSwappedOperands(CC);
1009     std::swap(LHS, RHS);
1010     break;
1011   }
1012 }
1013 
1014 // Return the RISC-V branch opcode that matches the given DAG integer
1015 // condition code. The CondCode must be one of those supported by the RISC-V
1016 // ISA (see translateSetCCForBranch).
1017 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
1018   switch (CC) {
1019   default:
1020     llvm_unreachable("Unsupported CondCode");
1021   case ISD::SETEQ:
1022     return RISCV::BEQ;
1023   case ISD::SETNE:
1024     return RISCV::BNE;
1025   case ISD::SETLT:
1026     return RISCV::BLT;
1027   case ISD::SETGE:
1028     return RISCV::BGE;
1029   case ISD::SETULT:
1030     return RISCV::BLTU;
1031   case ISD::SETUGE:
1032     return RISCV::BGEU;
1033   }
1034 }
1035 
1036 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1037   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1038   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1039   if (VT.getVectorElementType() == MVT::i1)
1040     KnownSize *= 8;
1041 
1042   switch (KnownSize) {
1043   default:
1044     llvm_unreachable("Invalid LMUL.");
1045   case 8:
1046     return RISCVII::VLMUL::LMUL_F8;
1047   case 16:
1048     return RISCVII::VLMUL::LMUL_F4;
1049   case 32:
1050     return RISCVII::VLMUL::LMUL_F2;
1051   case 64:
1052     return RISCVII::VLMUL::LMUL_1;
1053   case 128:
1054     return RISCVII::VLMUL::LMUL_2;
1055   case 256:
1056     return RISCVII::VLMUL::LMUL_4;
1057   case 512:
1058     return RISCVII::VLMUL::LMUL_8;
1059   }
1060 }
1061 
1062 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1063   switch (LMul) {
1064   default:
1065     llvm_unreachable("Invalid LMUL.");
1066   case RISCVII::VLMUL::LMUL_F8:
1067   case RISCVII::VLMUL::LMUL_F4:
1068   case RISCVII::VLMUL::LMUL_F2:
1069   case RISCVII::VLMUL::LMUL_1:
1070     return RISCV::VRRegClassID;
1071   case RISCVII::VLMUL::LMUL_2:
1072     return RISCV::VRM2RegClassID;
1073   case RISCVII::VLMUL::LMUL_4:
1074     return RISCV::VRM4RegClassID;
1075   case RISCVII::VLMUL::LMUL_8:
1076     return RISCV::VRM8RegClassID;
1077   }
1078 }
1079 
1080 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1081   RISCVII::VLMUL LMUL = getLMUL(VT);
1082   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1083       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1084       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1085       LMUL == RISCVII::VLMUL::LMUL_1) {
1086     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1087                   "Unexpected subreg numbering");
1088     return RISCV::sub_vrm1_0 + Index;
1089   }
1090   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1091     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1092                   "Unexpected subreg numbering");
1093     return RISCV::sub_vrm2_0 + Index;
1094   }
1095   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1096     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1097                   "Unexpected subreg numbering");
1098     return RISCV::sub_vrm4_0 + Index;
1099   }
1100   llvm_unreachable("Invalid vector type.");
1101 }
1102 
1103 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1104   if (VT.getVectorElementType() == MVT::i1)
1105     return RISCV::VRRegClassID;
1106   return getRegClassIDForLMUL(getLMUL(VT));
1107 }
1108 
1109 // Attempt to decompose a subvector insert/extract between VecVT and
1110 // SubVecVT via subregister indices. Returns the subregister index that
1111 // can perform the subvector insert/extract with the given element index, as
1112 // well as the index corresponding to any leftover subvectors that must be
1113 // further inserted/extracted within the register class for SubVecVT.
1114 std::pair<unsigned, unsigned>
1115 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1116     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1117     const RISCVRegisterInfo *TRI) {
1118   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1119                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1120                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1121                 "Register classes not ordered");
1122   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1123   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1124   // Try to compose a subregister index that takes us from the incoming
1125   // LMUL>1 register class down to the outgoing one. At each step we half
1126   // the LMUL:
1127   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1128   // Note that this is not guaranteed to find a subregister index, such as
1129   // when we are extracting from one VR type to another.
1130   unsigned SubRegIdx = RISCV::NoSubRegister;
1131   for (const unsigned RCID :
1132        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1133     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1134       VecVT = VecVT.getHalfNumVectorElementsVT();
1135       bool IsHi =
1136           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1137       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1138                                             getSubregIndexByMVT(VecVT, IsHi));
1139       if (IsHi)
1140         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1141     }
1142   return {SubRegIdx, InsertExtractIdx};
1143 }
1144 
1145 static bool useRVVForFixedLengthVectorVT(MVT VT,
1146                                          const RISCVSubtarget &Subtarget) {
1147   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1148   if (!Subtarget.useRVVForFixedLengthVectors())
1149     return false;
1150 
1151   // We only support a set of vector types with an equivalent number of
1152   // elements to avoid legalization issues. Therefore -- since we don't have
1153   // v512i8/v512i16/etc -- the longest fixed-length vector type we support has
1154   // 256 elements.
1155   if (VT.getVectorNumElements() > 256)
1156     return false;
1157 
1158   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1159 
1160   // Don't use RVV for vectors we cannot scalarize if required.
1161   switch (VT.getVectorElementType().SimpleTy) {
1162   // i1 is supported but has different rules.
1163   default:
1164     return false;
1165   case MVT::i1:
1166     // Masks can only use a single register.
1167     if (VT.getVectorNumElements() > MinVLen)
1168       return false;
1169     MinVLen /= 8;
1170     break;
1171   case MVT::i8:
1172   case MVT::i16:
1173   case MVT::i32:
1174   case MVT::i64:
1175     break;
1176   case MVT::f16:
1177     if (!Subtarget.hasStdExtZfh())
1178       return false;
1179     break;
1180   case MVT::f32:
1181     if (!Subtarget.hasStdExtF())
1182       return false;
1183     break;
1184   case MVT::f64:
1185     if (!Subtarget.hasStdExtD())
1186       return false;
1187     break;
1188   }
1189 
1190   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1191   // Don't use RVV for types that don't fit.
1192   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1193     return false;
1194 
1195   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1196   // the base fixed length RVV support in place.
1197   if (!VT.isPow2VectorType())
1198     return false;
1199 
1200   return true;
1201 }
1202 
1203 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1204   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1205 }
1206 
1207 // Return the largest legal scalable vector type that matches VT's element type.
1208 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1209                                             const RISCVSubtarget &Subtarget) {
1210   // This may be called before legal types are setup.
1211   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1212           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1213          "Expected legal fixed length vector!");
1214 
1215   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1216 
1217   MVT EltVT = VT.getVectorElementType();
1218   switch (EltVT.SimpleTy) {
1219   default:
1220     llvm_unreachable("unexpected element type for RVV container");
1221   case MVT::i1:
1222   case MVT::i8:
1223   case MVT::i16:
1224   case MVT::i32:
1225   case MVT::i64:
1226   case MVT::f16:
1227   case MVT::f32:
1228   case MVT::f64: {
1229     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1230     // narrower types, but we can't have a fractional LMUL with demoninator less
1231     // than 64/SEW.
1232     unsigned NumElts =
1233         divideCeil(VT.getVectorNumElements(), MinVLen / RISCV::RVVBitsPerBlock);
1234     return MVT::getScalableVectorVT(EltVT, NumElts);
1235   }
1236   }
1237 }
1238 
1239 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1240                                             const RISCVSubtarget &Subtarget) {
1241   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1242                                           Subtarget);
1243 }
1244 
1245 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1246   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1247 }
1248 
1249 // Grow V to consume an entire RVV register.
1250 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1251                                        const RISCVSubtarget &Subtarget) {
1252   assert(VT.isScalableVector() &&
1253          "Expected to convert into a scalable vector!");
1254   assert(V.getValueType().isFixedLengthVector() &&
1255          "Expected a fixed length vector operand!");
1256   SDLoc DL(V);
1257   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1258   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1259 }
1260 
1261 // Shrink V so it's just big enough to maintain a VT's worth of data.
1262 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1263                                          const RISCVSubtarget &Subtarget) {
1264   assert(VT.isFixedLengthVector() &&
1265          "Expected to convert into a fixed length vector!");
1266   assert(V.getValueType().isScalableVector() &&
1267          "Expected a scalable vector operand!");
1268   SDLoc DL(V);
1269   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1270   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1271 }
1272 
1273 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1274 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1275 // the vector type that it is contained in.
1276 static std::pair<SDValue, SDValue>
1277 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1278                 const RISCVSubtarget &Subtarget) {
1279   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1280   MVT XLenVT = Subtarget.getXLenVT();
1281   SDValue VL = VecVT.isFixedLengthVector()
1282                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1283                    : DAG.getRegister(RISCV::X0, XLenVT);
1284   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1285   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1286   return {Mask, VL};
1287 }
1288 
1289 // As above but assuming the given type is a scalable vector type.
1290 static std::pair<SDValue, SDValue>
1291 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1292                         const RISCVSubtarget &Subtarget) {
1293   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1294   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1295 }
1296 
1297 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1298 // of either is (currently) supported. This can get us into an infinite loop
1299 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1300 // as a ..., etc.
1301 // Until either (or both) of these can reliably lower any node, reporting that
1302 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1303 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1304 // which is not desirable.
1305 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1306     EVT VT, unsigned DefinedValues) const {
1307   return false;
1308 }
1309 
1310 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1311   // Only splats are currently supported.
1312   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1313     return true;
1314 
1315   return false;
1316 }
1317 
1318 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1319                                  const RISCVSubtarget &Subtarget) {
1320   MVT VT = Op.getSimpleValueType();
1321   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1322 
1323   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1324 
1325   SDLoc DL(Op);
1326   SDValue Mask, VL;
1327   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1328 
1329   unsigned Opc =
1330       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1331   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1332   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1333 }
1334 
1335 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1336                                  const RISCVSubtarget &Subtarget) {
1337   MVT VT = Op.getSimpleValueType();
1338   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1339 
1340   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1341 
1342   SDLoc DL(Op);
1343   SDValue Mask, VL;
1344   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1345 
1346   MVT XLenVT = Subtarget.getXLenVT();
1347   unsigned NumElts = Op.getNumOperands();
1348 
1349   if (VT.getVectorElementType() == MVT::i1) {
1350     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1351       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1352       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1353     }
1354 
1355     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1356       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1357       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1358     }
1359 
1360     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1361     // scalar integer chunks whose bit-width depends on the number of mask
1362     // bits and XLEN.
1363     // First, determine the most appropriate scalar integer type to use. This
1364     // is at most XLenVT, but may be shrunk to a smaller vector element type
1365     // according to the size of the final vector - use i8 chunks rather than
1366     // XLenVT if we're producing a v8i1. This results in more consistent
1367     // codegen across RV32 and RV64.
1368     // If we have to use more than one INSERT_VECTOR_ELT then this optimization
1369     // is likely to increase code size; avoid peforming it in such a case.
1370     unsigned NumViaIntegerBits =
1371         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1372     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1373         (!DAG.shouldOptForSize() || NumElts <= NumViaIntegerBits)) {
1374       // Now we can create our integer vector type. Note that it may be larger
1375       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1376       MVT IntegerViaVecVT =
1377           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1378                            divideCeil(NumElts, NumViaIntegerBits));
1379 
1380       uint64_t Bits = 0;
1381       unsigned BitPos = 0, IntegerEltIdx = 0;
1382       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1383 
1384       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1385         // Once we accumulate enough bits to fill our scalar type, insert into
1386         // our vector and clear our accumulated data.
1387         if (I != 0 && I % NumViaIntegerBits == 0) {
1388           if (NumViaIntegerBits <= 32)
1389             Bits = SignExtend64(Bits, 32);
1390           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1391           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1392                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1393           Bits = 0;
1394           BitPos = 0;
1395           IntegerEltIdx++;
1396         }
1397         SDValue V = Op.getOperand(I);
1398         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1399         Bits |= ((uint64_t)BitValue << BitPos);
1400       }
1401 
1402       // Insert the (remaining) scalar value into position in our integer
1403       // vector type.
1404       if (NumViaIntegerBits <= 32)
1405         Bits = SignExtend64(Bits, 32);
1406       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1407       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1408                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1409 
1410       if (NumElts < NumViaIntegerBits) {
1411         // If we're producing a smaller vector than our minimum legal integer
1412         // type, bitcast to the equivalent (known-legal) mask type, and extract
1413         // our final mask.
1414         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1415         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1416         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1417                           DAG.getConstant(0, DL, XLenVT));
1418       } else {
1419         // Else we must have produced an integer type with the same size as the
1420         // mask type; bitcast for the final result.
1421         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1422         Vec = DAG.getBitcast(VT, Vec);
1423       }
1424 
1425       return Vec;
1426     }
1427 
1428     // A splat can be lowered as a SETCC. For each fixed-length mask vector
1429     // type, we have a legal equivalently-sized i8 type, so we can use that.
1430     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1431       assert(Splat.getValueType() == XLenVT &&
1432              "Unexpected type for i1 splat value");
1433       MVT InterVT = VT.changeVectorElementType(MVT::i8);
1434       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
1435                           DAG.getConstant(1, DL, XLenVT));
1436       Splat = DAG.getSplatBuildVector(InterVT, DL, Splat);
1437       SDValue Zero = DAG.getConstant(0, DL, InterVT);
1438       return DAG.getSetCC(DL, VT, Splat, Zero, ISD::SETNE);
1439     }
1440 
1441     return SDValue();
1442   }
1443 
1444   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1445     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1446                                         : RISCVISD::VMV_V_X_VL;
1447     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1448     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1449   }
1450 
1451   // Try and match an index sequence, which we can lower directly to the vid
1452   // instruction. An all-undef vector is matched by getSplatValue, above.
1453   if (VT.isInteger()) {
1454     bool IsVID = true;
1455     for (unsigned I = 0; I < NumElts && IsVID; I++)
1456       IsVID &= Op.getOperand(I).isUndef() ||
1457                (isa<ConstantSDNode>(Op.getOperand(I)) &&
1458                 Op.getConstantOperandVal(I) == I);
1459 
1460     if (IsVID) {
1461       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1462       return convertFromScalableVector(VT, VID, DAG, Subtarget);
1463     }
1464   }
1465 
1466   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1467   // when re-interpreted as a vector with a larger element type. For example,
1468   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
1469   // could be instead splat as
1470   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1471   // TODO: This optimization could also work on non-constant splats, but it
1472   // would require bit-manipulation instructions to construct the splat value.
1473   SmallVector<SDValue> Sequence;
1474   unsigned EltBitSize = VT.getScalarSizeInBits();
1475   const auto *BV = cast<BuildVectorSDNode>(Op);
1476   if (VT.isInteger() && EltBitSize < 64 &&
1477       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1478       BV->getRepeatedSequence(Sequence) &&
1479       (Sequence.size() * EltBitSize) <= 64) {
1480     unsigned SeqLen = Sequence.size();
1481     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1482     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1483     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1484             ViaIntVT == MVT::i64) &&
1485            "Unexpected sequence type");
1486 
1487     unsigned EltIdx = 0;
1488     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1489     uint64_t SplatValue = 0;
1490     // Construct the amalgamated value which can be splatted as this larger
1491     // vector type.
1492     for (const auto &SeqV : Sequence) {
1493       if (!SeqV.isUndef())
1494         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1495                        << (EltIdx * EltBitSize));
1496       EltIdx++;
1497     }
1498 
1499     // On RV64, sign-extend from 32 to 64 bits where possible in order to
1500     // achieve better constant materializion.
1501     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1502       SplatValue = SignExtend64(SplatValue, 32);
1503 
1504     // Since we can't introduce illegal i64 types at this stage, we can only
1505     // perform an i64 splat on RV32 if it is its own sign-extended value. That
1506     // way we can use RVV instructions to splat.
1507     assert((ViaIntVT.bitsLE(XLenVT) ||
1508             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1509            "Unexpected bitcast sequence");
1510     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1511       SDValue ViaVL =
1512           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1513       MVT ViaContainerVT =
1514           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
1515       SDValue Splat =
1516           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1517                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1518       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1519       return DAG.getBitcast(VT, Splat);
1520     }
1521   }
1522 
1523   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1524   // which constitute a large proportion of the elements. In such cases we can
1525   // splat a vector with the dominant element and make up the shortfall with
1526   // INSERT_VECTOR_ELTs.
1527   // Note that this includes vectors of 2 elements by association. The
1528   // upper-most element is the "dominant" one, allowing us to use a splat to
1529   // "insert" the upper element, and an insert of the lower element at position
1530   // 0, which improves codegen.
1531   SDValue DominantValue;
1532   unsigned MostCommonCount = 0;
1533   DenseMap<SDValue, unsigned> ValueCounts;
1534   unsigned NumUndefElts =
1535       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1536 
1537   for (SDValue V : Op->op_values()) {
1538     if (V.isUndef())
1539       continue;
1540 
1541     ValueCounts.insert(std::make_pair(V, 0));
1542     unsigned &Count = ValueCounts[V];
1543 
1544     // Is this value dominant? In case of a tie, prefer the highest element as
1545     // it's cheaper to insert near the beginning of a vector than it is at the
1546     // end.
1547     if (++Count >= MostCommonCount) {
1548       DominantValue = V;
1549       MostCommonCount = Count;
1550     }
1551   }
1552 
1553   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
1554   unsigned NumDefElts = NumElts - NumUndefElts;
1555   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1556 
1557   // Don't perform this optimization when optimizing for size, since
1558   // materializing elements and inserting them tends to cause code bloat.
1559   if (!DAG.shouldOptForSize() &&
1560       ((MostCommonCount > DominantValueCountThreshold) ||
1561        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
1562     // Start by splatting the most common element.
1563     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1564 
1565     DenseSet<SDValue> Processed{DominantValue};
1566     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
1567     for (const auto &OpIdx : enumerate(Op->ops())) {
1568       const SDValue &V = OpIdx.value();
1569       if (V.isUndef() || !Processed.insert(V).second)
1570         continue;
1571       if (ValueCounts[V] == 1) {
1572         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
1573                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
1574       } else {
1575         // Blend in all instances of this value using a VSELECT, using a
1576         // mask where each bit signals whether that element is the one
1577         // we're after.
1578         SmallVector<SDValue> Ops;
1579         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
1580           return DAG.getConstant(V == V1, DL, XLenVT);
1581         });
1582         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
1583                           DAG.getBuildVector(SelMaskTy, DL, Ops),
1584                           DAG.getSplatBuildVector(VT, DL, V), Vec);
1585       }
1586     }
1587 
1588     return Vec;
1589   }
1590 
1591   return SDValue();
1592 }
1593 
1594 // Use a stack slot to splat the two i32 values in Lo/Hi to the vector desired
1595 // vector nxvXi64 VT.
1596 static SDValue splatPartsI64ThroughStack(const SDLoc &DL, MVT VT, SDValue Lo,
1597                                          SDValue Hi, SDValue VL,
1598                                          SelectionDAG &DAG) {
1599   assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
1600          Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
1601          "Unexpected VTs!");
1602   MachineFunction &MF = DAG.getMachineFunction();
1603   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
1604 
1605   // We use the same frame index we use for moving two i32s into 64-bit FPR.
1606   // This is an analogous operation.
1607   int FI = FuncInfo->getMoveF64FrameIndex(MF);
1608   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
1609   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1610   SDValue StackSlot =
1611       DAG.getFrameIndex(FI, TLI.getPointerTy(DAG.getDataLayout()));
1612 
1613   SDValue Chain = DAG.getEntryNode();
1614   Lo = DAG.getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
1615 
1616   SDValue OffsetSlot =
1617       DAG.getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
1618   Hi = DAG.getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4), Align(8));
1619 
1620   Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
1621 
1622   SDVTList VTs = DAG.getVTList({VT, MVT::Other});
1623   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
1624   SDValue Ops[] = {Chain, IntID, StackSlot,
1625                    DAG.getRegister(RISCV::X0, MVT::i64), VL};
1626 
1627   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64,
1628                                  MPI, Align(8), MachineMemOperand::MOLoad);
1629 }
1630 
1631 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
1632                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
1633   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
1634     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
1635     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
1636     // If Hi constant is all the same sign bit as Lo, lower this as a custom
1637     // node in order to try and match RVV vector/scalar instructions.
1638     if ((LoC >> 31) == HiC)
1639       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
1640   }
1641 
1642   // Fall back to a stack store and stride x0 vector load.
1643   return splatPartsI64ThroughStack(DL, VT, Lo, Hi, VL, DAG);
1644 }
1645 
1646 // Called by type legalization to handle splat of i64 on RV32.
1647 // FIXME: We can optimize this when the type has sign or zero bits in one
1648 // of the halves.
1649 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
1650                                    SDValue VL, SelectionDAG &DAG) {
1651   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
1652   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1653                            DAG.getConstant(0, DL, MVT::i32));
1654   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1655                            DAG.getConstant(1, DL, MVT::i32));
1656   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
1657 }
1658 
1659 // This function lowers a splat of a scalar operand Splat with the vector
1660 // length VL. It ensures the final sequence is type legal, which is useful when
1661 // lowering a splat after type legalization.
1662 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
1663                                 SelectionDAG &DAG,
1664                                 const RISCVSubtarget &Subtarget) {
1665   if (VT.isFloatingPoint())
1666     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
1667 
1668   MVT XLenVT = Subtarget.getXLenVT();
1669 
1670   // Simplest case is that the operand needs to be promoted to XLenVT.
1671   if (Scalar.getValueType().bitsLE(XLenVT)) {
1672     // If the operand is a constant, sign extend to increase our chances
1673     // of being able to use a .vi instruction. ANY_EXTEND would become a
1674     // a zero extend and the simm5 check in isel would fail.
1675     // FIXME: Should we ignore the upper bits in isel instead?
1676     unsigned ExtOpc =
1677         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
1678     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
1679     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
1680   }
1681 
1682   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
1683          "Unexpected scalar for splat lowering!");
1684 
1685   // Otherwise use the more complicated splatting algorithm.
1686   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
1687 }
1688 
1689 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1690                                    const RISCVSubtarget &Subtarget) {
1691   SDValue V1 = Op.getOperand(0);
1692   SDValue V2 = Op.getOperand(1);
1693   SDLoc DL(Op);
1694   MVT XLenVT = Subtarget.getXLenVT();
1695   MVT VT = Op.getSimpleValueType();
1696   unsigned NumElts = VT.getVectorNumElements();
1697   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1698 
1699   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1700 
1701   SDValue TrueMask, VL;
1702   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1703 
1704   if (SVN->isSplat()) {
1705     const int Lane = SVN->getSplatIndex();
1706     if (Lane >= 0) {
1707       MVT SVT = VT.getVectorElementType();
1708 
1709       // Turn splatted vector load into a strided load with an X0 stride.
1710       SDValue V = V1;
1711       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
1712       // with undef.
1713       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
1714       int Offset = Lane;
1715       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
1716         int OpElements =
1717             V.getOperand(0).getSimpleValueType().getVectorNumElements();
1718         V = V.getOperand(Offset / OpElements);
1719         Offset %= OpElements;
1720       }
1721 
1722       // We need to ensure the load isn't atomic or volatile.
1723       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
1724         auto *Ld = cast<LoadSDNode>(V);
1725         Offset *= SVT.getStoreSize();
1726         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
1727                                                    TypeSize::Fixed(Offset), DL);
1728 
1729         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
1730         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
1731           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
1732           SDValue IntID =
1733               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1734           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
1735                            DAG.getRegister(RISCV::X0, XLenVT), VL};
1736           SDValue NewLoad = DAG.getMemIntrinsicNode(
1737               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
1738               DAG.getMachineFunction().getMachineMemOperand(
1739                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
1740           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
1741           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
1742         }
1743 
1744         // Otherwise use a scalar load and splat. This will give the best
1745         // opportunity to fold a splat into the operation. ISel can turn it into
1746         // the x0 strided load if we aren't able to fold away the select.
1747         if (SVT.isFloatingPoint())
1748           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
1749                           Ld->getPointerInfo().getWithOffset(Offset),
1750                           Ld->getOriginalAlign(),
1751                           Ld->getMemOperand()->getFlags());
1752         else
1753           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
1754                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
1755                              Ld->getOriginalAlign(),
1756                              Ld->getMemOperand()->getFlags());
1757         DAG.makeEquivalentMemoryOrdering(Ld, V);
1758 
1759         unsigned Opc =
1760             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1761         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
1762         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1763       }
1764 
1765       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1766       assert(Lane < (int)NumElts && "Unexpected lane!");
1767       SDValue Gather =
1768           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1769                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
1770       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1771     }
1772   }
1773 
1774   // Detect shuffles which can be re-expressed as vector selects; these are
1775   // shuffles in which each element in the destination is taken from an element
1776   // at the corresponding index in either source vectors.
1777   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
1778     int MaskIndex = MaskIdx.value();
1779     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
1780   });
1781 
1782   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
1783 
1784   SmallVector<SDValue> MaskVals;
1785   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
1786   // merged with a second vrgather.
1787   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
1788 
1789   // By default we preserve the original operand order, and use a mask to
1790   // select LHS as true and RHS as false. However, since RVV vector selects may
1791   // feature splats but only on the LHS, we may choose to invert our mask and
1792   // instead select between RHS and LHS.
1793   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
1794   bool InvertMask = IsSelect == SwapOps;
1795 
1796   // Now construct the mask that will be used by the vselect or blended
1797   // vrgather operation. For vrgathers, construct the appropriate indices into
1798   // each vector.
1799   for (int MaskIndex : SVN->getMask()) {
1800     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
1801     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
1802     if (!IsSelect) {
1803       bool IsLHS = MaskIndex < (int)NumElts;
1804       // For "undef" elements of -1, shuffle in element 0 instead.
1805       GatherIndicesLHS.push_back(
1806           DAG.getConstant(IsLHS ? std::max(MaskIndex, 0) : 0, DL, XLenVT));
1807       // TODO: If we're masking out unused elements anyway, it might produce
1808       // better code if we use the most-common element index instead of 0.
1809       GatherIndicesRHS.push_back(
1810           DAG.getConstant(IsLHS ? 0 : MaskIndex - NumElts, DL, XLenVT));
1811     }
1812   }
1813 
1814   if (SwapOps) {
1815     std::swap(V1, V2);
1816     std::swap(GatherIndicesLHS, GatherIndicesRHS);
1817   }
1818 
1819   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
1820   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
1821   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
1822 
1823   if (IsSelect)
1824     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
1825 
1826   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
1827     // On such a large vector we're unable to use i8 as the index type.
1828     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
1829     // may involve vector splitting if we're already at LMUL=8, or our
1830     // user-supplied maximum fixed-length LMUL.
1831     return SDValue();
1832   }
1833 
1834   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
1835   MVT IndexVT = VT.changeTypeToInteger();
1836   // Since we can't introduce illegal index types at this stage, use i16 and
1837   // vrgatherei16 if the corresponding index type for plain vrgather is greater
1838   // than XLenVT.
1839   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
1840     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
1841     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
1842   }
1843 
1844   MVT IndexContainerVT =
1845       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
1846 
1847   SDValue Gather;
1848   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
1849   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
1850   if (SDValue SplatValue = DAG.getSplatValue(V1)) {
1851     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
1852   } else {
1853     SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
1854     LHSIndices =
1855         convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
1856 
1857     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1858     Gather =
1859         DAG.getNode(GatherOpc, DL, ContainerVT, V1, LHSIndices, TrueMask, VL);
1860   }
1861 
1862   // If a second vector operand is used by this shuffle, blend it in with an
1863   // additional vrgather.
1864   if (!V2.isUndef()) {
1865     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
1866     SelectMask =
1867         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
1868 
1869     SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
1870     RHSIndices =
1871         convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
1872 
1873     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
1874     V2 = DAG.getNode(GatherOpc, DL, ContainerVT, V2, RHSIndices, TrueMask, VL);
1875     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
1876                          Gather, VL);
1877   }
1878 
1879   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1880 }
1881 
1882 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
1883                                      SDLoc DL, SelectionDAG &DAG,
1884                                      const RISCVSubtarget &Subtarget) {
1885   if (VT.isScalableVector())
1886     return DAG.getFPExtendOrRound(Op, DL, VT);
1887   assert(VT.isFixedLengthVector() &&
1888          "Unexpected value type for RVV FP extend/round lowering");
1889   SDValue Mask, VL;
1890   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1891   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
1892                         ? RISCVISD::FP_EXTEND_VL
1893                         : RISCVISD::FP_ROUND_VL;
1894   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
1895 }
1896 
1897 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
1898                                             SelectionDAG &DAG) const {
1899   switch (Op.getOpcode()) {
1900   default:
1901     report_fatal_error("unimplemented operand");
1902   case ISD::GlobalAddress:
1903     return lowerGlobalAddress(Op, DAG);
1904   case ISD::BlockAddress:
1905     return lowerBlockAddress(Op, DAG);
1906   case ISD::ConstantPool:
1907     return lowerConstantPool(Op, DAG);
1908   case ISD::JumpTable:
1909     return lowerJumpTable(Op, DAG);
1910   case ISD::GlobalTLSAddress:
1911     return lowerGlobalTLSAddress(Op, DAG);
1912   case ISD::SELECT:
1913     return lowerSELECT(Op, DAG);
1914   case ISD::BRCOND:
1915     return lowerBRCOND(Op, DAG);
1916   case ISD::VASTART:
1917     return lowerVASTART(Op, DAG);
1918   case ISD::FRAMEADDR:
1919     return lowerFRAMEADDR(Op, DAG);
1920   case ISD::RETURNADDR:
1921     return lowerRETURNADDR(Op, DAG);
1922   case ISD::SHL_PARTS:
1923     return lowerShiftLeftParts(Op, DAG);
1924   case ISD::SRA_PARTS:
1925     return lowerShiftRightParts(Op, DAG, true);
1926   case ISD::SRL_PARTS:
1927     return lowerShiftRightParts(Op, DAG, false);
1928   case ISD::BITCAST: {
1929     SDLoc DL(Op);
1930     EVT VT = Op.getValueType();
1931     SDValue Op0 = Op.getOperand(0);
1932     EVT Op0VT = Op0.getValueType();
1933     MVT XLenVT = Subtarget.getXLenVT();
1934     if (VT.isFixedLengthVector()) {
1935       // We can handle fixed length vector bitcasts with a simple replacement
1936       // in isel.
1937       if (Op0VT.isFixedLengthVector())
1938         return Op;
1939       // When bitcasting from scalar to fixed-length vector, insert the scalar
1940       // into a one-element vector of the result type, and perform a vector
1941       // bitcast.
1942       if (!Op0VT.isVector()) {
1943         auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
1944         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
1945                                               DAG.getUNDEF(BVT), Op0,
1946                                               DAG.getConstant(0, DL, XLenVT)));
1947       }
1948       return SDValue();
1949     }
1950     // Custom-legalize bitcasts from fixed-length vector types to scalar types
1951     // thus: bitcast the vector to a one-element vector type whose element type
1952     // is the same as the result type, and extract the first element.
1953     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
1954       LLVMContext &Context = *DAG.getContext();
1955       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
1956       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
1957                          DAG.getConstant(0, DL, XLenVT));
1958     }
1959     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
1960       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
1961       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
1962       return FPConv;
1963     }
1964     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
1965         Subtarget.hasStdExtF()) {
1966       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
1967       SDValue FPConv =
1968           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
1969       return FPConv;
1970     }
1971     return SDValue();
1972   }
1973   case ISD::INTRINSIC_WO_CHAIN:
1974     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
1975   case ISD::INTRINSIC_W_CHAIN:
1976     return LowerINTRINSIC_W_CHAIN(Op, DAG);
1977   case ISD::BSWAP:
1978   case ISD::BITREVERSE: {
1979     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
1980     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
1981     MVT VT = Op.getSimpleValueType();
1982     SDLoc DL(Op);
1983     // Start with the maximum immediate value which is the bitwidth - 1.
1984     unsigned Imm = VT.getSizeInBits() - 1;
1985     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
1986     if (Op.getOpcode() == ISD::BSWAP)
1987       Imm &= ~0x7U;
1988     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
1989                        DAG.getConstant(Imm, DL, VT));
1990   }
1991   case ISD::FSHL:
1992   case ISD::FSHR: {
1993     MVT VT = Op.getSimpleValueType();
1994     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
1995     SDLoc DL(Op);
1996     if (Op.getOperand(2).getOpcode() == ISD::Constant)
1997       return Op;
1998     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
1999     // use log(XLen) bits. Mask the shift amount accordingly.
2000     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2001     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2002                                 DAG.getConstant(ShAmtWidth, DL, VT));
2003     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
2004     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
2005   }
2006   case ISD::TRUNCATE: {
2007     SDLoc DL(Op);
2008     MVT VT = Op.getSimpleValueType();
2009     // Only custom-lower vector truncates
2010     if (!VT.isVector())
2011       return Op;
2012 
2013     // Truncates to mask types are handled differently
2014     if (VT.getVectorElementType() == MVT::i1)
2015       return lowerVectorMaskTrunc(Op, DAG);
2016 
2017     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2018     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2019     // truncate by one power of two at a time.
2020     MVT DstEltVT = VT.getVectorElementType();
2021 
2022     SDValue Src = Op.getOperand(0);
2023     MVT SrcVT = Src.getSimpleValueType();
2024     MVT SrcEltVT = SrcVT.getVectorElementType();
2025 
2026     assert(DstEltVT.bitsLT(SrcEltVT) &&
2027            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2028            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2029            "Unexpected vector truncate lowering");
2030 
2031     MVT ContainerVT = SrcVT;
2032     if (SrcVT.isFixedLengthVector()) {
2033       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2034       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2035     }
2036 
2037     SDValue Result = Src;
2038     SDValue Mask, VL;
2039     std::tie(Mask, VL) =
2040         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2041     LLVMContext &Context = *DAG.getContext();
2042     const ElementCount Count = ContainerVT.getVectorElementCount();
2043     do {
2044       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2045       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2046       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2047                            Mask, VL);
2048     } while (SrcEltVT != DstEltVT);
2049 
2050     if (SrcVT.isFixedLengthVector())
2051       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2052 
2053     return Result;
2054   }
2055   case ISD::ANY_EXTEND:
2056   case ISD::ZERO_EXTEND:
2057     if (Op.getOperand(0).getValueType().isVector() &&
2058         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2059       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2060     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2061   case ISD::SIGN_EXTEND:
2062     if (Op.getOperand(0).getValueType().isVector() &&
2063         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2064       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2065     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2066   case ISD::SPLAT_VECTOR_PARTS:
2067     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2068   case ISD::INSERT_VECTOR_ELT:
2069     return lowerINSERT_VECTOR_ELT(Op, DAG);
2070   case ISD::EXTRACT_VECTOR_ELT:
2071     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2072   case ISD::VSCALE: {
2073     MVT VT = Op.getSimpleValueType();
2074     SDLoc DL(Op);
2075     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
2076     // We define our scalable vector types for lmul=1 to use a 64 bit known
2077     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
2078     // vscale as VLENB / 8.
2079     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
2080     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2081                                  DAG.getConstant(3, DL, VT));
2082     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2083   }
2084   case ISD::FP_EXTEND: {
2085     // RVV can only do fp_extend to types double the size as the source. We
2086     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
2087     // via f32.
2088     SDLoc DL(Op);
2089     MVT VT = Op.getSimpleValueType();
2090     SDValue Src = Op.getOperand(0);
2091     MVT SrcVT = Src.getSimpleValueType();
2092 
2093     // Prepare any fixed-length vector operands.
2094     MVT ContainerVT = VT;
2095     if (SrcVT.isFixedLengthVector()) {
2096       ContainerVT = getContainerForFixedLengthVector(VT);
2097       MVT SrcContainerVT =
2098           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2099       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2100     }
2101 
2102     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2103         SrcVT.getVectorElementType() != MVT::f16) {
2104       // For scalable vectors, we only need to close the gap between
2105       // vXf16->vXf64.
2106       if (!VT.isFixedLengthVector())
2107         return Op;
2108       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2109       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2110       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2111     }
2112 
2113     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2114     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2115     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2116         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2117 
2118     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2119                                            DL, DAG, Subtarget);
2120     if (VT.isFixedLengthVector())
2121       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2122     return Extend;
2123   }
2124   case ISD::FP_ROUND: {
2125     // RVV can only do fp_round to types half the size as the source. We
2126     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2127     // conversion instruction.
2128     SDLoc DL(Op);
2129     MVT VT = Op.getSimpleValueType();
2130     SDValue Src = Op.getOperand(0);
2131     MVT SrcVT = Src.getSimpleValueType();
2132 
2133     // Prepare any fixed-length vector operands.
2134     MVT ContainerVT = VT;
2135     if (VT.isFixedLengthVector()) {
2136       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2137       ContainerVT =
2138           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2139       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2140     }
2141 
2142     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2143         SrcVT.getVectorElementType() != MVT::f64) {
2144       // For scalable vectors, we only need to close the gap between
2145       // vXf64<->vXf16.
2146       if (!VT.isFixedLengthVector())
2147         return Op;
2148       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2149       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2150       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2151     }
2152 
2153     SDValue Mask, VL;
2154     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2155 
2156     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2157     SDValue IntermediateRound =
2158         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2159     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2160                                           DL, DAG, Subtarget);
2161 
2162     if (VT.isFixedLengthVector())
2163       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2164     return Round;
2165   }
2166   case ISD::FP_TO_SINT:
2167   case ISD::FP_TO_UINT:
2168   case ISD::SINT_TO_FP:
2169   case ISD::UINT_TO_FP: {
2170     // RVV can only do fp<->int conversions to types half/double the size as
2171     // the source. We custom-lower any conversions that do two hops into
2172     // sequences.
2173     MVT VT = Op.getSimpleValueType();
2174     if (!VT.isVector())
2175       return Op;
2176     SDLoc DL(Op);
2177     SDValue Src = Op.getOperand(0);
2178     MVT EltVT = VT.getVectorElementType();
2179     MVT SrcVT = Src.getSimpleValueType();
2180     MVT SrcEltVT = SrcVT.getVectorElementType();
2181     unsigned EltSize = EltVT.getSizeInBits();
2182     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2183     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2184            "Unexpected vector element types");
2185 
2186     bool IsInt2FP = SrcEltVT.isInteger();
2187     // Widening conversions
2188     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2189       if (IsInt2FP) {
2190         // Do a regular integer sign/zero extension then convert to float.
2191         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2192                                       VT.getVectorElementCount());
2193         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2194                                  ? ISD::ZERO_EXTEND
2195                                  : ISD::SIGN_EXTEND;
2196         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2197         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2198       }
2199       // FP2Int
2200       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2201       // Do one doubling fp_extend then complete the operation by converting
2202       // to int.
2203       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2204       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2205       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2206     }
2207 
2208     // Narrowing conversions
2209     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2210       if (IsInt2FP) {
2211         // One narrowing int_to_fp, then an fp_round.
2212         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2213         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2214         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2215         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2216       }
2217       // FP2Int
2218       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2219       // representable by the integer, the result is poison.
2220       MVT IVecVT =
2221           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2222                            VT.getVectorElementCount());
2223       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2224       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2225     }
2226 
2227     // Scalable vectors can exit here. Patterns will handle equally-sized
2228     // conversions halving/doubling ones.
2229     if (!VT.isFixedLengthVector())
2230       return Op;
2231 
2232     // For fixed-length vectors we lower to a custom "VL" node.
2233     unsigned RVVOpc = 0;
2234     switch (Op.getOpcode()) {
2235     default:
2236       llvm_unreachable("Impossible opcode");
2237     case ISD::FP_TO_SINT:
2238       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2239       break;
2240     case ISD::FP_TO_UINT:
2241       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2242       break;
2243     case ISD::SINT_TO_FP:
2244       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2245       break;
2246     case ISD::UINT_TO_FP:
2247       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2248       break;
2249     }
2250 
2251     MVT ContainerVT, SrcContainerVT;
2252     // Derive the reference container type from the larger vector type.
2253     if (SrcEltSize > EltSize) {
2254       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2255       ContainerVT =
2256           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2257     } else {
2258       ContainerVT = getContainerForFixedLengthVector(VT);
2259       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2260     }
2261 
2262     SDValue Mask, VL;
2263     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2264 
2265     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2266     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2267     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2268   }
2269   case ISD::VECREDUCE_ADD:
2270   case ISD::VECREDUCE_UMAX:
2271   case ISD::VECREDUCE_SMAX:
2272   case ISD::VECREDUCE_UMIN:
2273   case ISD::VECREDUCE_SMIN:
2274     return lowerVECREDUCE(Op, DAG);
2275   case ISD::VECREDUCE_AND:
2276   case ISD::VECREDUCE_OR:
2277   case ISD::VECREDUCE_XOR:
2278     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2279       return lowerVectorMaskVECREDUCE(Op, DAG);
2280     return lowerVECREDUCE(Op, DAG);
2281   case ISD::VECREDUCE_FADD:
2282   case ISD::VECREDUCE_SEQ_FADD:
2283   case ISD::VECREDUCE_FMIN:
2284   case ISD::VECREDUCE_FMAX:
2285     return lowerFPVECREDUCE(Op, DAG);
2286   case ISD::INSERT_SUBVECTOR:
2287     return lowerINSERT_SUBVECTOR(Op, DAG);
2288   case ISD::EXTRACT_SUBVECTOR:
2289     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2290   case ISD::STEP_VECTOR:
2291     return lowerSTEP_VECTOR(Op, DAG);
2292   case ISD::VECTOR_REVERSE:
2293     return lowerVECTOR_REVERSE(Op, DAG);
2294   case ISD::BUILD_VECTOR:
2295     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2296   case ISD::SPLAT_VECTOR:
2297     if (Op.getValueType().getVectorElementType() == MVT::i1)
2298       return lowerVectorMaskSplat(Op, DAG);
2299     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
2300   case ISD::VECTOR_SHUFFLE:
2301     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2302   case ISD::CONCAT_VECTORS: {
2303     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2304     // better than going through the stack, as the default expansion does.
2305     SDLoc DL(Op);
2306     MVT VT = Op.getSimpleValueType();
2307     unsigned NumOpElts =
2308         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2309     SDValue Vec = DAG.getUNDEF(VT);
2310     for (const auto &OpIdx : enumerate(Op->ops()))
2311       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2312                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2313     return Vec;
2314   }
2315   case ISD::LOAD:
2316     return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2317   case ISD::STORE:
2318     return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2319   case ISD::MLOAD:
2320     return lowerMLOAD(Op, DAG);
2321   case ISD::MSTORE:
2322     return lowerMSTORE(Op, DAG);
2323   case ISD::SETCC:
2324     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2325   case ISD::ADD:
2326     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2327   case ISD::SUB:
2328     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2329   case ISD::MUL:
2330     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2331   case ISD::MULHS:
2332     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2333   case ISD::MULHU:
2334     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2335   case ISD::AND:
2336     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2337                                               RISCVISD::AND_VL);
2338   case ISD::OR:
2339     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2340                                               RISCVISD::OR_VL);
2341   case ISD::XOR:
2342     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2343                                               RISCVISD::XOR_VL);
2344   case ISD::SDIV:
2345     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2346   case ISD::SREM:
2347     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2348   case ISD::UDIV:
2349     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2350   case ISD::UREM:
2351     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2352   case ISD::SHL:
2353     return lowerToScalableOp(Op, DAG, RISCVISD::SHL_VL);
2354   case ISD::SRA:
2355     return lowerToScalableOp(Op, DAG, RISCVISD::SRA_VL);
2356   case ISD::SRL:
2357     return lowerToScalableOp(Op, DAG, RISCVISD::SRL_VL);
2358   case ISD::FADD:
2359     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2360   case ISD::FSUB:
2361     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2362   case ISD::FMUL:
2363     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2364   case ISD::FDIV:
2365     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2366   case ISD::FNEG:
2367     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2368   case ISD::FABS:
2369     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2370   case ISD::FSQRT:
2371     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2372   case ISD::FMA:
2373     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2374   case ISD::SMIN:
2375     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2376   case ISD::SMAX:
2377     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2378   case ISD::UMIN:
2379     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2380   case ISD::UMAX:
2381     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2382   case ISD::FMINNUM:
2383     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
2384   case ISD::FMAXNUM:
2385     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
2386   case ISD::ABS:
2387     return lowerABS(Op, DAG);
2388   case ISD::VSELECT:
2389     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2390   case ISD::FCOPYSIGN:
2391     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2392   case ISD::MGATHER:
2393     return lowerMGATHER(Op, DAG);
2394   case ISD::MSCATTER:
2395     return lowerMSCATTER(Op, DAG);
2396   case ISD::FLT_ROUNDS_:
2397     return lowerGET_ROUNDING(Op, DAG);
2398   case ISD::SET_ROUNDING:
2399     return lowerSET_ROUNDING(Op, DAG);
2400   case ISD::VP_ADD:
2401     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
2402   case ISD::VP_SUB:
2403     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
2404   case ISD::VP_MUL:
2405     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
2406   case ISD::VP_SDIV:
2407     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
2408   case ISD::VP_UDIV:
2409     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
2410   case ISD::VP_SREM:
2411     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
2412   case ISD::VP_UREM:
2413     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
2414   case ISD::VP_AND:
2415     return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
2416   case ISD::VP_OR:
2417     return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
2418   case ISD::VP_XOR:
2419     return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
2420   case ISD::VP_ASHR:
2421     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
2422   case ISD::VP_LSHR:
2423     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
2424   case ISD::VP_SHL:
2425     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
2426   }
2427 }
2428 
2429 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2430                              SelectionDAG &DAG, unsigned Flags) {
2431   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2432 }
2433 
2434 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2435                              SelectionDAG &DAG, unsigned Flags) {
2436   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2437                                    Flags);
2438 }
2439 
2440 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2441                              SelectionDAG &DAG, unsigned Flags) {
2442   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2443                                    N->getOffset(), Flags);
2444 }
2445 
2446 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2447                              SelectionDAG &DAG, unsigned Flags) {
2448   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2449 }
2450 
2451 template <class NodeTy>
2452 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2453                                      bool IsLocal) const {
2454   SDLoc DL(N);
2455   EVT Ty = getPointerTy(DAG.getDataLayout());
2456 
2457   if (isPositionIndependent()) {
2458     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2459     if (IsLocal)
2460       // Use PC-relative addressing to access the symbol. This generates the
2461       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
2462       // %pcrel_lo(auipc)).
2463       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2464 
2465     // Use PC-relative addressing to access the GOT for this symbol, then load
2466     // the address from the GOT. This generates the pattern (PseudoLA sym),
2467     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
2468     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
2469   }
2470 
2471   switch (getTargetMachine().getCodeModel()) {
2472   default:
2473     report_fatal_error("Unsupported code model for lowering");
2474   case CodeModel::Small: {
2475     // Generate a sequence for accessing addresses within the first 2 GiB of
2476     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
2477     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
2478     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
2479     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2480     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
2481   }
2482   case CodeModel::Medium: {
2483     // Generate a sequence for accessing addresses within any 2GiB range within
2484     // the address space. This generates the pattern (PseudoLLA sym), which
2485     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
2486     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2487     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2488   }
2489   }
2490 }
2491 
2492 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
2493                                                 SelectionDAG &DAG) const {
2494   SDLoc DL(Op);
2495   EVT Ty = Op.getValueType();
2496   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2497   int64_t Offset = N->getOffset();
2498   MVT XLenVT = Subtarget.getXLenVT();
2499 
2500   const GlobalValue *GV = N->getGlobal();
2501   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
2502   SDValue Addr = getAddr(N, DAG, IsLocal);
2503 
2504   // In order to maximise the opportunity for common subexpression elimination,
2505   // emit a separate ADD node for the global address offset instead of folding
2506   // it in the global address node. Later peephole optimisations may choose to
2507   // fold it back in when profitable.
2508   if (Offset != 0)
2509     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2510                        DAG.getConstant(Offset, DL, XLenVT));
2511   return Addr;
2512 }
2513 
2514 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
2515                                                SelectionDAG &DAG) const {
2516   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2517 
2518   return getAddr(N, DAG);
2519 }
2520 
2521 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
2522                                                SelectionDAG &DAG) const {
2523   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2524 
2525   return getAddr(N, DAG);
2526 }
2527 
2528 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
2529                                             SelectionDAG &DAG) const {
2530   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2531 
2532   return getAddr(N, DAG);
2533 }
2534 
2535 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
2536                                               SelectionDAG &DAG,
2537                                               bool UseGOT) const {
2538   SDLoc DL(N);
2539   EVT Ty = getPointerTy(DAG.getDataLayout());
2540   const GlobalValue *GV = N->getGlobal();
2541   MVT XLenVT = Subtarget.getXLenVT();
2542 
2543   if (UseGOT) {
2544     // Use PC-relative addressing to access the GOT for this TLS symbol, then
2545     // load the address from the GOT and add the thread pointer. This generates
2546     // the pattern (PseudoLA_TLS_IE sym), which expands to
2547     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
2548     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2549     SDValue Load =
2550         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
2551 
2552     // Add the thread pointer.
2553     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2554     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
2555   }
2556 
2557   // Generate a sequence for accessing the address relative to the thread
2558   // pointer, with the appropriate adjustment for the thread pointer offset.
2559   // This generates the pattern
2560   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
2561   SDValue AddrHi =
2562       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
2563   SDValue AddrAdd =
2564       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
2565   SDValue AddrLo =
2566       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
2567 
2568   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2569   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2570   SDValue MNAdd = SDValue(
2571       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
2572       0);
2573   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
2574 }
2575 
2576 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
2577                                                SelectionDAG &DAG) const {
2578   SDLoc DL(N);
2579   EVT Ty = getPointerTy(DAG.getDataLayout());
2580   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
2581   const GlobalValue *GV = N->getGlobal();
2582 
2583   // Use a PC-relative addressing mode to access the global dynamic GOT address.
2584   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
2585   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
2586   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2587   SDValue Load =
2588       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
2589 
2590   // Prepare argument list to generate call.
2591   ArgListTy Args;
2592   ArgListEntry Entry;
2593   Entry.Node = Load;
2594   Entry.Ty = CallTy;
2595   Args.push_back(Entry);
2596 
2597   // Setup call to __tls_get_addr.
2598   TargetLowering::CallLoweringInfo CLI(DAG);
2599   CLI.setDebugLoc(DL)
2600       .setChain(DAG.getEntryNode())
2601       .setLibCallee(CallingConv::C, CallTy,
2602                     DAG.getExternalSymbol("__tls_get_addr", Ty),
2603                     std::move(Args));
2604 
2605   return LowerCallTo(CLI).first;
2606 }
2607 
2608 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2609                                                    SelectionDAG &DAG) const {
2610   SDLoc DL(Op);
2611   EVT Ty = Op.getValueType();
2612   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2613   int64_t Offset = N->getOffset();
2614   MVT XLenVT = Subtarget.getXLenVT();
2615 
2616   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2617 
2618   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2619       CallingConv::GHC)
2620     report_fatal_error("In GHC calling convention TLS is not supported");
2621 
2622   SDValue Addr;
2623   switch (Model) {
2624   case TLSModel::LocalExec:
2625     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2626     break;
2627   case TLSModel::InitialExec:
2628     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2629     break;
2630   case TLSModel::LocalDynamic:
2631   case TLSModel::GeneralDynamic:
2632     Addr = getDynamicTLSAddr(N, DAG);
2633     break;
2634   }
2635 
2636   // In order to maximise the opportunity for common subexpression elimination,
2637   // emit a separate ADD node for the global address offset instead of folding
2638   // it in the global address node. Later peephole optimisations may choose to
2639   // fold it back in when profitable.
2640   if (Offset != 0)
2641     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2642                        DAG.getConstant(Offset, DL, XLenVT));
2643   return Addr;
2644 }
2645 
2646 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2647   SDValue CondV = Op.getOperand(0);
2648   SDValue TrueV = Op.getOperand(1);
2649   SDValue FalseV = Op.getOperand(2);
2650   SDLoc DL(Op);
2651   MVT XLenVT = Subtarget.getXLenVT();
2652 
2653   // If the result type is XLenVT and CondV is the output of a SETCC node
2654   // which also operated on XLenVT inputs, then merge the SETCC node into the
2655   // lowered RISCVISD::SELECT_CC to take advantage of the integer
2656   // compare+branch instructions. i.e.:
2657   // (select (setcc lhs, rhs, cc), truev, falsev)
2658   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
2659   if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
2660       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
2661     SDValue LHS = CondV.getOperand(0);
2662     SDValue RHS = CondV.getOperand(1);
2663     auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
2664     ISD::CondCode CCVal = CC->get();
2665 
2666     // Special case for a select of 2 constants that have a diffence of 1.
2667     // Normally this is done by DAGCombine, but if the select is introduced by
2668     // type legalization or op legalization, we miss it. Restricting to SETLT
2669     // case for now because that is what signed saturating add/sub need.
2670     // FIXME: We don't need the condition to be SETLT or even a SETCC,
2671     // but we would probably want to swap the true/false values if the condition
2672     // is SETGE/SETLE to avoid an XORI.
2673     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
2674         CCVal == ISD::SETLT) {
2675       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
2676       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
2677       if (TrueVal - 1 == FalseVal)
2678         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
2679       if (TrueVal + 1 == FalseVal)
2680         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
2681     }
2682 
2683     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2684 
2685     SDValue TargetCC = DAG.getTargetConstant(CCVal, DL, XLenVT);
2686     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
2687     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2688   }
2689 
2690   // Otherwise:
2691   // (select condv, truev, falsev)
2692   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
2693   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
2694   SDValue SetNE = DAG.getTargetConstant(ISD::SETNE, DL, XLenVT);
2695 
2696   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
2697 
2698   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2699 }
2700 
2701 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2702   SDValue CondV = Op.getOperand(1);
2703   SDLoc DL(Op);
2704   MVT XLenVT = Subtarget.getXLenVT();
2705 
2706   if (CondV.getOpcode() == ISD::SETCC &&
2707       CondV.getOperand(0).getValueType() == XLenVT) {
2708     SDValue LHS = CondV.getOperand(0);
2709     SDValue RHS = CondV.getOperand(1);
2710     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
2711 
2712     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2713 
2714     SDValue TargetCC = DAG.getCondCode(CCVal);
2715     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2716                        LHS, RHS, TargetCC, Op.getOperand(2));
2717   }
2718 
2719   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2720                      CondV, DAG.getConstant(0, DL, XLenVT),
2721                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
2722 }
2723 
2724 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2725   MachineFunction &MF = DAG.getMachineFunction();
2726   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
2727 
2728   SDLoc DL(Op);
2729   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2730                                  getPointerTy(MF.getDataLayout()));
2731 
2732   // vastart just stores the address of the VarArgsFrameIndex slot into the
2733   // memory location argument.
2734   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2735   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2736                       MachinePointerInfo(SV));
2737 }
2738 
2739 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
2740                                             SelectionDAG &DAG) const {
2741   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2742   MachineFunction &MF = DAG.getMachineFunction();
2743   MachineFrameInfo &MFI = MF.getFrameInfo();
2744   MFI.setFrameAddressIsTaken(true);
2745   Register FrameReg = RI.getFrameRegister(MF);
2746   int XLenInBytes = Subtarget.getXLen() / 8;
2747 
2748   EVT VT = Op.getValueType();
2749   SDLoc DL(Op);
2750   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
2751   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2752   while (Depth--) {
2753     int Offset = -(XLenInBytes * 2);
2754     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
2755                               DAG.getIntPtrConstant(Offset, DL));
2756     FrameAddr =
2757         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2758   }
2759   return FrameAddr;
2760 }
2761 
2762 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
2763                                              SelectionDAG &DAG) const {
2764   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2765   MachineFunction &MF = DAG.getMachineFunction();
2766   MachineFrameInfo &MFI = MF.getFrameInfo();
2767   MFI.setReturnAddressIsTaken(true);
2768   MVT XLenVT = Subtarget.getXLenVT();
2769   int XLenInBytes = Subtarget.getXLen() / 8;
2770 
2771   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2772     return SDValue();
2773 
2774   EVT VT = Op.getValueType();
2775   SDLoc DL(Op);
2776   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2777   if (Depth) {
2778     int Off = -XLenInBytes;
2779     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
2780     SDValue Offset = DAG.getConstant(Off, DL, VT);
2781     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
2782                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
2783                        MachinePointerInfo());
2784   }
2785 
2786   // Return the value of the return address register, marking it an implicit
2787   // live-in.
2788   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
2789   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
2790 }
2791 
2792 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
2793                                                  SelectionDAG &DAG) const {
2794   SDLoc DL(Op);
2795   SDValue Lo = Op.getOperand(0);
2796   SDValue Hi = Op.getOperand(1);
2797   SDValue Shamt = Op.getOperand(2);
2798   EVT VT = Lo.getValueType();
2799 
2800   // if Shamt-XLEN < 0: // Shamt < XLEN
2801   //   Lo = Lo << Shamt
2802   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
2803   // else:
2804   //   Lo = 0
2805   //   Hi = Lo << (Shamt-XLEN)
2806 
2807   SDValue Zero = DAG.getConstant(0, DL, VT);
2808   SDValue One = DAG.getConstant(1, DL, VT);
2809   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2810   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2811   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2812   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2813 
2814   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2815   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
2816   SDValue ShiftRightLo =
2817       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
2818   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2819   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2820   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
2821 
2822   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2823 
2824   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
2825   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2826 
2827   SDValue Parts[2] = {Lo, Hi};
2828   return DAG.getMergeValues(Parts, DL);
2829 }
2830 
2831 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2832                                                   bool IsSRA) const {
2833   SDLoc DL(Op);
2834   SDValue Lo = Op.getOperand(0);
2835   SDValue Hi = Op.getOperand(1);
2836   SDValue Shamt = Op.getOperand(2);
2837   EVT VT = Lo.getValueType();
2838 
2839   // SRA expansion:
2840   //   if Shamt-XLEN < 0: // Shamt < XLEN
2841   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2842   //     Hi = Hi >>s Shamt
2843   //   else:
2844   //     Lo = Hi >>s (Shamt-XLEN);
2845   //     Hi = Hi >>s (XLEN-1)
2846   //
2847   // SRL expansion:
2848   //   if Shamt-XLEN < 0: // Shamt < XLEN
2849   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2850   //     Hi = Hi >>u Shamt
2851   //   else:
2852   //     Lo = Hi >>u (Shamt-XLEN);
2853   //     Hi = 0;
2854 
2855   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
2856 
2857   SDValue Zero = DAG.getConstant(0, DL, VT);
2858   SDValue One = DAG.getConstant(1, DL, VT);
2859   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2860   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2861   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2862   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2863 
2864   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2865   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
2866   SDValue ShiftLeftHi =
2867       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
2868   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
2869   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
2870   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
2871   SDValue HiFalse =
2872       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
2873 
2874   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2875 
2876   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
2877   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2878 
2879   SDValue Parts[2] = {Lo, Hi};
2880   return DAG.getMergeValues(Parts, DL);
2881 }
2882 
2883 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
2884 // legal equivalently-sized i8 type, so we can use that as a go-between.
2885 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
2886                                                   SelectionDAG &DAG) const {
2887   SDLoc DL(Op);
2888   MVT VT = Op.getSimpleValueType();
2889   SDValue SplatVal = Op.getOperand(0);
2890   // All-zeros or all-ones splats are handled specially.
2891   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
2892     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
2893     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
2894   }
2895   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
2896     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
2897     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
2898   }
2899   MVT XLenVT = Subtarget.getXLenVT();
2900   assert(SplatVal.getValueType() == XLenVT &&
2901          "Unexpected type for i1 splat value");
2902   MVT InterVT = VT.changeVectorElementType(MVT::i8);
2903   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
2904                          DAG.getConstant(1, DL, XLenVT));
2905   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
2906   SDValue Zero = DAG.getConstant(0, DL, InterVT);
2907   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
2908 }
2909 
2910 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
2911 // illegal (currently only vXi64 RV32).
2912 // FIXME: We could also catch non-constant sign-extended i32 values and lower
2913 // them to SPLAT_VECTOR_I64
2914 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
2915                                                      SelectionDAG &DAG) const {
2916   SDLoc DL(Op);
2917   MVT VecVT = Op.getSimpleValueType();
2918   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
2919          "Unexpected SPLAT_VECTOR_PARTS lowering");
2920 
2921   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
2922   SDValue Lo = Op.getOperand(0);
2923   SDValue Hi = Op.getOperand(1);
2924 
2925   if (VecVT.isFixedLengthVector()) {
2926     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
2927     SDLoc DL(Op);
2928     SDValue Mask, VL;
2929     std::tie(Mask, VL) =
2930         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2931 
2932     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
2933     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
2934   }
2935 
2936   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2937     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2938     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2939     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2940     // node in order to try and match RVV vector/scalar instructions.
2941     if ((LoC >> 31) == HiC)
2942       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2943   }
2944 
2945   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
2946   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
2947       isa<ConstantSDNode>(Hi.getOperand(1)) &&
2948       Hi.getConstantOperandVal(1) == 31)
2949     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2950 
2951   // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
2952   return splatPartsI64ThroughStack(DL, VecVT, Lo, Hi,
2953                                    DAG.getRegister(RISCV::X0, MVT::i64), DAG);
2954 }
2955 
2956 // Custom-lower extensions from mask vectors by using a vselect either with 1
2957 // for zero/any-extension or -1 for sign-extension:
2958 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
2959 // Note that any-extension is lowered identically to zero-extension.
2960 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
2961                                                 int64_t ExtTrueVal) const {
2962   SDLoc DL(Op);
2963   MVT VecVT = Op.getSimpleValueType();
2964   SDValue Src = Op.getOperand(0);
2965   // Only custom-lower extensions from mask types
2966   assert(Src.getValueType().isVector() &&
2967          Src.getValueType().getVectorElementType() == MVT::i1);
2968 
2969   MVT XLenVT = Subtarget.getXLenVT();
2970   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
2971   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
2972 
2973   if (VecVT.isScalableVector()) {
2974     // Be careful not to introduce illegal scalar types at this stage, and be
2975     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
2976     // illegal and must be expanded. Since we know that the constants are
2977     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
2978     bool IsRV32E64 =
2979         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
2980 
2981     if (!IsRV32E64) {
2982       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
2983       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
2984     } else {
2985       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
2986       SplatTrueVal =
2987           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
2988     }
2989 
2990     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
2991   }
2992 
2993   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
2994   MVT I1ContainerVT =
2995       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
2996 
2997   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
2998 
2999   SDValue Mask, VL;
3000   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3001 
3002   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3003   SplatTrueVal =
3004       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3005   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3006                                SplatTrueVal, SplatZero, VL);
3007 
3008   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3009 }
3010 
3011 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3012     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3013   MVT ExtVT = Op.getSimpleValueType();
3014   // Only custom-lower extensions from fixed-length vector types.
3015   if (!ExtVT.isFixedLengthVector())
3016     return Op;
3017   MVT VT = Op.getOperand(0).getSimpleValueType();
3018   // Grab the canonical container type for the extended type. Infer the smaller
3019   // type from that to ensure the same number of vector elements, as we know
3020   // the LMUL will be sufficient to hold the smaller type.
3021   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
3022   // Get the extended container type manually to ensure the same number of
3023   // vector elements between source and dest.
3024   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3025                                      ContainerExtVT.getVectorElementCount());
3026 
3027   SDValue Op1 =
3028       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3029 
3030   SDLoc DL(Op);
3031   SDValue Mask, VL;
3032   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3033 
3034   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3035 
3036   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3037 }
3038 
3039 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3040 // setcc operation:
3041 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3042 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3043                                                   SelectionDAG &DAG) const {
3044   SDLoc DL(Op);
3045   EVT MaskVT = Op.getValueType();
3046   // Only expect to custom-lower truncations to mask types
3047   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3048          "Unexpected type for vector mask lowering");
3049   SDValue Src = Op.getOperand(0);
3050   MVT VecVT = Src.getSimpleValueType();
3051 
3052   // If this is a fixed vector, we need to convert it to a scalable vector.
3053   MVT ContainerVT = VecVT;
3054   if (VecVT.isFixedLengthVector()) {
3055     ContainerVT = getContainerForFixedLengthVector(VecVT);
3056     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3057   }
3058 
3059   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
3060   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
3061 
3062   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
3063   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
3064 
3065   if (VecVT.isScalableVector()) {
3066     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
3067     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
3068   }
3069 
3070   SDValue Mask, VL;
3071   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3072 
3073   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3074   SDValue Trunc =
3075       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3076   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3077                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3078   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3079 }
3080 
3081 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3082 // first position of a vector, and that vector is slid up to the insert index.
3083 // By limiting the active vector length to index+1 and merging with the
3084 // original vector (with an undisturbed tail policy for elements >= VL), we
3085 // achieve the desired result of leaving all elements untouched except the one
3086 // at VL-1, which is replaced with the desired value.
3087 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3088                                                     SelectionDAG &DAG) const {
3089   SDLoc DL(Op);
3090   MVT VecVT = Op.getSimpleValueType();
3091   SDValue Vec = Op.getOperand(0);
3092   SDValue Val = Op.getOperand(1);
3093   SDValue Idx = Op.getOperand(2);
3094 
3095   MVT ContainerVT = VecVT;
3096   // If the operand is a fixed-length vector, convert to a scalable one.
3097   if (VecVT.isFixedLengthVector()) {
3098     ContainerVT = getContainerForFixedLengthVector(VecVT);
3099     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3100   }
3101 
3102   MVT XLenVT = Subtarget.getXLenVT();
3103 
3104   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3105   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
3106   // Even i64-element vectors on RV32 can be lowered without scalar
3107   // legalization if the most-significant 32 bits of the value are not affected
3108   // by the sign-extension of the lower 32 bits.
3109   // TODO: We could also catch sign extensions of a 32-bit value.
3110   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3111     const auto *CVal = cast<ConstantSDNode>(Val);
3112     if (isInt<32>(CVal->getSExtValue())) {
3113       IsLegalInsert = true;
3114       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3115     }
3116   }
3117 
3118   SDValue Mask, VL;
3119   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3120 
3121   SDValue ValInVec;
3122 
3123   if (IsLegalInsert) {
3124     unsigned Opc =
3125         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3126     if (isNullConstant(Idx)) {
3127       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3128       if (!VecVT.isFixedLengthVector())
3129         return Vec;
3130       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3131     }
3132     ValInVec =
3133         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3134   } else {
3135     // On RV32, i64-element vectors must be specially handled to place the
3136     // value at element 0, by using two vslide1up instructions in sequence on
3137     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3138     // this.
3139     SDValue One = DAG.getConstant(1, DL, XLenVT);
3140     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3141     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3142     MVT I32ContainerVT =
3143         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3144     SDValue I32Mask =
3145         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3146     // Limit the active VL to two.
3147     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
3148     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
3149     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3150     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3151                            InsertI64VL);
3152     // First slide in the hi value, then the lo in underneath it.
3153     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3154                            ValHi, I32Mask, InsertI64VL);
3155     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3156                            ValLo, I32Mask, InsertI64VL);
3157     // Bitcast back to the right container type.
3158     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3159   }
3160 
3161   // Now that the value is in a vector, slide it into position.
3162   SDValue InsertVL =
3163       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3164   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3165                                 ValInVec, Idx, Mask, InsertVL);
3166   if (!VecVT.isFixedLengthVector())
3167     return Slideup;
3168   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3169 }
3170 
3171 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3172 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3173 // types this is done using VMV_X_S to allow us to glean information about the
3174 // sign bits of the result.
3175 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3176                                                      SelectionDAG &DAG) const {
3177   SDLoc DL(Op);
3178   SDValue Idx = Op.getOperand(1);
3179   SDValue Vec = Op.getOperand(0);
3180   EVT EltVT = Op.getValueType();
3181   MVT VecVT = Vec.getSimpleValueType();
3182   MVT XLenVT = Subtarget.getXLenVT();
3183 
3184   if (VecVT.getVectorElementType() == MVT::i1) {
3185     // FIXME: For now we just promote to an i8 vector and extract from that,
3186     // but this is probably not optimal.
3187     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3188     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3189     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3190   }
3191 
3192   // If this is a fixed vector, we need to convert it to a scalable vector.
3193   MVT ContainerVT = VecVT;
3194   if (VecVT.isFixedLengthVector()) {
3195     ContainerVT = getContainerForFixedLengthVector(VecVT);
3196     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3197   }
3198 
3199   // If the index is 0, the vector is already in the right position.
3200   if (!isNullConstant(Idx)) {
3201     // Use a VL of 1 to avoid processing more elements than we need.
3202     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3203     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3204     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3205     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3206                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3207   }
3208 
3209   if (!EltVT.isInteger()) {
3210     // Floating-point extracts are handled in TableGen.
3211     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3212                        DAG.getConstant(0, DL, XLenVT));
3213   }
3214 
3215   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3216   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3217 }
3218 
3219 // Some RVV intrinsics may claim that they want an integer operand to be
3220 // promoted or expanded.
3221 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3222                                           const RISCVSubtarget &Subtarget) {
3223   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3224           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3225          "Unexpected opcode");
3226 
3227   if (!Subtarget.hasStdExtV())
3228     return SDValue();
3229 
3230   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3231   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3232   SDLoc DL(Op);
3233 
3234   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3235       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3236   if (!II || !II->SplatOperand)
3237     return SDValue();
3238 
3239   unsigned SplatOp = II->SplatOperand + HasChain;
3240   assert(SplatOp < Op.getNumOperands());
3241 
3242   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3243   SDValue &ScalarOp = Operands[SplatOp];
3244   MVT OpVT = ScalarOp.getSimpleValueType();
3245   MVT XLenVT = Subtarget.getXLenVT();
3246 
3247   // If this isn't a scalar, or its type is XLenVT we're done.
3248   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3249     return SDValue();
3250 
3251   // Simplest case is that the operand needs to be promoted to XLenVT.
3252   if (OpVT.bitsLT(XLenVT)) {
3253     // If the operand is a constant, sign extend to increase our chances
3254     // of being able to use a .vi instruction. ANY_EXTEND would become a
3255     // a zero extend and the simm5 check in isel would fail.
3256     // FIXME: Should we ignore the upper bits in isel instead?
3257     unsigned ExtOpc =
3258         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3259     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3260     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3261   }
3262 
3263   // Use the previous operand to get the vXi64 VT. The result might be a mask
3264   // VT for compares. Using the previous operand assumes that the previous
3265   // operand will never have a smaller element size than a scalar operand and
3266   // that a widening operation never uses SEW=64.
3267   // NOTE: If this fails the below assert, we can probably just find the
3268   // element count from any operand or result and use it to construct the VT.
3269   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3270   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3271 
3272   // The more complex case is when the scalar is larger than XLenVT.
3273   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3274          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3275 
3276   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3277   // on the instruction to sign-extend since SEW>XLEN.
3278   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3279     if (isInt<32>(CVal->getSExtValue())) {
3280       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3281       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3282     }
3283   }
3284 
3285   // We need to convert the scalar to a splat vector.
3286   // FIXME: Can we implicitly truncate the scalar if it is known to
3287   // be sign extended?
3288   // VL should be the last operand.
3289   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3290   assert(VL.getValueType() == XLenVT);
3291   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3292   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3293 }
3294 
3295 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3296                                                      SelectionDAG &DAG) const {
3297   unsigned IntNo = Op.getConstantOperandVal(0);
3298   SDLoc DL(Op);
3299   MVT XLenVT = Subtarget.getXLenVT();
3300 
3301   switch (IntNo) {
3302   default:
3303     break; // Don't custom lower most intrinsics.
3304   case Intrinsic::thread_pointer: {
3305     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3306     return DAG.getRegister(RISCV::X4, PtrVT);
3307   }
3308   case Intrinsic::riscv_orc_b:
3309     // Lower to the GORCI encoding for orc.b.
3310     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3311                        DAG.getConstant(7, DL, XLenVT));
3312   case Intrinsic::riscv_grev:
3313   case Intrinsic::riscv_gorc: {
3314     unsigned Opc =
3315         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3316     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3317   }
3318   case Intrinsic::riscv_shfl:
3319   case Intrinsic::riscv_unshfl: {
3320     unsigned Opc =
3321         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3322     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3323   }
3324   case Intrinsic::riscv_bcompress:
3325   case Intrinsic::riscv_bdecompress: {
3326     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3327                                                        : RISCVISD::BDECOMPRESS;
3328     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3329   }
3330   case Intrinsic::riscv_vmv_x_s:
3331     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3332     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3333                        Op.getOperand(1));
3334   case Intrinsic::riscv_vmv_v_x:
3335     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3336                             Op.getSimpleValueType(), DL, DAG, Subtarget);
3337   case Intrinsic::riscv_vfmv_v_f:
3338     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3339                        Op.getOperand(1), Op.getOperand(2));
3340   case Intrinsic::riscv_vmv_s_x: {
3341     SDValue Scalar = Op.getOperand(2);
3342 
3343     if (Scalar.getValueType().bitsLE(XLenVT)) {
3344       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3345       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3346                          Op.getOperand(1), Scalar, Op.getOperand(3));
3347     }
3348 
3349     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3350 
3351     // This is an i64 value that lives in two scalar registers. We have to
3352     // insert this in a convoluted way. First we build vXi64 splat containing
3353     // the/ two values that we assemble using some bit math. Next we'll use
3354     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
3355     // to merge element 0 from our splat into the source vector.
3356     // FIXME: This is probably not the best way to do this, but it is
3357     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3358     // point.
3359     //   sw lo, (a0)
3360     //   sw hi, 4(a0)
3361     //   vlse vX, (a0)
3362     //
3363     //   vid.v      vVid
3364     //   vmseq.vx   mMask, vVid, 0
3365     //   vmerge.vvm vDest, vSrc, vVal, mMask
3366     MVT VT = Op.getSimpleValueType();
3367     SDValue Vec = Op.getOperand(1);
3368     SDValue VL = Op.getOperand(3);
3369 
3370     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
3371     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3372                                       DAG.getConstant(0, DL, MVT::i32), VL);
3373 
3374     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3375     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3376     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3377     SDValue SelectCond =
3378         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3379                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
3380     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3381                        Vec, VL);
3382   }
3383   case Intrinsic::riscv_vslide1up:
3384   case Intrinsic::riscv_vslide1down:
3385   case Intrinsic::riscv_vslide1up_mask:
3386   case Intrinsic::riscv_vslide1down_mask: {
3387     // We need to special case these when the scalar is larger than XLen.
3388     unsigned NumOps = Op.getNumOperands();
3389     bool IsMasked = NumOps == 6;
3390     unsigned OpOffset = IsMasked ? 1 : 0;
3391     SDValue Scalar = Op.getOperand(2 + OpOffset);
3392     if (Scalar.getValueType().bitsLE(XLenVT))
3393       break;
3394 
3395     // Splatting a sign extended constant is fine.
3396     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3397       if (isInt<32>(CVal->getSExtValue()))
3398         break;
3399 
3400     MVT VT = Op.getSimpleValueType();
3401     assert(VT.getVectorElementType() == MVT::i64 &&
3402            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3403 
3404     // Convert the vector source to the equivalent nxvXi32 vector.
3405     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3406     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3407 
3408     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3409                                    DAG.getConstant(0, DL, XLenVT));
3410     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3411                                    DAG.getConstant(1, DL, XLenVT));
3412 
3413     // Double the VL since we halved SEW.
3414     SDValue VL = Op.getOperand(NumOps - 1);
3415     SDValue I32VL =
3416         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3417 
3418     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
3419     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
3420 
3421     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3422     // instructions.
3423     if (IntNo == Intrinsic::riscv_vslide1up ||
3424         IntNo == Intrinsic::riscv_vslide1up_mask) {
3425       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3426                         I32Mask, I32VL);
3427       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3428                         I32Mask, I32VL);
3429     } else {
3430       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3431                         I32Mask, I32VL);
3432       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3433                         I32Mask, I32VL);
3434     }
3435 
3436     // Convert back to nxvXi64.
3437     Vec = DAG.getBitcast(VT, Vec);
3438 
3439     if (!IsMasked)
3440       return Vec;
3441 
3442     // Apply mask after the operation.
3443     SDValue Mask = Op.getOperand(NumOps - 2);
3444     SDValue MaskedOff = Op.getOperand(1);
3445     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
3446   }
3447   }
3448 
3449   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3450 }
3451 
3452 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
3453                                                     SelectionDAG &DAG) const {
3454   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3455 }
3456 
3457 static MVT getLMUL1VT(MVT VT) {
3458   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
3459          "Unexpected vector MVT");
3460   return MVT::getScalableVectorVT(
3461       VT.getVectorElementType(),
3462       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
3463 }
3464 
3465 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
3466   switch (ISDOpcode) {
3467   default:
3468     llvm_unreachable("Unhandled reduction");
3469   case ISD::VECREDUCE_ADD:
3470     return RISCVISD::VECREDUCE_ADD_VL;
3471   case ISD::VECREDUCE_UMAX:
3472     return RISCVISD::VECREDUCE_UMAX_VL;
3473   case ISD::VECREDUCE_SMAX:
3474     return RISCVISD::VECREDUCE_SMAX_VL;
3475   case ISD::VECREDUCE_UMIN:
3476     return RISCVISD::VECREDUCE_UMIN_VL;
3477   case ISD::VECREDUCE_SMIN:
3478     return RISCVISD::VECREDUCE_SMIN_VL;
3479   case ISD::VECREDUCE_AND:
3480     return RISCVISD::VECREDUCE_AND_VL;
3481   case ISD::VECREDUCE_OR:
3482     return RISCVISD::VECREDUCE_OR_VL;
3483   case ISD::VECREDUCE_XOR:
3484     return RISCVISD::VECREDUCE_XOR_VL;
3485   }
3486 }
3487 
3488 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op,
3489                                                       SelectionDAG &DAG) const {
3490   SDLoc DL(Op);
3491   SDValue Vec = Op.getOperand(0);
3492   MVT VecVT = Vec.getSimpleValueType();
3493   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
3494           Op.getOpcode() == ISD::VECREDUCE_OR ||
3495           Op.getOpcode() == ISD::VECREDUCE_XOR) &&
3496          "Unexpected reduction lowering");
3497 
3498   MVT XLenVT = Subtarget.getXLenVT();
3499   assert(Op.getValueType() == XLenVT &&
3500          "Expected reduction output to be legalized to XLenVT");
3501 
3502   MVT ContainerVT = VecVT;
3503   if (VecVT.isFixedLengthVector()) {
3504     ContainerVT = getContainerForFixedLengthVector(VecVT);
3505     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3506   }
3507 
3508   SDValue Mask, VL;
3509   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3510   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3511 
3512   switch (Op.getOpcode()) {
3513   default:
3514     llvm_unreachable("Unhandled reduction");
3515   case ISD::VECREDUCE_AND:
3516     // vpopc ~x == 0
3517     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL);
3518     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3519     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ);
3520   case ISD::VECREDUCE_OR:
3521     // vpopc x != 0
3522     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3523     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3524   case ISD::VECREDUCE_XOR: {
3525     // ((vpopc x) & 1) != 0
3526     SDValue One = DAG.getConstant(1, DL, XLenVT);
3527     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3528     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
3529     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3530   }
3531   }
3532 }
3533 
3534 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
3535                                             SelectionDAG &DAG) const {
3536   SDLoc DL(Op);
3537   SDValue Vec = Op.getOperand(0);
3538   EVT VecEVT = Vec.getValueType();
3539 
3540   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
3541 
3542   // Due to ordering in legalize types we may have a vector type that needs to
3543   // be split. Do that manually so we can get down to a legal type.
3544   while (getTypeAction(*DAG.getContext(), VecEVT) ==
3545          TargetLowering::TypeSplitVector) {
3546     SDValue Lo, Hi;
3547     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
3548     VecEVT = Lo.getValueType();
3549     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
3550   }
3551 
3552   // TODO: The type may need to be widened rather than split. Or widened before
3553   // it can be split.
3554   if (!isTypeLegal(VecEVT))
3555     return SDValue();
3556 
3557   MVT VecVT = VecEVT.getSimpleVT();
3558   MVT VecEltVT = VecVT.getVectorElementType();
3559   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
3560 
3561   MVT ContainerVT = VecVT;
3562   if (VecVT.isFixedLengthVector()) {
3563     ContainerVT = getContainerForFixedLengthVector(VecVT);
3564     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3565   }
3566 
3567   MVT M1VT = getLMUL1VT(ContainerVT);
3568 
3569   SDValue Mask, VL;
3570   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3571 
3572   // FIXME: This is a VLMAX splat which might be too large and can prevent
3573   // vsetvli removal.
3574   SDValue NeutralElem =
3575       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
3576   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
3577   SDValue Reduction =
3578       DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
3579   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3580                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3581   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
3582 }
3583 
3584 // Given a reduction op, this function returns the matching reduction opcode,
3585 // the vector SDValue and the scalar SDValue required to lower this to a
3586 // RISCVISD node.
3587 static std::tuple<unsigned, SDValue, SDValue>
3588 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
3589   SDLoc DL(Op);
3590   auto Flags = Op->getFlags();
3591   unsigned Opcode = Op.getOpcode();
3592   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
3593   switch (Opcode) {
3594   default:
3595     llvm_unreachable("Unhandled reduction");
3596   case ISD::VECREDUCE_FADD:
3597     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
3598                            DAG.getConstantFP(0.0, DL, EltVT));
3599   case ISD::VECREDUCE_SEQ_FADD:
3600     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
3601                            Op.getOperand(0));
3602   case ISD::VECREDUCE_FMIN:
3603     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
3604                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3605   case ISD::VECREDUCE_FMAX:
3606     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
3607                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3608   }
3609 }
3610 
3611 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
3612                                               SelectionDAG &DAG) const {
3613   SDLoc DL(Op);
3614   MVT VecEltVT = Op.getSimpleValueType();
3615 
3616   unsigned RVVOpcode;
3617   SDValue VectorVal, ScalarVal;
3618   std::tie(RVVOpcode, VectorVal, ScalarVal) =
3619       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
3620   MVT VecVT = VectorVal.getSimpleValueType();
3621 
3622   MVT ContainerVT = VecVT;
3623   if (VecVT.isFixedLengthVector()) {
3624     ContainerVT = getContainerForFixedLengthVector(VecVT);
3625     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
3626   }
3627 
3628   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
3629 
3630   SDValue Mask, VL;
3631   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3632 
3633   // FIXME: This is a VLMAX splat which might be too large and can prevent
3634   // vsetvli removal.
3635   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
3636   SDValue Reduction =
3637       DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
3638   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3639                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3640 }
3641 
3642 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
3643                                                    SelectionDAG &DAG) const {
3644   SDValue Vec = Op.getOperand(0);
3645   SDValue SubVec = Op.getOperand(1);
3646   MVT VecVT = Vec.getSimpleValueType();
3647   MVT SubVecVT = SubVec.getSimpleValueType();
3648 
3649   SDLoc DL(Op);
3650   MVT XLenVT = Subtarget.getXLenVT();
3651   unsigned OrigIdx = Op.getConstantOperandVal(2);
3652   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3653 
3654   // We don't have the ability to slide mask vectors up indexed by their i1
3655   // elements; the smallest we can do is i8. Often we are able to bitcast to
3656   // equivalent i8 vectors. Note that when inserting a fixed-length vector
3657   // into a scalable one, we might not necessarily have enough scalable
3658   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
3659   if (SubVecVT.getVectorElementType() == MVT::i1 &&
3660       (OrigIdx != 0 || !Vec.isUndef())) {
3661     if (VecVT.getVectorMinNumElements() >= 8 &&
3662         SubVecVT.getVectorMinNumElements() >= 8) {
3663       assert(OrigIdx % 8 == 0 && "Invalid index");
3664       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3665              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3666              "Unexpected mask vector lowering");
3667       OrigIdx /= 8;
3668       SubVecVT =
3669           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3670                            SubVecVT.isScalableVector());
3671       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3672                                VecVT.isScalableVector());
3673       Vec = DAG.getBitcast(VecVT, Vec);
3674       SubVec = DAG.getBitcast(SubVecVT, SubVec);
3675     } else {
3676       // We can't slide this mask vector up indexed by its i1 elements.
3677       // This poses a problem when we wish to insert a scalable vector which
3678       // can't be re-expressed as a larger type. Just choose the slow path and
3679       // extend to a larger type, then truncate back down.
3680       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3681       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3682       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3683       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
3684       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
3685                         Op.getOperand(2));
3686       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
3687       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
3688     }
3689   }
3690 
3691   // If the subvector vector is a fixed-length type, we cannot use subregister
3692   // manipulation to simplify the codegen; we don't know which register of a
3693   // LMUL group contains the specific subvector as we only know the minimum
3694   // register size. Therefore we must slide the vector group up the full
3695   // amount.
3696   if (SubVecVT.isFixedLengthVector()) {
3697     if (OrigIdx == 0 && Vec.isUndef())
3698       return Op;
3699     MVT ContainerVT = VecVT;
3700     if (VecVT.isFixedLengthVector()) {
3701       ContainerVT = getContainerForFixedLengthVector(VecVT);
3702       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3703     }
3704     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
3705                          DAG.getUNDEF(ContainerVT), SubVec,
3706                          DAG.getConstant(0, DL, XLenVT));
3707     SDValue Mask =
3708         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3709     // Set the vector length to only the number of elements we care about. Note
3710     // that for slideup this includes the offset.
3711     SDValue VL =
3712         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
3713     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3714     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3715                                   SubVec, SlideupAmt, Mask, VL);
3716     if (VecVT.isFixedLengthVector())
3717       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3718     return DAG.getBitcast(Op.getValueType(), Slideup);
3719   }
3720 
3721   unsigned SubRegIdx, RemIdx;
3722   std::tie(SubRegIdx, RemIdx) =
3723       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3724           VecVT, SubVecVT, OrigIdx, TRI);
3725 
3726   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
3727   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
3728                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
3729                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
3730 
3731   // 1. If the Idx has been completely eliminated and this subvector's size is
3732   // a vector register or a multiple thereof, or the surrounding elements are
3733   // undef, then this is a subvector insert which naturally aligns to a vector
3734   // register. These can easily be handled using subregister manipulation.
3735   // 2. If the subvector is smaller than a vector register, then the insertion
3736   // must preserve the undisturbed elements of the register. We do this by
3737   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
3738   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
3739   // subvector within the vector register, and an INSERT_SUBVECTOR of that
3740   // LMUL=1 type back into the larger vector (resolving to another subregister
3741   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
3742   // to avoid allocating a large register group to hold our subvector.
3743   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
3744     return Op;
3745 
3746   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
3747   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
3748   // (in our case undisturbed). This means we can set up a subvector insertion
3749   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
3750   // size of the subvector.
3751   MVT InterSubVT = VecVT;
3752   SDValue AlignedExtract = Vec;
3753   unsigned AlignedIdx = OrigIdx - RemIdx;
3754   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3755     InterSubVT = getLMUL1VT(VecVT);
3756     // Extract a subvector equal to the nearest full vector register type. This
3757     // should resolve to a EXTRACT_SUBREG instruction.
3758     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3759                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
3760   }
3761 
3762   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3763   // For scalable vectors this must be further multiplied by vscale.
3764   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
3765 
3766   SDValue Mask, VL;
3767   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
3768 
3769   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
3770   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
3771   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
3772   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
3773 
3774   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
3775                        DAG.getUNDEF(InterSubVT), SubVec,
3776                        DAG.getConstant(0, DL, XLenVT));
3777 
3778   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
3779                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
3780 
3781   // If required, insert this subvector back into the correct vector register.
3782   // This should resolve to an INSERT_SUBREG instruction.
3783   if (VecVT.bitsGT(InterSubVT))
3784     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
3785                           DAG.getConstant(AlignedIdx, DL, XLenVT));
3786 
3787   // We might have bitcast from a mask type: cast back to the original type if
3788   // required.
3789   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
3790 }
3791 
3792 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
3793                                                     SelectionDAG &DAG) const {
3794   SDValue Vec = Op.getOperand(0);
3795   MVT SubVecVT = Op.getSimpleValueType();
3796   MVT VecVT = Vec.getSimpleValueType();
3797 
3798   SDLoc DL(Op);
3799   MVT XLenVT = Subtarget.getXLenVT();
3800   unsigned OrigIdx = Op.getConstantOperandVal(1);
3801   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3802 
3803   // We don't have the ability to slide mask vectors down indexed by their i1
3804   // elements; the smallest we can do is i8. Often we are able to bitcast to
3805   // equivalent i8 vectors. Note that when extracting a fixed-length vector
3806   // from a scalable one, we might not necessarily have enough scalable
3807   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
3808   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
3809     if (VecVT.getVectorMinNumElements() >= 8 &&
3810         SubVecVT.getVectorMinNumElements() >= 8) {
3811       assert(OrigIdx % 8 == 0 && "Invalid index");
3812       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3813              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3814              "Unexpected mask vector lowering");
3815       OrigIdx /= 8;
3816       SubVecVT =
3817           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3818                            SubVecVT.isScalableVector());
3819       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3820                                VecVT.isScalableVector());
3821       Vec = DAG.getBitcast(VecVT, Vec);
3822     } else {
3823       // We can't slide this mask vector down, indexed by its i1 elements.
3824       // This poses a problem when we wish to extract a scalable vector which
3825       // can't be re-expressed as a larger type. Just choose the slow path and
3826       // extend to a larger type, then truncate back down.
3827       // TODO: We could probably improve this when extracting certain fixed
3828       // from fixed, where we can extract as i8 and shift the correct element
3829       // right to reach the desired subvector?
3830       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3831       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3832       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3833       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
3834                         Op.getOperand(1));
3835       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
3836       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
3837     }
3838   }
3839 
3840   // If the subvector vector is a fixed-length type, we cannot use subregister
3841   // manipulation to simplify the codegen; we don't know which register of a
3842   // LMUL group contains the specific subvector as we only know the minimum
3843   // register size. Therefore we must slide the vector group down the full
3844   // amount.
3845   if (SubVecVT.isFixedLengthVector()) {
3846     // With an index of 0 this is a cast-like subvector, which can be performed
3847     // with subregister operations.
3848     if (OrigIdx == 0)
3849       return Op;
3850     MVT ContainerVT = VecVT;
3851     if (VecVT.isFixedLengthVector()) {
3852       ContainerVT = getContainerForFixedLengthVector(VecVT);
3853       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3854     }
3855     SDValue Mask =
3856         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3857     // Set the vector length to only the number of elements we care about. This
3858     // avoids sliding down elements we're going to discard straight away.
3859     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
3860     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3861     SDValue Slidedown =
3862         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3863                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
3864     // Now we can use a cast-like subvector extract to get the result.
3865     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3866                             DAG.getConstant(0, DL, XLenVT));
3867     return DAG.getBitcast(Op.getValueType(), Slidedown);
3868   }
3869 
3870   unsigned SubRegIdx, RemIdx;
3871   std::tie(SubRegIdx, RemIdx) =
3872       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3873           VecVT, SubVecVT, OrigIdx, TRI);
3874 
3875   // If the Idx has been completely eliminated then this is a subvector extract
3876   // which naturally aligns to a vector register. These can easily be handled
3877   // using subregister manipulation.
3878   if (RemIdx == 0)
3879     return Op;
3880 
3881   // Else we must shift our vector register directly to extract the subvector.
3882   // Do this using VSLIDEDOWN.
3883 
3884   // If the vector type is an LMUL-group type, extract a subvector equal to the
3885   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
3886   // instruction.
3887   MVT InterSubVT = VecVT;
3888   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3889     InterSubVT = getLMUL1VT(VecVT);
3890     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3891                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
3892   }
3893 
3894   // Slide this vector register down by the desired number of elements in order
3895   // to place the desired subvector starting at element 0.
3896   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3897   // For scalable vectors this must be further multiplied by vscale.
3898   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
3899 
3900   SDValue Mask, VL;
3901   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
3902   SDValue Slidedown =
3903       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
3904                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
3905 
3906   // Now the vector is in the right position, extract our final subvector. This
3907   // should resolve to a COPY.
3908   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3909                           DAG.getConstant(0, DL, XLenVT));
3910 
3911   // We might have bitcast from a mask type: cast back to the original type if
3912   // required.
3913   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
3914 }
3915 
3916 // Lower step_vector to the vid instruction. Any non-identity step value must
3917 // be accounted for my manual expansion.
3918 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
3919                                               SelectionDAG &DAG) const {
3920   SDLoc DL(Op);
3921   MVT VT = Op.getSimpleValueType();
3922   MVT XLenVT = Subtarget.getXLenVT();
3923   SDValue Mask, VL;
3924   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
3925   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3926   uint64_t StepValImm = Op.getConstantOperandVal(0);
3927   if (StepValImm != 1) {
3928     assert(Op.getOperand(0).getValueType() == XLenVT &&
3929            "Unexpected step value type");
3930     if (isPowerOf2_64(StepValImm)) {
3931       SDValue StepVal =
3932           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3933                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
3934       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
3935     } else {
3936       SDValue StepVal =
3937           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Op.getOperand(0));
3938       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
3939     }
3940   }
3941   return StepVec;
3942 }
3943 
3944 // Implement vector_reverse using vrgather.vv with indices determined by
3945 // subtracting the id of each element from (VLMAX-1). This will convert
3946 // the indices like so:
3947 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
3948 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
3949 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
3950                                                  SelectionDAG &DAG) const {
3951   SDLoc DL(Op);
3952   MVT VecVT = Op.getSimpleValueType();
3953   unsigned EltSize = VecVT.getScalarSizeInBits();
3954   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
3955 
3956   unsigned MaxVLMAX = 0;
3957   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
3958   if (VectorBitsMax != 0)
3959     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
3960 
3961   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
3962   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
3963 
3964   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
3965   // to use vrgatherei16.vv.
3966   // TODO: It's also possible to use vrgatherei16.vv for other types to
3967   // decrease register width for the index calculation.
3968   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
3969     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
3970     // Reverse each half, then reassemble them in reverse order.
3971     // NOTE: It's also possible that after splitting that VLMAX no longer
3972     // requires vrgatherei16.vv.
3973     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
3974       SDValue Lo, Hi;
3975       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3976       EVT LoVT, HiVT;
3977       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
3978       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
3979       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
3980       // Reassemble the low and high pieces reversed.
3981       // FIXME: This is a CONCAT_VECTORS.
3982       SDValue Res =
3983           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
3984                       DAG.getIntPtrConstant(0, DL));
3985       return DAG.getNode(
3986           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
3987           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
3988     }
3989 
3990     // Just promote the int type to i16 which will double the LMUL.
3991     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
3992     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
3993   }
3994 
3995   MVT XLenVT = Subtarget.getXLenVT();
3996   SDValue Mask, VL;
3997   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
3998 
3999   // Calculate VLMAX-1 for the desired SEW.
4000   unsigned MinElts = VecVT.getVectorMinNumElements();
4001   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
4002                               DAG.getConstant(MinElts, DL, XLenVT));
4003   SDValue VLMinus1 =
4004       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
4005 
4006   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
4007   bool IsRV32E64 =
4008       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
4009   SDValue SplatVL;
4010   if (!IsRV32E64)
4011     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
4012   else
4013     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
4014 
4015   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
4016   SDValue Indices =
4017       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
4018 
4019   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
4020 }
4021 
4022 SDValue
4023 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
4024                                                      SelectionDAG &DAG) const {
4025   auto *Load = cast<LoadSDNode>(Op);
4026 
4027   SDLoc DL(Op);
4028   MVT VT = Op.getSimpleValueType();
4029   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4030 
4031   SDValue VL =
4032       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4033 
4034   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4035   SDValue NewLoad = DAG.getMemIntrinsicNode(
4036       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
4037       Load->getMemoryVT(), Load->getMemOperand());
4038 
4039   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4040   return DAG.getMergeValues({Result, Load->getChain()}, DL);
4041 }
4042 
4043 SDValue
4044 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
4045                                                       SelectionDAG &DAG) const {
4046   auto *Store = cast<StoreSDNode>(Op);
4047 
4048   SDLoc DL(Op);
4049   SDValue StoreVal = Store->getValue();
4050   MVT VT = StoreVal.getSimpleValueType();
4051 
4052   // If the size less than a byte, we need to pad with zeros to make a byte.
4053   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
4054     VT = MVT::v8i1;
4055     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
4056                            DAG.getConstant(0, DL, VT), StoreVal,
4057                            DAG.getIntPtrConstant(0, DL));
4058   }
4059 
4060   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4061 
4062   SDValue VL =
4063       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4064 
4065   SDValue NewValue =
4066       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
4067   return DAG.getMemIntrinsicNode(
4068       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
4069       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
4070       Store->getMemoryVT(), Store->getMemOperand());
4071 }
4072 
4073 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
4074   auto *Load = cast<MaskedLoadSDNode>(Op);
4075 
4076   SDLoc DL(Op);
4077   MVT VT = Op.getSimpleValueType();
4078   MVT XLenVT = Subtarget.getXLenVT();
4079 
4080   SDValue Mask = Load->getMask();
4081   SDValue PassThru = Load->getPassThru();
4082   SDValue VL;
4083 
4084   MVT ContainerVT = VT;
4085   if (VT.isFixedLengthVector()) {
4086     ContainerVT = getContainerForFixedLengthVector(VT);
4087     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4088 
4089     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4090     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4091     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4092   } else
4093     VL = DAG.getRegister(RISCV::X0, XLenVT);
4094 
4095   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4096   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
4097   SDValue Ops[] = {Load->getChain(),   IntID, PassThru,
4098                    Load->getBasePtr(), Mask,  VL};
4099   SDValue Result =
4100       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4101                               Load->getMemoryVT(), Load->getMemOperand());
4102   SDValue Chain = Result.getValue(1);
4103 
4104   if (VT.isFixedLengthVector())
4105     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4106 
4107   return DAG.getMergeValues({Result, Chain}, DL);
4108 }
4109 
4110 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const {
4111   auto *Store = cast<MaskedStoreSDNode>(Op);
4112 
4113   SDLoc DL(Op);
4114   SDValue Val = Store->getValue();
4115   SDValue Mask = Store->getMask();
4116   MVT VT = Val.getSimpleValueType();
4117   MVT XLenVT = Subtarget.getXLenVT();
4118   SDValue VL;
4119 
4120   MVT ContainerVT = VT;
4121   if (VT.isFixedLengthVector()) {
4122     ContainerVT = getContainerForFixedLengthVector(VT);
4123     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4124 
4125     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4126     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4127     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4128   } else
4129     VL = DAG.getRegister(RISCV::X0, XLenVT);
4130 
4131   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
4132   return DAG.getMemIntrinsicNode(
4133       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
4134       {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL},
4135       Store->getMemoryVT(), Store->getMemOperand());
4136 }
4137 
4138 SDValue
4139 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
4140                                                       SelectionDAG &DAG) const {
4141   MVT InVT = Op.getOperand(0).getSimpleValueType();
4142   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
4143 
4144   MVT VT = Op.getSimpleValueType();
4145 
4146   SDValue Op1 =
4147       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4148   SDValue Op2 =
4149       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4150 
4151   SDLoc DL(Op);
4152   SDValue VL =
4153       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4154 
4155   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4156   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4157 
4158   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
4159                             Op.getOperand(2), Mask, VL);
4160 
4161   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
4162 }
4163 
4164 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
4165     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
4166   MVT VT = Op.getSimpleValueType();
4167 
4168   if (VT.getVectorElementType() == MVT::i1)
4169     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
4170 
4171   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
4172 }
4173 
4174 // Lower vector ABS to smax(X, sub(0, X)).
4175 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
4176   SDLoc DL(Op);
4177   MVT VT = Op.getSimpleValueType();
4178   SDValue X = Op.getOperand(0);
4179 
4180   assert(VT.isFixedLengthVector() && "Unexpected type");
4181 
4182   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4183   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
4184 
4185   SDValue Mask, VL;
4186   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4187 
4188   SDValue SplatZero =
4189       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4190                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4191   SDValue NegX =
4192       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
4193   SDValue Max =
4194       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
4195 
4196   return convertFromScalableVector(VT, Max, DAG, Subtarget);
4197 }
4198 
4199 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
4200     SDValue Op, SelectionDAG &DAG) const {
4201   SDLoc DL(Op);
4202   MVT VT = Op.getSimpleValueType();
4203   SDValue Mag = Op.getOperand(0);
4204   SDValue Sign = Op.getOperand(1);
4205   assert(Mag.getValueType() == Sign.getValueType() &&
4206          "Can only handle COPYSIGN with matching types.");
4207 
4208   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4209   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
4210   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
4211 
4212   SDValue Mask, VL;
4213   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4214 
4215   SDValue CopySign =
4216       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
4217 
4218   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
4219 }
4220 
4221 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
4222     SDValue Op, SelectionDAG &DAG) const {
4223   MVT VT = Op.getSimpleValueType();
4224   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4225 
4226   MVT I1ContainerVT =
4227       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4228 
4229   SDValue CC =
4230       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
4231   SDValue Op1 =
4232       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4233   SDValue Op2 =
4234       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
4235 
4236   SDLoc DL(Op);
4237   SDValue Mask, VL;
4238   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4239 
4240   SDValue Select =
4241       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
4242 
4243   return convertFromScalableVector(VT, Select, DAG, Subtarget);
4244 }
4245 
4246 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
4247                                                unsigned NewOpc,
4248                                                bool HasMask) const {
4249   MVT VT = Op.getSimpleValueType();
4250   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4251 
4252   // Create list of operands by converting existing ones to scalable types.
4253   SmallVector<SDValue, 6> Ops;
4254   for (const SDValue &V : Op->op_values()) {
4255     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4256 
4257     // Pass through non-vector operands.
4258     if (!V.getValueType().isVector()) {
4259       Ops.push_back(V);
4260       continue;
4261     }
4262 
4263     // "cast" fixed length vector to a scalable vector.
4264     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
4265            "Only fixed length vectors are supported!");
4266     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4267   }
4268 
4269   SDLoc DL(Op);
4270   SDValue Mask, VL;
4271   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4272   if (HasMask)
4273     Ops.push_back(Mask);
4274   Ops.push_back(VL);
4275 
4276   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
4277   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
4278 }
4279 
4280 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
4281 // * Operands of each node are assumed to be in the same order.
4282 // * The EVL operand is promoted from i32 to i64 on RV64.
4283 // * Fixed-length vectors are converted to their scalable-vector container
4284 //   types.
4285 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
4286                                        unsigned RISCVISDOpc) const {
4287   SDLoc DL(Op);
4288   MVT VT = Op.getSimpleValueType();
4289   Optional<unsigned> EVLIdx = ISD::getVPExplicitVectorLengthIdx(Op.getOpcode());
4290 
4291   SmallVector<SDValue, 4> Ops;
4292   MVT XLenVT = Subtarget.getXLenVT();
4293 
4294   for (const auto &OpIdx : enumerate(Op->ops())) {
4295     SDValue V = OpIdx.value();
4296     if ((unsigned)OpIdx.index() == EVLIdx) {
4297       Ops.push_back(DAG.getZExtOrTrunc(V, DL, XLenVT));
4298       continue;
4299     }
4300     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4301     // Pass through operands which aren't fixed-length vectors.
4302     if (!V.getValueType().isFixedLengthVector()) {
4303       Ops.push_back(V);
4304       continue;
4305     }
4306     // "cast" fixed length vector to a scalable vector.
4307     MVT OpVT = V.getSimpleValueType();
4308     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
4309     assert(useRVVForFixedLengthVectorVT(OpVT) &&
4310            "Only fixed length vectors are supported!");
4311     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4312   }
4313 
4314   if (!VT.isFixedLengthVector())
4315     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
4316 
4317   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4318 
4319   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
4320 
4321   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
4322 }
4323 
4324 // Custom lower MGATHER to a legalized form for RVV. It will then be matched to
4325 // a RVV indexed load. The RVV indexed load instructions only support the
4326 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4327 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4328 // indexing is extended to the XLEN value type and scaled accordingly.
4329 SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
4330   auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
4331   SDLoc DL(Op);
4332 
4333   SDValue Index = MGN->getIndex();
4334   SDValue Mask = MGN->getMask();
4335   SDValue PassThru = MGN->getPassThru();
4336 
4337   MVT VT = Op.getSimpleValueType();
4338   MVT IndexVT = Index.getSimpleValueType();
4339   MVT XLenVT = Subtarget.getXLenVT();
4340 
4341   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4342          "Unexpected VTs!");
4343   assert(MGN->getBasePtr().getSimpleValueType() == XLenVT &&
4344          "Unexpected pointer type");
4345   // Targets have to explicitly opt-in for extending vector loads.
4346   assert(MGN->getExtensionType() == ISD::NON_EXTLOAD &&
4347          "Unexpected extending MGATHER");
4348 
4349   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4350   // the selection of the masked intrinsics doesn't do this for us.
4351   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4352 
4353   SDValue VL;
4354   MVT ContainerVT = VT;
4355   if (VT.isFixedLengthVector()) {
4356     // We need to use the larger of the result and index type to determine the
4357     // scalable type to use so we don't increase LMUL for any operand/result.
4358     if (VT.bitsGE(IndexVT)) {
4359       ContainerVT = getContainerForFixedLengthVector(VT);
4360       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4361                                  ContainerVT.getVectorElementCount());
4362     } else {
4363       IndexVT = getContainerForFixedLengthVector(IndexVT);
4364       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
4365                                      IndexVT.getVectorElementCount());
4366     }
4367 
4368     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4369 
4370     if (!IsUnmasked) {
4371       MVT MaskVT =
4372           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4373       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4374       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4375     }
4376 
4377     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4378   } else
4379     VL = DAG.getRegister(RISCV::X0, XLenVT);
4380 
4381   unsigned IntID =
4382       IsUnmasked ? Intrinsic::riscv_vloxei : Intrinsic::riscv_vloxei_mask;
4383   SmallVector<SDValue, 8> Ops{MGN->getChain(),
4384                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4385   if (!IsUnmasked)
4386     Ops.push_back(PassThru);
4387   Ops.push_back(MGN->getBasePtr());
4388   Ops.push_back(Index);
4389   if (!IsUnmasked)
4390     Ops.push_back(Mask);
4391   Ops.push_back(VL);
4392 
4393   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4394   SDValue Result =
4395       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4396                               MGN->getMemoryVT(), MGN->getMemOperand());
4397   SDValue Chain = Result.getValue(1);
4398 
4399   if (VT.isFixedLengthVector())
4400     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4401 
4402   return DAG.getMergeValues({Result, Chain}, DL);
4403 }
4404 
4405 // Custom lower MSCATTER to a legalized form for RVV. It will then be matched to
4406 // a RVV indexed store. The RVV indexed store instructions only support the
4407 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4408 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4409 // indexing is extended to the XLEN value type and scaled accordingly.
4410 SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
4411                                            SelectionDAG &DAG) const {
4412   auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
4413   SDLoc DL(Op);
4414   SDValue Index = MSN->getIndex();
4415   SDValue Mask = MSN->getMask();
4416   SDValue Val = MSN->getValue();
4417 
4418   MVT VT = Val.getSimpleValueType();
4419   MVT IndexVT = Index.getSimpleValueType();
4420   MVT XLenVT = Subtarget.getXLenVT();
4421 
4422   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4423          "Unexpected VTs!");
4424   assert(MSN->getBasePtr().getSimpleValueType() == XLenVT &&
4425          "Unexpected pointer type");
4426   // Targets have to explicitly opt-in for extending vector loads and
4427   // truncating vector stores.
4428   assert(!MSN->isTruncatingStore() && "Unexpected extending MSCATTER");
4429 
4430   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4431   // the selection of the masked intrinsics doesn't do this for us.
4432   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4433 
4434   SDValue VL;
4435   if (VT.isFixedLengthVector()) {
4436     // We need to use the larger of the value and index type to determine the
4437     // scalable type to use so we don't increase LMUL for any operand/result.
4438     MVT ContainerVT;
4439     if (VT.bitsGE(IndexVT)) {
4440       ContainerVT = getContainerForFixedLengthVector(VT);
4441       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4442                                  ContainerVT.getVectorElementCount());
4443     } else {
4444       IndexVT = getContainerForFixedLengthVector(IndexVT);
4445       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4446                                      IndexVT.getVectorElementCount());
4447     }
4448 
4449     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4450     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4451 
4452     if (!IsUnmasked) {
4453       MVT MaskVT =
4454           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4455       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4456     }
4457 
4458     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4459   } else
4460     VL = DAG.getRegister(RISCV::X0, XLenVT);
4461 
4462   unsigned IntID =
4463       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
4464   SmallVector<SDValue, 8> Ops{MSN->getChain(),
4465                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4466   Ops.push_back(Val);
4467   Ops.push_back(MSN->getBasePtr());
4468   Ops.push_back(Index);
4469   if (!IsUnmasked)
4470     Ops.push_back(Mask);
4471   Ops.push_back(VL);
4472 
4473   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
4474                                  MSN->getMemoryVT(), MSN->getMemOperand());
4475 }
4476 
4477 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
4478                                                SelectionDAG &DAG) const {
4479   const MVT XLenVT = Subtarget.getXLenVT();
4480   SDLoc DL(Op);
4481   SDValue Chain = Op->getOperand(0);
4482   SDValue SysRegNo = DAG.getConstant(
4483       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4484   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
4485   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
4486 
4487   // Encoding used for rounding mode in RISCV differs from that used in
4488   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
4489   // table, which consists of a sequence of 4-bit fields, each representing
4490   // corresponding FLT_ROUNDS mode.
4491   static const int Table =
4492       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
4493       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
4494       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
4495       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
4496       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
4497 
4498   SDValue Shift =
4499       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
4500   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4501                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4502   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4503                                DAG.getConstant(7, DL, XLenVT));
4504 
4505   return DAG.getMergeValues({Masked, Chain}, DL);
4506 }
4507 
4508 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
4509                                                SelectionDAG &DAG) const {
4510   const MVT XLenVT = Subtarget.getXLenVT();
4511   SDLoc DL(Op);
4512   SDValue Chain = Op->getOperand(0);
4513   SDValue RMValue = Op->getOperand(1);
4514   SDValue SysRegNo = DAG.getConstant(
4515       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4516 
4517   // Encoding used for rounding mode in RISCV differs from that used in
4518   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
4519   // a table, which consists of a sequence of 4-bit fields, each representing
4520   // corresponding RISCV mode.
4521   static const unsigned Table =
4522       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
4523       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
4524       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
4525       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
4526       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
4527 
4528   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
4529                               DAG.getConstant(2, DL, XLenVT));
4530   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4531                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4532   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4533                         DAG.getConstant(0x7, DL, XLenVT));
4534   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
4535                      RMValue);
4536 }
4537 
4538 // Returns the opcode of the target-specific SDNode that implements the 32-bit
4539 // form of the given Opcode.
4540 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
4541   switch (Opcode) {
4542   default:
4543     llvm_unreachable("Unexpected opcode");
4544   case ISD::SHL:
4545     return RISCVISD::SLLW;
4546   case ISD::SRA:
4547     return RISCVISD::SRAW;
4548   case ISD::SRL:
4549     return RISCVISD::SRLW;
4550   case ISD::SDIV:
4551     return RISCVISD::DIVW;
4552   case ISD::UDIV:
4553     return RISCVISD::DIVUW;
4554   case ISD::UREM:
4555     return RISCVISD::REMUW;
4556   case ISD::ROTL:
4557     return RISCVISD::ROLW;
4558   case ISD::ROTR:
4559     return RISCVISD::RORW;
4560   case RISCVISD::GREV:
4561     return RISCVISD::GREVW;
4562   case RISCVISD::GORC:
4563     return RISCVISD::GORCW;
4564   }
4565 }
4566 
4567 // Converts the given 32-bit operation to a target-specific SelectionDAG node.
4568 // Because i32 isn't a legal type for RV64, these operations would otherwise
4569 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
4570 // later one because the fact the operation was originally of type i32 is
4571 // lost.
4572 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
4573                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
4574   SDLoc DL(N);
4575   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4576   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
4577   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
4578   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4579   // ReplaceNodeResults requires we maintain the same type for the return value.
4580   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
4581 }
4582 
4583 // Converts the given 32-bit operation to a i64 operation with signed extension
4584 // semantic to reduce the signed extension instructions.
4585 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
4586   SDLoc DL(N);
4587   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4588   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4589   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
4590   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
4591                                DAG.getValueType(MVT::i32));
4592   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
4593 }
4594 
4595 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
4596                                              SmallVectorImpl<SDValue> &Results,
4597                                              SelectionDAG &DAG) const {
4598   SDLoc DL(N);
4599   switch (N->getOpcode()) {
4600   default:
4601     llvm_unreachable("Don't know how to custom type legalize this operation!");
4602   case ISD::STRICT_FP_TO_SINT:
4603   case ISD::STRICT_FP_TO_UINT:
4604   case ISD::FP_TO_SINT:
4605   case ISD::FP_TO_UINT: {
4606     bool IsStrict = N->isStrictFPOpcode();
4607     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4608            "Unexpected custom legalisation");
4609     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
4610     // If the FP type needs to be softened, emit a library call using the 'si'
4611     // version. If we left it to default legalization we'd end up with 'di'. If
4612     // the FP type doesn't need to be softened just let generic type
4613     // legalization promote the result type.
4614     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
4615         TargetLowering::TypeSoftenFloat)
4616       return;
4617     RTLIB::Libcall LC;
4618     if (N->getOpcode() == ISD::FP_TO_SINT ||
4619         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
4620       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
4621     else
4622       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
4623     MakeLibCallOptions CallOptions;
4624     EVT OpVT = Op0.getValueType();
4625     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
4626     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
4627     SDValue Result;
4628     std::tie(Result, Chain) =
4629         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
4630     Results.push_back(Result);
4631     if (IsStrict)
4632       Results.push_back(Chain);
4633     break;
4634   }
4635   case ISD::READCYCLECOUNTER: {
4636     assert(!Subtarget.is64Bit() &&
4637            "READCYCLECOUNTER only has custom type legalization on riscv32");
4638 
4639     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4640     SDValue RCW =
4641         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
4642 
4643     Results.push_back(
4644         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
4645     Results.push_back(RCW.getValue(2));
4646     break;
4647   }
4648   case ISD::MUL: {
4649     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
4650     unsigned XLen = Subtarget.getXLen();
4651     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
4652     if (Size > XLen) {
4653       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
4654       SDValue LHS = N->getOperand(0);
4655       SDValue RHS = N->getOperand(1);
4656       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
4657 
4658       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
4659       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
4660       // We need exactly one side to be unsigned.
4661       if (LHSIsU == RHSIsU)
4662         return;
4663 
4664       auto MakeMULPair = [&](SDValue S, SDValue U) {
4665         MVT XLenVT = Subtarget.getXLenVT();
4666         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
4667         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
4668         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
4669         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
4670         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
4671       };
4672 
4673       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
4674       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
4675 
4676       // The other operand should be signed, but still prefer MULH when
4677       // possible.
4678       if (RHSIsU && LHSIsS && !RHSIsS)
4679         Results.push_back(MakeMULPair(LHS, RHS));
4680       else if (LHSIsU && RHSIsS && !LHSIsS)
4681         Results.push_back(MakeMULPair(RHS, LHS));
4682 
4683       return;
4684     }
4685     LLVM_FALLTHROUGH;
4686   }
4687   case ISD::ADD:
4688   case ISD::SUB:
4689     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4690            "Unexpected custom legalisation");
4691     if (N->getOperand(1).getOpcode() == ISD::Constant)
4692       return;
4693     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
4694     break;
4695   case ISD::SHL:
4696   case ISD::SRA:
4697   case ISD::SRL:
4698     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4699            "Unexpected custom legalisation");
4700     if (N->getOperand(1).getOpcode() == ISD::Constant)
4701       return;
4702     Results.push_back(customLegalizeToWOp(N, DAG));
4703     break;
4704   case ISD::ROTL:
4705   case ISD::ROTR:
4706     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4707            "Unexpected custom legalisation");
4708     Results.push_back(customLegalizeToWOp(N, DAG));
4709     break;
4710   case ISD::CTTZ:
4711   case ISD::CTTZ_ZERO_UNDEF:
4712   case ISD::CTLZ:
4713   case ISD::CTLZ_ZERO_UNDEF: {
4714     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4715            "Unexpected custom legalisation");
4716 
4717     SDValue NewOp0 =
4718         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4719     bool IsCTZ =
4720         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
4721     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
4722     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
4723     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4724     return;
4725   }
4726   case ISD::SDIV:
4727   case ISD::UDIV:
4728   case ISD::UREM: {
4729     MVT VT = N->getSimpleValueType(0);
4730     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
4731            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
4732            "Unexpected custom legalisation");
4733     if (N->getOperand(0).getOpcode() == ISD::Constant ||
4734         N->getOperand(1).getOpcode() == ISD::Constant)
4735       return;
4736 
4737     // If the input is i32, use ANY_EXTEND since the W instructions don't read
4738     // the upper 32 bits. For other types we need to sign or zero extend
4739     // based on the opcode.
4740     unsigned ExtOpc = ISD::ANY_EXTEND;
4741     if (VT != MVT::i32)
4742       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
4743                                            : ISD::ZERO_EXTEND;
4744 
4745     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
4746     break;
4747   }
4748   case ISD::UADDO:
4749   case ISD::USUBO: {
4750     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4751            "Unexpected custom legalisation");
4752     bool IsAdd = N->getOpcode() == ISD::UADDO;
4753     // Create an ADDW or SUBW.
4754     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4755     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4756     SDValue Res =
4757         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
4758     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
4759                       DAG.getValueType(MVT::i32));
4760 
4761     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
4762     // Since the inputs are sign extended from i32, this is equivalent to
4763     // comparing the lower 32 bits.
4764     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4765     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
4766                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
4767 
4768     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4769     Results.push_back(Overflow);
4770     return;
4771   }
4772   case ISD::UADDSAT:
4773   case ISD::USUBSAT: {
4774     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4775            "Unexpected custom legalisation");
4776     if (Subtarget.hasStdExtZbb()) {
4777       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
4778       // sign extend allows overflow of the lower 32 bits to be detected on
4779       // the promoted size.
4780       SDValue LHS =
4781           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4782       SDValue RHS =
4783           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
4784       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
4785       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4786       return;
4787     }
4788 
4789     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
4790     // promotion for UADDO/USUBO.
4791     Results.push_back(expandAddSubSat(N, DAG));
4792     return;
4793   }
4794   case ISD::BITCAST: {
4795     EVT VT = N->getValueType(0);
4796     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
4797     SDValue Op0 = N->getOperand(0);
4798     EVT Op0VT = Op0.getValueType();
4799     MVT XLenVT = Subtarget.getXLenVT();
4800     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
4801       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
4802       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
4803     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
4804                Subtarget.hasStdExtF()) {
4805       SDValue FPConv =
4806           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
4807       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
4808     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
4809                isTypeLegal(Op0VT)) {
4810       // Custom-legalize bitcasts from fixed-length vector types to illegal
4811       // scalar types in order to improve codegen. Bitcast the vector to a
4812       // one-element vector type whose element type is the same as the result
4813       // type, and extract the first element.
4814       LLVMContext &Context = *DAG.getContext();
4815       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
4816       Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
4817                                     DAG.getConstant(0, DL, XLenVT)));
4818     }
4819     break;
4820   }
4821   case RISCVISD::GREV:
4822   case RISCVISD::GORC: {
4823     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4824            "Unexpected custom legalisation");
4825     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
4826     // This is similar to customLegalizeToWOp, except that we pass the second
4827     // operand (a TargetConstant) straight through: it is already of type
4828     // XLenVT.
4829     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4830     SDValue NewOp0 =
4831         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4832     SDValue NewOp1 =
4833         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4834     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4835     // ReplaceNodeResults requires we maintain the same type for the return
4836     // value.
4837     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4838     break;
4839   }
4840   case RISCVISD::SHFL: {
4841     // There is no SHFLIW instruction, but we can just promote the operation.
4842     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4843            "Unexpected custom legalisation");
4844     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
4845     SDValue NewOp0 =
4846         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4847     SDValue NewOp1 =
4848         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4849     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
4850     // ReplaceNodeResults requires we maintain the same type for the return
4851     // value.
4852     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4853     break;
4854   }
4855   case ISD::BSWAP:
4856   case ISD::BITREVERSE: {
4857     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4858            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
4859     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
4860                                  N->getOperand(0));
4861     unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24;
4862     SDValue GREVIW = DAG.getNode(RISCVISD::GREVW, DL, MVT::i64, NewOp0,
4863                                  DAG.getConstant(Imm, DL, MVT::i64));
4864     // ReplaceNodeResults requires we maintain the same type for the return
4865     // value.
4866     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW));
4867     break;
4868   }
4869   case ISD::FSHL:
4870   case ISD::FSHR: {
4871     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4872            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
4873     SDValue NewOp0 =
4874         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4875     SDValue NewOp1 =
4876         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4877     SDValue NewOp2 =
4878         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4879     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
4880     // Mask the shift amount to 5 bits.
4881     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
4882                          DAG.getConstant(0x1f, DL, MVT::i64));
4883     unsigned Opc =
4884         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
4885     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
4886     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
4887     break;
4888   }
4889   case ISD::EXTRACT_VECTOR_ELT: {
4890     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
4891     // type is illegal (currently only vXi64 RV32).
4892     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
4893     // transferred to the destination register. We issue two of these from the
4894     // upper- and lower- halves of the SEW-bit vector element, slid down to the
4895     // first element.
4896     SDValue Vec = N->getOperand(0);
4897     SDValue Idx = N->getOperand(1);
4898 
4899     // The vector type hasn't been legalized yet so we can't issue target
4900     // specific nodes if it needs legalization.
4901     // FIXME: We would manually legalize if it's important.
4902     if (!isTypeLegal(Vec.getValueType()))
4903       return;
4904 
4905     MVT VecVT = Vec.getSimpleValueType();
4906 
4907     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
4908            VecVT.getVectorElementType() == MVT::i64 &&
4909            "Unexpected EXTRACT_VECTOR_ELT legalization");
4910 
4911     // If this is a fixed vector, we need to convert it to a scalable vector.
4912     MVT ContainerVT = VecVT;
4913     if (VecVT.isFixedLengthVector()) {
4914       ContainerVT = getContainerForFixedLengthVector(VecVT);
4915       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4916     }
4917 
4918     MVT XLenVT = Subtarget.getXLenVT();
4919 
4920     // Use a VL of 1 to avoid processing more elements than we need.
4921     MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
4922     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4923     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4924 
4925     // Unless the index is known to be 0, we must slide the vector down to get
4926     // the desired element into index 0.
4927     if (!isNullConstant(Idx)) {
4928       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4929                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4930     }
4931 
4932     // Extract the lower XLEN bits of the correct vector element.
4933     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4934 
4935     // To extract the upper XLEN bits of the vector element, shift the first
4936     // element right by 32 bits and re-extract the lower XLEN bits.
4937     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4938                                      DAG.getConstant(32, DL, XLenVT), VL);
4939     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
4940                                  ThirtyTwoV, Mask, VL);
4941 
4942     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
4943 
4944     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
4945     break;
4946   }
4947   case ISD::INTRINSIC_WO_CHAIN: {
4948     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
4949     switch (IntNo) {
4950     default:
4951       llvm_unreachable(
4952           "Don't know how to custom type legalize this intrinsic!");
4953     case Intrinsic::riscv_orc_b: {
4954       // Lower to the GORCI encoding for orc.b with the operand extended.
4955       SDValue NewOp =
4956           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4957       // If Zbp is enabled, use GORCIW which will sign extend the result.
4958       unsigned Opc =
4959           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
4960       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
4961                                 DAG.getConstant(7, DL, MVT::i64));
4962       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4963       return;
4964     }
4965     case Intrinsic::riscv_grev:
4966     case Intrinsic::riscv_gorc: {
4967       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4968              "Unexpected custom legalisation");
4969       SDValue NewOp1 =
4970           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4971       SDValue NewOp2 =
4972           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4973       unsigned Opc =
4974           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
4975       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
4976       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4977       break;
4978     }
4979     case Intrinsic::riscv_shfl:
4980     case Intrinsic::riscv_unshfl: {
4981       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4982              "Unexpected custom legalisation");
4983       SDValue NewOp1 =
4984           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4985       SDValue NewOp2 =
4986           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4987       unsigned Opc =
4988           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
4989       if (isa<ConstantSDNode>(N->getOperand(2))) {
4990         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
4991                              DAG.getConstant(0xf, DL, MVT::i64));
4992         Opc =
4993             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4994       }
4995       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
4996       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4997       break;
4998     }
4999     case Intrinsic::riscv_bcompress:
5000     case Intrinsic::riscv_bdecompress: {
5001       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5002              "Unexpected custom legalisation");
5003       SDValue NewOp1 =
5004           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5005       SDValue NewOp2 =
5006           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5007       unsigned Opc = IntNo == Intrinsic::riscv_bcompress
5008                          ? RISCVISD::BCOMPRESSW
5009                          : RISCVISD::BDECOMPRESSW;
5010       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5011       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5012       break;
5013     }
5014     case Intrinsic::riscv_vmv_x_s: {
5015       EVT VT = N->getValueType(0);
5016       MVT XLenVT = Subtarget.getXLenVT();
5017       if (VT.bitsLT(XLenVT)) {
5018         // Simple case just extract using vmv.x.s and truncate.
5019         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
5020                                       Subtarget.getXLenVT(), N->getOperand(1));
5021         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
5022         return;
5023       }
5024 
5025       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
5026              "Unexpected custom legalization");
5027 
5028       // We need to do the move in two steps.
5029       SDValue Vec = N->getOperand(1);
5030       MVT VecVT = Vec.getSimpleValueType();
5031 
5032       // First extract the lower XLEN bits of the element.
5033       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5034 
5035       // To extract the upper XLEN bits of the vector element, shift the first
5036       // element right by 32 bits and re-extract the lower XLEN bits.
5037       SDValue VL = DAG.getConstant(1, DL, XLenVT);
5038       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5039       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5040       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
5041                                        DAG.getConstant(32, DL, XLenVT), VL);
5042       SDValue LShr32 =
5043           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
5044       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5045 
5046       Results.push_back(
5047           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5048       break;
5049     }
5050     }
5051     break;
5052   }
5053   case ISD::VECREDUCE_ADD:
5054   case ISD::VECREDUCE_AND:
5055   case ISD::VECREDUCE_OR:
5056   case ISD::VECREDUCE_XOR:
5057   case ISD::VECREDUCE_SMAX:
5058   case ISD::VECREDUCE_UMAX:
5059   case ISD::VECREDUCE_SMIN:
5060   case ISD::VECREDUCE_UMIN:
5061     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
5062       Results.push_back(V);
5063     break;
5064   case ISD::FLT_ROUNDS_: {
5065     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
5066     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
5067     Results.push_back(Res.getValue(0));
5068     Results.push_back(Res.getValue(1));
5069     break;
5070   }
5071   }
5072 }
5073 
5074 // A structure to hold one of the bit-manipulation patterns below. Together, a
5075 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
5076 //   (or (and (shl x, 1), 0xAAAAAAAA),
5077 //       (and (srl x, 1), 0x55555555))
5078 struct RISCVBitmanipPat {
5079   SDValue Op;
5080   unsigned ShAmt;
5081   bool IsSHL;
5082 
5083   bool formsPairWith(const RISCVBitmanipPat &Other) const {
5084     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
5085   }
5086 };
5087 
5088 // Matches patterns of the form
5089 //   (and (shl x, C2), (C1 << C2))
5090 //   (and (srl x, C2), C1)
5091 //   (shl (and x, C1), C2)
5092 //   (srl (and x, (C1 << C2)), C2)
5093 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
5094 // The expected masks for each shift amount are specified in BitmanipMasks where
5095 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
5096 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
5097 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
5098 // XLen is 64.
5099 static Optional<RISCVBitmanipPat>
5100 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
5101   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
5102          "Unexpected number of masks");
5103   Optional<uint64_t> Mask;
5104   // Optionally consume a mask around the shift operation.
5105   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
5106     Mask = Op.getConstantOperandVal(1);
5107     Op = Op.getOperand(0);
5108   }
5109   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
5110     return None;
5111   bool IsSHL = Op.getOpcode() == ISD::SHL;
5112 
5113   if (!isa<ConstantSDNode>(Op.getOperand(1)))
5114     return None;
5115   uint64_t ShAmt = Op.getConstantOperandVal(1);
5116 
5117   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5118   if (ShAmt >= Width && !isPowerOf2_64(ShAmt))
5119     return None;
5120   // If we don't have enough masks for 64 bit, then we must be trying to
5121   // match SHFL so we're only allowed to shift 1/4 of the width.
5122   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
5123     return None;
5124 
5125   SDValue Src = Op.getOperand(0);
5126 
5127   // The expected mask is shifted left when the AND is found around SHL
5128   // patterns.
5129   //   ((x >> 1) & 0x55555555)
5130   //   ((x << 1) & 0xAAAAAAAA)
5131   bool SHLExpMask = IsSHL;
5132 
5133   if (!Mask) {
5134     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
5135     // the mask is all ones: consume that now.
5136     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
5137       Mask = Src.getConstantOperandVal(1);
5138       Src = Src.getOperand(0);
5139       // The expected mask is now in fact shifted left for SRL, so reverse the
5140       // decision.
5141       //   ((x & 0xAAAAAAAA) >> 1)
5142       //   ((x & 0x55555555) << 1)
5143       SHLExpMask = !SHLExpMask;
5144     } else {
5145       // Use a default shifted mask of all-ones if there's no AND, truncated
5146       // down to the expected width. This simplifies the logic later on.
5147       Mask = maskTrailingOnes<uint64_t>(Width);
5148       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
5149     }
5150   }
5151 
5152   unsigned MaskIdx = Log2_32(ShAmt);
5153   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5154 
5155   if (SHLExpMask)
5156     ExpMask <<= ShAmt;
5157 
5158   if (Mask != ExpMask)
5159     return None;
5160 
5161   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
5162 }
5163 
5164 // Matches any of the following bit-manipulation patterns:
5165 //   (and (shl x, 1), (0x55555555 << 1))
5166 //   (and (srl x, 1), 0x55555555)
5167 //   (shl (and x, 0x55555555), 1)
5168 //   (srl (and x, (0x55555555 << 1)), 1)
5169 // where the shift amount and mask may vary thus:
5170 //   [1]  = 0x55555555 / 0xAAAAAAAA
5171 //   [2]  = 0x33333333 / 0xCCCCCCCC
5172 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
5173 //   [8]  = 0x00FF00FF / 0xFF00FF00
5174 //   [16] = 0x0000FFFF / 0xFFFFFFFF
5175 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
5176 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
5177   // These are the unshifted masks which we use to match bit-manipulation
5178   // patterns. They may be shifted left in certain circumstances.
5179   static const uint64_t BitmanipMasks[] = {
5180       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
5181       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
5182 
5183   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5184 }
5185 
5186 // Match the following pattern as a GREVI(W) operation
5187 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
5188 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
5189                                const RISCVSubtarget &Subtarget) {
5190   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
5191   EVT VT = Op.getValueType();
5192 
5193   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5194     auto LHS = matchGREVIPat(Op.getOperand(0));
5195     auto RHS = matchGREVIPat(Op.getOperand(1));
5196     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
5197       SDLoc DL(Op);
5198       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
5199                          DAG.getConstant(LHS->ShAmt, DL, VT));
5200     }
5201   }
5202   return SDValue();
5203 }
5204 
5205 // Matches any the following pattern as a GORCI(W) operation
5206 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
5207 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
5208 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
5209 // Note that with the variant of 3.,
5210 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
5211 // the inner pattern will first be matched as GREVI and then the outer
5212 // pattern will be matched to GORC via the first rule above.
5213 // 4.  (or (rotl/rotr x, bitwidth/2), x)
5214 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
5215                                const RISCVSubtarget &Subtarget) {
5216   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
5217   EVT VT = Op.getValueType();
5218 
5219   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5220     SDLoc DL(Op);
5221     SDValue Op0 = Op.getOperand(0);
5222     SDValue Op1 = Op.getOperand(1);
5223 
5224     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
5225       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
5226           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
5227           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
5228         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
5229       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
5230       if ((Reverse.getOpcode() == ISD::ROTL ||
5231            Reverse.getOpcode() == ISD::ROTR) &&
5232           Reverse.getOperand(0) == X &&
5233           isa<ConstantSDNode>(Reverse.getOperand(1))) {
5234         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
5235         if (RotAmt == (VT.getSizeInBits() / 2))
5236           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
5237                              DAG.getConstant(RotAmt, DL, VT));
5238       }
5239       return SDValue();
5240     };
5241 
5242     // Check for either commutable permutation of (or (GREVI x, shamt), x)
5243     if (SDValue V = MatchOROfReverse(Op0, Op1))
5244       return V;
5245     if (SDValue V = MatchOROfReverse(Op1, Op0))
5246       return V;
5247 
5248     // OR is commutable so canonicalize its OR operand to the left
5249     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
5250       std::swap(Op0, Op1);
5251     if (Op0.getOpcode() != ISD::OR)
5252       return SDValue();
5253     SDValue OrOp0 = Op0.getOperand(0);
5254     SDValue OrOp1 = Op0.getOperand(1);
5255     auto LHS = matchGREVIPat(OrOp0);
5256     // OR is commutable so swap the operands and try again: x might have been
5257     // on the left
5258     if (!LHS) {
5259       std::swap(OrOp0, OrOp1);
5260       LHS = matchGREVIPat(OrOp0);
5261     }
5262     auto RHS = matchGREVIPat(Op1);
5263     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
5264       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
5265                          DAG.getConstant(LHS->ShAmt, DL, VT));
5266     }
5267   }
5268   return SDValue();
5269 }
5270 
5271 // Matches any of the following bit-manipulation patterns:
5272 //   (and (shl x, 1), (0x22222222 << 1))
5273 //   (and (srl x, 1), 0x22222222)
5274 //   (shl (and x, 0x22222222), 1)
5275 //   (srl (and x, (0x22222222 << 1)), 1)
5276 // where the shift amount and mask may vary thus:
5277 //   [1]  = 0x22222222 / 0x44444444
5278 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
5279 //   [4]  = 0x00F000F0 / 0x0F000F00
5280 //   [8]  = 0x0000FF00 / 0x00FF0000
5281 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
5282 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
5283   // These are the unshifted masks which we use to match bit-manipulation
5284   // patterns. They may be shifted left in certain circumstances.
5285   static const uint64_t BitmanipMasks[] = {
5286       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
5287       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
5288 
5289   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5290 }
5291 
5292 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
5293 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
5294                                const RISCVSubtarget &Subtarget) {
5295   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
5296   EVT VT = Op.getValueType();
5297 
5298   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
5299     return SDValue();
5300 
5301   SDValue Op0 = Op.getOperand(0);
5302   SDValue Op1 = Op.getOperand(1);
5303 
5304   // Or is commutable so canonicalize the second OR to the LHS.
5305   if (Op0.getOpcode() != ISD::OR)
5306     std::swap(Op0, Op1);
5307   if (Op0.getOpcode() != ISD::OR)
5308     return SDValue();
5309 
5310   // We found an inner OR, so our operands are the operands of the inner OR
5311   // and the other operand of the outer OR.
5312   SDValue A = Op0.getOperand(0);
5313   SDValue B = Op0.getOperand(1);
5314   SDValue C = Op1;
5315 
5316   auto Match1 = matchSHFLPat(A);
5317   auto Match2 = matchSHFLPat(B);
5318 
5319   // If neither matched, we failed.
5320   if (!Match1 && !Match2)
5321     return SDValue();
5322 
5323   // We had at least one match. if one failed, try the remaining C operand.
5324   if (!Match1) {
5325     std::swap(A, C);
5326     Match1 = matchSHFLPat(A);
5327     if (!Match1)
5328       return SDValue();
5329   } else if (!Match2) {
5330     std::swap(B, C);
5331     Match2 = matchSHFLPat(B);
5332     if (!Match2)
5333       return SDValue();
5334   }
5335   assert(Match1 && Match2);
5336 
5337   // Make sure our matches pair up.
5338   if (!Match1->formsPairWith(*Match2))
5339     return SDValue();
5340 
5341   // All the remains is to make sure C is an AND with the same input, that masks
5342   // out the bits that are being shuffled.
5343   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
5344       C.getOperand(0) != Match1->Op)
5345     return SDValue();
5346 
5347   uint64_t Mask = C.getConstantOperandVal(1);
5348 
5349   static const uint64_t BitmanipMasks[] = {
5350       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
5351       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
5352   };
5353 
5354   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5355   unsigned MaskIdx = Log2_32(Match1->ShAmt);
5356   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5357 
5358   if (Mask != ExpMask)
5359     return SDValue();
5360 
5361   SDLoc DL(Op);
5362   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
5363                      DAG.getConstant(Match1->ShAmt, DL, VT));
5364 }
5365 
5366 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
5367 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
5368 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
5369 // not undo itself, but they are redundant.
5370 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
5371   SDValue Src = N->getOperand(0);
5372 
5373   if (Src.getOpcode() != N->getOpcode())
5374     return SDValue();
5375 
5376   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
5377       !isa<ConstantSDNode>(Src.getOperand(1)))
5378     return SDValue();
5379 
5380   unsigned ShAmt1 = N->getConstantOperandVal(1);
5381   unsigned ShAmt2 = Src.getConstantOperandVal(1);
5382   Src = Src.getOperand(0);
5383 
5384   unsigned CombinedShAmt;
5385   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
5386     CombinedShAmt = ShAmt1 | ShAmt2;
5387   else
5388     CombinedShAmt = ShAmt1 ^ ShAmt2;
5389 
5390   if (CombinedShAmt == 0)
5391     return Src;
5392 
5393   SDLoc DL(N);
5394   return DAG.getNode(
5395       N->getOpcode(), DL, N->getValueType(0), Src,
5396       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
5397 }
5398 
5399 // Combine a constant select operand into its use:
5400 //
5401 // (and (select_cc lhs, rhs, cc, -1, c), x)
5402 //   -> (select_cc lhs, rhs, cc, x, (and, x, c))  [AllOnes=1]
5403 // (or  (select_cc lhs, rhs, cc, 0, c), x)
5404 //   -> (select_cc lhs, rhs, cc, x, (or, x, c))  [AllOnes=0]
5405 // (xor (select_cc lhs, rhs, cc, 0, c), x)
5406 //   -> (select_cc lhs, rhs, cc, x, (xor, x, c))  [AllOnes=0]
5407 static SDValue combineSelectCCAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
5408                                      SelectionDAG &DAG, bool AllOnes) {
5409   EVT VT = N->getValueType(0);
5410 
5411   if (Slct.getOpcode() != RISCVISD::SELECT_CC || !Slct.hasOneUse())
5412     return SDValue();
5413 
5414   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
5415     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
5416   };
5417 
5418   bool SwapSelectOps;
5419   SDValue TrueVal = Slct.getOperand(3);
5420   SDValue FalseVal = Slct.getOperand(4);
5421   SDValue NonConstantVal;
5422   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
5423     SwapSelectOps = false;
5424     NonConstantVal = FalseVal;
5425   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
5426     SwapSelectOps = true;
5427     NonConstantVal = TrueVal;
5428   } else
5429     return SDValue();
5430 
5431   // Slct is now know to be the desired identity constant when CC is true.
5432   TrueVal = OtherOp;
5433   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
5434   // Unless SwapSelectOps says CC should be false.
5435   if (SwapSelectOps)
5436     std::swap(TrueVal, FalseVal);
5437 
5438   return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
5439                      {Slct.getOperand(0), Slct.getOperand(1),
5440                       Slct.getOperand(2), TrueVal, FalseVal});
5441 }
5442 
5443 // Attempt combineSelectAndUse on each operand of a commutative operator N.
5444 static SDValue combineSelectCCAndUseCommutative(SDNode *N, SelectionDAG &DAG,
5445                                                 bool AllOnes) {
5446   SDValue N0 = N->getOperand(0);
5447   SDValue N1 = N->getOperand(1);
5448   if (SDValue Result = combineSelectCCAndUse(N, N0, N1, DAG, AllOnes))
5449     return Result;
5450   if (SDValue Result = combineSelectCCAndUse(N, N1, N0, DAG, AllOnes))
5451     return Result;
5452   return SDValue();
5453 }
5454 
5455 static SDValue performANDCombine(SDNode *N,
5456                                  TargetLowering::DAGCombinerInfo &DCI,
5457                                  const RISCVSubtarget &Subtarget) {
5458   SelectionDAG &DAG = DCI.DAG;
5459 
5460   // fold (and (select_cc lhs, rhs, cc, -1, y), x) ->
5461   //      (select lhs, rhs, cc, x, (and x, y))
5462   return combineSelectCCAndUseCommutative(N, DAG, true);
5463 }
5464 
5465 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5466                                 const RISCVSubtarget &Subtarget) {
5467   SelectionDAG &DAG = DCI.DAG;
5468   if (Subtarget.hasStdExtZbp()) {
5469     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
5470       return GREV;
5471     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
5472       return GORC;
5473     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
5474       return SHFL;
5475   }
5476 
5477   // fold (or (select_cc lhs, rhs, cc, 0, y), x) ->
5478   //      (select lhs, rhs, cc, x, (or x, y))
5479   return combineSelectCCAndUseCommutative(N, DAG, false);
5480 }
5481 
5482 static SDValue performXORCombine(SDNode *N,
5483                                  TargetLowering::DAGCombinerInfo &DCI,
5484                                  const RISCVSubtarget &Subtarget) {
5485   SelectionDAG &DAG = DCI.DAG;
5486 
5487   // fold (xor (select_cc lhs, rhs, cc, 0, y), x) ->
5488   //      (select lhs, rhs, cc, x, (xor x, y))
5489   return combineSelectCCAndUseCommutative(N, DAG, false);
5490 }
5491 
5492 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
5493                                                DAGCombinerInfo &DCI) const {
5494   SelectionDAG &DAG = DCI.DAG;
5495 
5496   switch (N->getOpcode()) {
5497   default:
5498     break;
5499   case RISCVISD::SplitF64: {
5500     SDValue Op0 = N->getOperand(0);
5501     // If the input to SplitF64 is just BuildPairF64 then the operation is
5502     // redundant. Instead, use BuildPairF64's operands directly.
5503     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
5504       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
5505 
5506     SDLoc DL(N);
5507 
5508     // It's cheaper to materialise two 32-bit integers than to load a double
5509     // from the constant pool and transfer it to integer registers through the
5510     // stack.
5511     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
5512       APInt V = C->getValueAPF().bitcastToAPInt();
5513       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
5514       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
5515       return DCI.CombineTo(N, Lo, Hi);
5516     }
5517 
5518     // This is a target-specific version of a DAGCombine performed in
5519     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5520     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5521     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5522     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5523         !Op0.getNode()->hasOneUse())
5524       break;
5525     SDValue NewSplitF64 =
5526         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
5527                     Op0.getOperand(0));
5528     SDValue Lo = NewSplitF64.getValue(0);
5529     SDValue Hi = NewSplitF64.getValue(1);
5530     APInt SignBit = APInt::getSignMask(32);
5531     if (Op0.getOpcode() == ISD::FNEG) {
5532       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
5533                                   DAG.getConstant(SignBit, DL, MVT::i32));
5534       return DCI.CombineTo(N, Lo, NewHi);
5535     }
5536     assert(Op0.getOpcode() == ISD::FABS);
5537     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
5538                                 DAG.getConstant(~SignBit, DL, MVT::i32));
5539     return DCI.CombineTo(N, Lo, NewHi);
5540   }
5541   case RISCVISD::SLLW:
5542   case RISCVISD::SRAW:
5543   case RISCVISD::SRLW:
5544   case RISCVISD::ROLW:
5545   case RISCVISD::RORW: {
5546     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5547     SDValue LHS = N->getOperand(0);
5548     SDValue RHS = N->getOperand(1);
5549     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5550     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5551     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
5552         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
5553       if (N->getOpcode() != ISD::DELETED_NODE)
5554         DCI.AddToWorklist(N);
5555       return SDValue(N, 0);
5556     }
5557     break;
5558   }
5559   case RISCVISD::CLZW:
5560   case RISCVISD::CTZW: {
5561     // Only the lower 32 bits of the first operand are read
5562     SDValue Op0 = N->getOperand(0);
5563     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5564     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
5565       if (N->getOpcode() != ISD::DELETED_NODE)
5566         DCI.AddToWorklist(N);
5567       return SDValue(N, 0);
5568     }
5569     break;
5570   }
5571   case RISCVISD::FSL:
5572   case RISCVISD::FSR: {
5573     // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read.
5574     SDValue ShAmt = N->getOperand(2);
5575     unsigned BitWidth = ShAmt.getValueSizeInBits();
5576     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5577     APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
5578     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5579       if (N->getOpcode() != ISD::DELETED_NODE)
5580         DCI.AddToWorklist(N);
5581       return SDValue(N, 0);
5582     }
5583     break;
5584   }
5585   case RISCVISD::FSLW:
5586   case RISCVISD::FSRW: {
5587     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
5588     // read.
5589     SDValue Op0 = N->getOperand(0);
5590     SDValue Op1 = N->getOperand(1);
5591     SDValue ShAmt = N->getOperand(2);
5592     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5593     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
5594     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
5595         SimplifyDemandedBits(Op1, OpMask, DCI) ||
5596         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5597       if (N->getOpcode() != ISD::DELETED_NODE)
5598         DCI.AddToWorklist(N);
5599       return SDValue(N, 0);
5600     }
5601     break;
5602   }
5603   case RISCVISD::GREV:
5604   case RISCVISD::GORC: {
5605     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
5606     SDValue ShAmt = N->getOperand(1);
5607     unsigned BitWidth = ShAmt.getValueSizeInBits();
5608     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5609     APInt ShAmtMask(BitWidth, BitWidth - 1);
5610     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5611       if (N->getOpcode() != ISD::DELETED_NODE)
5612         DCI.AddToWorklist(N);
5613       return SDValue(N, 0);
5614     }
5615 
5616     return combineGREVI_GORCI(N, DCI.DAG);
5617   }
5618   case RISCVISD::GREVW:
5619   case RISCVISD::GORCW: {
5620     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5621     SDValue LHS = N->getOperand(0);
5622     SDValue RHS = N->getOperand(1);
5623     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5624     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5625     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5626         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5627       if (N->getOpcode() != ISD::DELETED_NODE)
5628         DCI.AddToWorklist(N);
5629       return SDValue(N, 0);
5630     }
5631 
5632     return combineGREVI_GORCI(N, DCI.DAG);
5633   }
5634   case RISCVISD::SHFL:
5635   case RISCVISD::UNSHFL: {
5636     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
5637     SDValue ShAmt = N->getOperand(1);
5638     unsigned BitWidth = ShAmt.getValueSizeInBits();
5639     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5640     APInt ShAmtMask(BitWidth, (BitWidth / 2) - 1);
5641     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5642       if (N->getOpcode() != ISD::DELETED_NODE)
5643         DCI.AddToWorklist(N);
5644       return SDValue(N, 0);
5645     }
5646 
5647     break;
5648   }
5649   case RISCVISD::SHFLW:
5650   case RISCVISD::UNSHFLW: {
5651     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5652     SDValue LHS = N->getOperand(0);
5653     SDValue RHS = N->getOperand(1);
5654     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5655     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
5656     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5657         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5658       if (N->getOpcode() != ISD::DELETED_NODE)
5659         DCI.AddToWorklist(N);
5660       return SDValue(N, 0);
5661     }
5662 
5663     break;
5664   }
5665   case RISCVISD::BCOMPRESSW:
5666   case RISCVISD::BDECOMPRESSW: {
5667     // Only the lower 32 bits of LHS and RHS are read.
5668     SDValue LHS = N->getOperand(0);
5669     SDValue RHS = N->getOperand(1);
5670     APInt Mask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5671     if (SimplifyDemandedBits(LHS, Mask, DCI) ||
5672         SimplifyDemandedBits(RHS, Mask, DCI)) {
5673       if (N->getOpcode() != ISD::DELETED_NODE)
5674         DCI.AddToWorklist(N);
5675       return SDValue(N, 0);
5676     }
5677 
5678     break;
5679   }
5680   case RISCVISD::FMV_X_ANYEXTW_RV64: {
5681     SDLoc DL(N);
5682     SDValue Op0 = N->getOperand(0);
5683     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
5684     // conversion is unnecessary and can be replaced with an ANY_EXTEND
5685     // of the FMV_W_X_RV64 operand.
5686     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
5687       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
5688              "Unexpected value type!");
5689       return Op0.getOperand(0);
5690     }
5691 
5692     // This is a target-specific version of a DAGCombine performed in
5693     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5694     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5695     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5696     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5697         !Op0.getNode()->hasOneUse())
5698       break;
5699     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
5700                                  Op0.getOperand(0));
5701     APInt SignBit = APInt::getSignMask(32).sext(64);
5702     if (Op0.getOpcode() == ISD::FNEG)
5703       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
5704                          DAG.getConstant(SignBit, DL, MVT::i64));
5705 
5706     assert(Op0.getOpcode() == ISD::FABS);
5707     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
5708                        DAG.getConstant(~SignBit, DL, MVT::i64));
5709   }
5710   case ISD::AND:
5711     return performANDCombine(N, DCI, Subtarget);
5712   case ISD::OR:
5713     return performORCombine(N, DCI, Subtarget);
5714   case ISD::XOR:
5715     return performXORCombine(N, DCI, Subtarget);
5716   case RISCVISD::SELECT_CC: {
5717     // Transform
5718     SDValue LHS = N->getOperand(0);
5719     SDValue RHS = N->getOperand(1);
5720     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
5721     if (!ISD::isIntEqualitySetCC(CCVal))
5722       break;
5723 
5724     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
5725     //      (select_cc X, Y, lt, trueV, falseV)
5726     // Sometimes the setcc is introduced after select_cc has been formed.
5727     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5728         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5729       // If we're looking for eq 0 instead of ne 0, we need to invert the
5730       // condition.
5731       bool Invert = CCVal == ISD::SETEQ;
5732       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5733       if (Invert)
5734         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5735 
5736       SDLoc DL(N);
5737       RHS = LHS.getOperand(1);
5738       LHS = LHS.getOperand(0);
5739       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5740 
5741       SDValue TargetCC =
5742           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5743       return DAG.getNode(
5744           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5745           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5746     }
5747 
5748     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
5749     //      (select_cc X, Y, eq/ne, trueV, falseV)
5750     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5751       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
5752                          {LHS.getOperand(0), LHS.getOperand(1),
5753                           N->getOperand(2), N->getOperand(3),
5754                           N->getOperand(4)});
5755     // (select_cc X, 1, setne, trueV, falseV) ->
5756     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
5757     // This can occur when legalizing some floating point comparisons.
5758     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5759     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5760       SDLoc DL(N);
5761       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5762       SDValue TargetCC =
5763           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5764       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5765       return DAG.getNode(
5766           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5767           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5768     }
5769 
5770     break;
5771   }
5772   case RISCVISD::BR_CC: {
5773     SDValue LHS = N->getOperand(1);
5774     SDValue RHS = N->getOperand(2);
5775     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
5776     if (!ISD::isIntEqualitySetCC(CCVal))
5777       break;
5778 
5779     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
5780     //      (br_cc X, Y, lt, dest)
5781     // Sometimes the setcc is introduced after br_cc has been formed.
5782     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5783         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5784       // If we're looking for eq 0 instead of ne 0, we need to invert the
5785       // condition.
5786       bool Invert = CCVal == ISD::SETEQ;
5787       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5788       if (Invert)
5789         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5790 
5791       SDLoc DL(N);
5792       RHS = LHS.getOperand(1);
5793       LHS = LHS.getOperand(0);
5794       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5795 
5796       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5797                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
5798                          N->getOperand(4));
5799     }
5800 
5801     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
5802     //      (br_cc X, Y, eq/ne, trueV, falseV)
5803     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5804       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
5805                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
5806                          N->getOperand(3), N->getOperand(4));
5807 
5808     // (br_cc X, 1, setne, br_cc) ->
5809     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
5810     // This can occur when legalizing some floating point comparisons.
5811     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5812     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5813       SDLoc DL(N);
5814       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5815       SDValue TargetCC = DAG.getCondCode(CCVal);
5816       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5817       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5818                          N->getOperand(0), LHS, RHS, TargetCC,
5819                          N->getOperand(4));
5820     }
5821     break;
5822   }
5823   case ISD::FCOPYSIGN: {
5824     EVT VT = N->getValueType(0);
5825     if (!VT.isVector())
5826       break;
5827     // There is a form of VFSGNJ which injects the negated sign of its second
5828     // operand. Try and bubble any FNEG up after the extend/round to produce
5829     // this optimized pattern. Avoid modifying cases where FP_ROUND and
5830     // TRUNC=1.
5831     SDValue In2 = N->getOperand(1);
5832     // Avoid cases where the extend/round has multiple uses, as duplicating
5833     // those is typically more expensive than removing a fneg.
5834     if (!In2.hasOneUse())
5835       break;
5836     if (In2.getOpcode() != ISD::FP_EXTEND &&
5837         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
5838       break;
5839     In2 = In2.getOperand(0);
5840     if (In2.getOpcode() != ISD::FNEG)
5841       break;
5842     SDLoc DL(N);
5843     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
5844     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
5845                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
5846   }
5847   case ISD::MGATHER:
5848   case ISD::MSCATTER: {
5849     if (!DCI.isBeforeLegalize())
5850       break;
5851     MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
5852     SDValue Index = MGSN->getIndex();
5853     EVT IndexVT = Index.getValueType();
5854     MVT XLenVT = Subtarget.getXLenVT();
5855     // RISCV indexed loads only support the "unsigned unscaled" addressing
5856     // mode, so anything else must be manually legalized.
5857     bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
5858                                 (MGSN->isIndexSigned() &&
5859                                  IndexVT.getVectorElementType().bitsLT(XLenVT));
5860     if (!NeedsIdxLegalization)
5861       break;
5862 
5863     SDLoc DL(N);
5864 
5865     // Any index legalization should first promote to XLenVT, so we don't lose
5866     // bits when scaling. This may create an illegal index type so we let
5867     // LLVM's legalization take care of the splitting.
5868     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
5869       IndexVT = IndexVT.changeVectorElementType(XLenVT);
5870       Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
5871                                                 : ISD::ZERO_EXTEND,
5872                           DL, IndexVT, Index);
5873     }
5874 
5875     unsigned Scale = N->getConstantOperandVal(5);
5876     if (MGSN->isIndexScaled() && Scale != 1) {
5877       // Manually scale the indices by the element size.
5878       // TODO: Sanitize the scale operand here?
5879       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
5880       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
5881       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
5882     }
5883 
5884     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
5885     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
5886       return DAG.getMaskedGather(
5887           N->getVTList(), MGSN->getMemoryVT(), DL,
5888           {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
5889            MGSN->getBasePtr(), Index, MGN->getScale()},
5890           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
5891     }
5892     const auto *MSN = cast<MaskedScatterSDNode>(N);
5893     return DAG.getMaskedScatter(
5894         N->getVTList(), MGSN->getMemoryVT(), DL,
5895         {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
5896          Index, MGSN->getScale()},
5897         MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
5898   }
5899   }
5900 
5901   return SDValue();
5902 }
5903 
5904 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
5905     const SDNode *N, CombineLevel Level) const {
5906   // The following folds are only desirable if `(OP _, c1 << c2)` can be
5907   // materialised in fewer instructions than `(OP _, c1)`:
5908   //
5909   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
5910   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
5911   SDValue N0 = N->getOperand(0);
5912   EVT Ty = N0.getValueType();
5913   if (Ty.isScalarInteger() &&
5914       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
5915     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
5916     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
5917     if (C1 && C2) {
5918       const APInt &C1Int = C1->getAPIntValue();
5919       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
5920 
5921       // We can materialise `c1 << c2` into an add immediate, so it's "free",
5922       // and the combine should happen, to potentially allow further combines
5923       // later.
5924       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
5925           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
5926         return true;
5927 
5928       // We can materialise `c1` in an add immediate, so it's "free", and the
5929       // combine should be prevented.
5930       if (C1Int.getMinSignedBits() <= 64 &&
5931           isLegalAddImmediate(C1Int.getSExtValue()))
5932         return false;
5933 
5934       // Neither constant will fit into an immediate, so find materialisation
5935       // costs.
5936       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
5937                                               Subtarget.is64Bit());
5938       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
5939           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
5940 
5941       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
5942       // combine should be prevented.
5943       if (C1Cost < ShiftedC1Cost)
5944         return false;
5945     }
5946   }
5947   return true;
5948 }
5949 
5950 bool RISCVTargetLowering::targetShrinkDemandedConstant(
5951     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
5952     TargetLoweringOpt &TLO) const {
5953   // Delay this optimization as late as possible.
5954   if (!TLO.LegalOps)
5955     return false;
5956 
5957   EVT VT = Op.getValueType();
5958   if (VT.isVector())
5959     return false;
5960 
5961   // Only handle AND for now.
5962   if (Op.getOpcode() != ISD::AND)
5963     return false;
5964 
5965   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
5966   if (!C)
5967     return false;
5968 
5969   const APInt &Mask = C->getAPIntValue();
5970 
5971   // Clear all non-demanded bits initially.
5972   APInt ShrunkMask = Mask & DemandedBits;
5973 
5974   // Try to make a smaller immediate by setting undemanded bits.
5975 
5976   APInt ExpandedMask = Mask | ~DemandedBits;
5977 
5978   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
5979     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
5980   };
5981   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
5982     if (NewMask == Mask)
5983       return true;
5984     SDLoc DL(Op);
5985     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
5986     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
5987     return TLO.CombineTo(Op, NewOp);
5988   };
5989 
5990   // If the shrunk mask fits in sign extended 12 bits, let the target
5991   // independent code apply it.
5992   if (ShrunkMask.isSignedIntN(12))
5993     return false;
5994 
5995   // Preserve (and X, 0xffff) when zext.h is supported.
5996   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
5997     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
5998     if (IsLegalMask(NewMask))
5999       return UseMask(NewMask);
6000   }
6001 
6002   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
6003   if (VT == MVT::i64) {
6004     APInt NewMask = APInt(64, 0xffffffff);
6005     if (IsLegalMask(NewMask))
6006       return UseMask(NewMask);
6007   }
6008 
6009   // For the remaining optimizations, we need to be able to make a negative
6010   // number through a combination of mask and undemanded bits.
6011   if (!ExpandedMask.isNegative())
6012     return false;
6013 
6014   // What is the fewest number of bits we need to represent the negative number.
6015   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
6016 
6017   // Try to make a 12 bit negative immediate. If that fails try to make a 32
6018   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
6019   APInt NewMask = ShrunkMask;
6020   if (MinSignedBits <= 12)
6021     NewMask.setBitsFrom(11);
6022   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
6023     NewMask.setBitsFrom(31);
6024   else
6025     return false;
6026 
6027   // Sanity check that our new mask is a subset of the demanded mask.
6028   assert(IsLegalMask(NewMask));
6029   return UseMask(NewMask);
6030 }
6031 
6032 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
6033                                                         KnownBits &Known,
6034                                                         const APInt &DemandedElts,
6035                                                         const SelectionDAG &DAG,
6036                                                         unsigned Depth) const {
6037   unsigned BitWidth = Known.getBitWidth();
6038   unsigned Opc = Op.getOpcode();
6039   assert((Opc >= ISD::BUILTIN_OP_END ||
6040           Opc == ISD::INTRINSIC_WO_CHAIN ||
6041           Opc == ISD::INTRINSIC_W_CHAIN ||
6042           Opc == ISD::INTRINSIC_VOID) &&
6043          "Should use MaskedValueIsZero if you don't know whether Op"
6044          " is a target node!");
6045 
6046   Known.resetAll();
6047   switch (Opc) {
6048   default: break;
6049   case RISCVISD::SELECT_CC: {
6050     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
6051     // If we don't know any bits, early out.
6052     if (Known.isUnknown())
6053       break;
6054     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
6055 
6056     // Only known if known in both the LHS and RHS.
6057     Known = KnownBits::commonBits(Known, Known2);
6058     break;
6059   }
6060   case RISCVISD::REMUW: {
6061     KnownBits Known2;
6062     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6063     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6064     // We only care about the lower 32 bits.
6065     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
6066     // Restore the original width by sign extending.
6067     Known = Known.sext(BitWidth);
6068     break;
6069   }
6070   case RISCVISD::DIVUW: {
6071     KnownBits Known2;
6072     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6073     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6074     // We only care about the lower 32 bits.
6075     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
6076     // Restore the original width by sign extending.
6077     Known = Known.sext(BitWidth);
6078     break;
6079   }
6080   case RISCVISD::CTZW: {
6081     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6082     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
6083     unsigned LowBits = Log2_32(PossibleTZ) + 1;
6084     Known.Zero.setBitsFrom(LowBits);
6085     break;
6086   }
6087   case RISCVISD::CLZW: {
6088     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6089     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
6090     unsigned LowBits = Log2_32(PossibleLZ) + 1;
6091     Known.Zero.setBitsFrom(LowBits);
6092     break;
6093   }
6094   case RISCVISD::READ_VLENB:
6095     // We assume VLENB is at least 16 bytes.
6096     Known.Zero.setLowBits(4);
6097     break;
6098   case ISD::INTRINSIC_W_CHAIN: {
6099     unsigned IntNo = Op.getConstantOperandVal(1);
6100     switch (IntNo) {
6101     default:
6102       // We can't do anything for most intrinsics.
6103       break;
6104     case Intrinsic::riscv_vsetvli:
6105     case Intrinsic::riscv_vsetvlimax:
6106       // Assume that VL output is positive and would fit in an int32_t.
6107       // TODO: VLEN might be capped at 16 bits in a future V spec update.
6108       if (BitWidth >= 32)
6109         Known.Zero.setBitsFrom(31);
6110       break;
6111     }
6112     break;
6113   }
6114   }
6115 }
6116 
6117 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
6118     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
6119     unsigned Depth) const {
6120   switch (Op.getOpcode()) {
6121   default:
6122     break;
6123   case RISCVISD::SLLW:
6124   case RISCVISD::SRAW:
6125   case RISCVISD::SRLW:
6126   case RISCVISD::DIVW:
6127   case RISCVISD::DIVUW:
6128   case RISCVISD::REMUW:
6129   case RISCVISD::ROLW:
6130   case RISCVISD::RORW:
6131   case RISCVISD::GREVW:
6132   case RISCVISD::GORCW:
6133   case RISCVISD::FSLW:
6134   case RISCVISD::FSRW:
6135   case RISCVISD::SHFLW:
6136   case RISCVISD::UNSHFLW:
6137   case RISCVISD::BCOMPRESSW:
6138   case RISCVISD::BDECOMPRESSW:
6139     // TODO: As the result is sign-extended, this is conservatively correct. A
6140     // more precise answer could be calculated for SRAW depending on known
6141     // bits in the shift amount.
6142     return 33;
6143   case RISCVISD::SHFL:
6144   case RISCVISD::UNSHFL: {
6145     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
6146     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
6147     // will stay within the upper 32 bits. If there were more than 32 sign bits
6148     // before there will be at least 33 sign bits after.
6149     if (Op.getValueType() == MVT::i64 &&
6150         isa<ConstantSDNode>(Op.getOperand(1)) &&
6151         (Op.getConstantOperandVal(1) & 0x10) == 0) {
6152       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
6153       if (Tmp > 32)
6154         return 33;
6155     }
6156     break;
6157   }
6158   case RISCVISD::VMV_X_S:
6159     // The number of sign bits of the scalar result is computed by obtaining the
6160     // element type of the input vector operand, subtracting its width from the
6161     // XLEN, and then adding one (sign bit within the element type). If the
6162     // element type is wider than XLen, the least-significant XLEN bits are
6163     // taken.
6164     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
6165       return 1;
6166     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
6167   }
6168 
6169   return 1;
6170 }
6171 
6172 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
6173                                                   MachineBasicBlock *BB) {
6174   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
6175 
6176   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
6177   // Should the count have wrapped while it was being read, we need to try
6178   // again.
6179   // ...
6180   // read:
6181   // rdcycleh x3 # load high word of cycle
6182   // rdcycle  x2 # load low word of cycle
6183   // rdcycleh x4 # load high word of cycle
6184   // bne x3, x4, read # check if high word reads match, otherwise try again
6185   // ...
6186 
6187   MachineFunction &MF = *BB->getParent();
6188   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6189   MachineFunction::iterator It = ++BB->getIterator();
6190 
6191   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6192   MF.insert(It, LoopMBB);
6193 
6194   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6195   MF.insert(It, DoneMBB);
6196 
6197   // Transfer the remainder of BB and its successor edges to DoneMBB.
6198   DoneMBB->splice(DoneMBB->begin(), BB,
6199                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
6200   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
6201 
6202   BB->addSuccessor(LoopMBB);
6203 
6204   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6205   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6206   Register LoReg = MI.getOperand(0).getReg();
6207   Register HiReg = MI.getOperand(1).getReg();
6208   DebugLoc DL = MI.getDebugLoc();
6209 
6210   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
6211   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
6212       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6213       .addReg(RISCV::X0);
6214   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
6215       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
6216       .addReg(RISCV::X0);
6217   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
6218       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6219       .addReg(RISCV::X0);
6220 
6221   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
6222       .addReg(HiReg)
6223       .addReg(ReadAgainReg)
6224       .addMBB(LoopMBB);
6225 
6226   LoopMBB->addSuccessor(LoopMBB);
6227   LoopMBB->addSuccessor(DoneMBB);
6228 
6229   MI.eraseFromParent();
6230 
6231   return DoneMBB;
6232 }
6233 
6234 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
6235                                              MachineBasicBlock *BB) {
6236   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
6237 
6238   MachineFunction &MF = *BB->getParent();
6239   DebugLoc DL = MI.getDebugLoc();
6240   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6241   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6242   Register LoReg = MI.getOperand(0).getReg();
6243   Register HiReg = MI.getOperand(1).getReg();
6244   Register SrcReg = MI.getOperand(2).getReg();
6245   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
6246   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6247 
6248   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
6249                           RI);
6250   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6251   MachineMemOperand *MMOLo =
6252       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
6253   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6254       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
6255   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
6256       .addFrameIndex(FI)
6257       .addImm(0)
6258       .addMemOperand(MMOLo);
6259   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
6260       .addFrameIndex(FI)
6261       .addImm(4)
6262       .addMemOperand(MMOHi);
6263   MI.eraseFromParent(); // The pseudo instruction is gone now.
6264   return BB;
6265 }
6266 
6267 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
6268                                                  MachineBasicBlock *BB) {
6269   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
6270          "Unexpected instruction");
6271 
6272   MachineFunction &MF = *BB->getParent();
6273   DebugLoc DL = MI.getDebugLoc();
6274   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6275   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6276   Register DstReg = MI.getOperand(0).getReg();
6277   Register LoReg = MI.getOperand(1).getReg();
6278   Register HiReg = MI.getOperand(2).getReg();
6279   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
6280   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6281 
6282   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6283   MachineMemOperand *MMOLo =
6284       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
6285   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6286       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
6287   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6288       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
6289       .addFrameIndex(FI)
6290       .addImm(0)
6291       .addMemOperand(MMOLo);
6292   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6293       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
6294       .addFrameIndex(FI)
6295       .addImm(4)
6296       .addMemOperand(MMOHi);
6297   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
6298   MI.eraseFromParent(); // The pseudo instruction is gone now.
6299   return BB;
6300 }
6301 
6302 static bool isSelectPseudo(MachineInstr &MI) {
6303   switch (MI.getOpcode()) {
6304   default:
6305     return false;
6306   case RISCV::Select_GPR_Using_CC_GPR:
6307   case RISCV::Select_FPR16_Using_CC_GPR:
6308   case RISCV::Select_FPR32_Using_CC_GPR:
6309   case RISCV::Select_FPR64_Using_CC_GPR:
6310     return true;
6311   }
6312 }
6313 
6314 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
6315                                            MachineBasicBlock *BB) {
6316   // To "insert" Select_* instructions, we actually have to insert the triangle
6317   // control-flow pattern.  The incoming instructions know the destination vreg
6318   // to set, the condition code register to branch on, the true/false values to
6319   // select between, and the condcode to use to select the appropriate branch.
6320   //
6321   // We produce the following control flow:
6322   //     HeadMBB
6323   //     |  \
6324   //     |  IfFalseMBB
6325   //     | /
6326   //    TailMBB
6327   //
6328   // When we find a sequence of selects we attempt to optimize their emission
6329   // by sharing the control flow. Currently we only handle cases where we have
6330   // multiple selects with the exact same condition (same LHS, RHS and CC).
6331   // The selects may be interleaved with other instructions if the other
6332   // instructions meet some requirements we deem safe:
6333   // - They are debug instructions. Otherwise,
6334   // - They do not have side-effects, do not access memory and their inputs do
6335   //   not depend on the results of the select pseudo-instructions.
6336   // The TrueV/FalseV operands of the selects cannot depend on the result of
6337   // previous selects in the sequence.
6338   // These conditions could be further relaxed. See the X86 target for a
6339   // related approach and more information.
6340   Register LHS = MI.getOperand(1).getReg();
6341   Register RHS = MI.getOperand(2).getReg();
6342   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
6343 
6344   SmallVector<MachineInstr *, 4> SelectDebugValues;
6345   SmallSet<Register, 4> SelectDests;
6346   SelectDests.insert(MI.getOperand(0).getReg());
6347 
6348   MachineInstr *LastSelectPseudo = &MI;
6349 
6350   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
6351        SequenceMBBI != E; ++SequenceMBBI) {
6352     if (SequenceMBBI->isDebugInstr())
6353       continue;
6354     else if (isSelectPseudo(*SequenceMBBI)) {
6355       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
6356           SequenceMBBI->getOperand(2).getReg() != RHS ||
6357           SequenceMBBI->getOperand(3).getImm() != CC ||
6358           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
6359           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
6360         break;
6361       LastSelectPseudo = &*SequenceMBBI;
6362       SequenceMBBI->collectDebugValues(SelectDebugValues);
6363       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
6364     } else {
6365       if (SequenceMBBI->hasUnmodeledSideEffects() ||
6366           SequenceMBBI->mayLoadOrStore())
6367         break;
6368       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
6369             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
6370           }))
6371         break;
6372     }
6373   }
6374 
6375   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
6376   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6377   DebugLoc DL = MI.getDebugLoc();
6378   MachineFunction::iterator I = ++BB->getIterator();
6379 
6380   MachineBasicBlock *HeadMBB = BB;
6381   MachineFunction *F = BB->getParent();
6382   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
6383   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
6384 
6385   F->insert(I, IfFalseMBB);
6386   F->insert(I, TailMBB);
6387 
6388   // Transfer debug instructions associated with the selects to TailMBB.
6389   for (MachineInstr *DebugInstr : SelectDebugValues) {
6390     TailMBB->push_back(DebugInstr->removeFromParent());
6391   }
6392 
6393   // Move all instructions after the sequence to TailMBB.
6394   TailMBB->splice(TailMBB->end(), HeadMBB,
6395                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
6396   // Update machine-CFG edges by transferring all successors of the current
6397   // block to the new block which will contain the Phi nodes for the selects.
6398   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
6399   // Set the successors for HeadMBB.
6400   HeadMBB->addSuccessor(IfFalseMBB);
6401   HeadMBB->addSuccessor(TailMBB);
6402 
6403   // Insert appropriate branch.
6404   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
6405 
6406   BuildMI(HeadMBB, DL, TII.get(Opcode))
6407     .addReg(LHS)
6408     .addReg(RHS)
6409     .addMBB(TailMBB);
6410 
6411   // IfFalseMBB just falls through to TailMBB.
6412   IfFalseMBB->addSuccessor(TailMBB);
6413 
6414   // Create PHIs for all of the select pseudo-instructions.
6415   auto SelectMBBI = MI.getIterator();
6416   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
6417   auto InsertionPoint = TailMBB->begin();
6418   while (SelectMBBI != SelectEnd) {
6419     auto Next = std::next(SelectMBBI);
6420     if (isSelectPseudo(*SelectMBBI)) {
6421       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
6422       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
6423               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
6424           .addReg(SelectMBBI->getOperand(4).getReg())
6425           .addMBB(HeadMBB)
6426           .addReg(SelectMBBI->getOperand(5).getReg())
6427           .addMBB(IfFalseMBB);
6428       SelectMBBI->eraseFromParent();
6429     }
6430     SelectMBBI = Next;
6431   }
6432 
6433   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
6434   return TailMBB;
6435 }
6436 
6437 static MachineInstr *elideCopies(MachineInstr *MI,
6438                                  const MachineRegisterInfo &MRI) {
6439   while (true) {
6440     if (!MI->isFullCopy())
6441       return MI;
6442     if (!Register::isVirtualRegister(MI->getOperand(1).getReg()))
6443       return nullptr;
6444     MI = MRI.getVRegDef(MI->getOperand(1).getReg());
6445     if (!MI)
6446       return nullptr;
6447   }
6448 }
6449 
6450 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
6451                                     int VLIndex, unsigned SEWIndex,
6452                                     RISCVII::VLMUL VLMul,
6453                                     bool ForceTailAgnostic) {
6454   MachineFunction &MF = *BB->getParent();
6455   DebugLoc DL = MI.getDebugLoc();
6456   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6457 
6458   unsigned Log2SEW = MI.getOperand(SEWIndex).getImm();
6459   unsigned SEW = 1 << Log2SEW;
6460   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
6461 
6462   MachineRegisterInfo &MRI = MF.getRegInfo();
6463 
6464   auto BuildVSETVLI = [&]() {
6465     if (VLIndex >= 0) {
6466       Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
6467       const MachineOperand &VLOp = MI.getOperand(VLIndex);
6468 
6469       // VL can be a register or an immediate.
6470       if (VLOp.isImm())
6471         return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETIVLI))
6472             .addReg(DestReg, RegState::Define | RegState::Dead)
6473             .addImm(VLOp.getImm());
6474 
6475       Register VLReg = MI.getOperand(VLIndex).getReg();
6476       return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
6477           .addReg(DestReg, RegState::Define | RegState::Dead)
6478           .addReg(VLReg);
6479     }
6480 
6481     // With no VL operator in the pseudo, do not modify VL (rd = X0, rs1 = X0).
6482     return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
6483         .addReg(RISCV::X0, RegState::Define | RegState::Dead)
6484         .addReg(RISCV::X0, RegState::Kill);
6485   };
6486 
6487   MachineInstrBuilder MIB = BuildVSETVLI();
6488 
6489   // Default to tail agnostic unless the destination is tied to a source. In
6490   // that case the user would have some control over the tail values. The tail
6491   // policy is also ignored on instructions that only update element 0 like
6492   // vmv.s.x or reductions so use agnostic there to match the common case.
6493   // FIXME: This is conservatively correct, but we might want to detect that
6494   // the input is undefined.
6495   bool TailAgnostic = true;
6496   unsigned UseOpIdx;
6497   if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
6498     TailAgnostic = false;
6499     // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic.
6500     const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
6501     MachineInstr *UseMI = MRI.getVRegDef(UseMO.getReg());
6502     if (UseMI) {
6503       UseMI = elideCopies(UseMI, MRI);
6504       if (UseMI && UseMI->isImplicitDef())
6505         TailAgnostic = true;
6506     }
6507   }
6508 
6509   // For simplicity we reuse the vtype representation here.
6510   MIB.addImm(RISCVVType::encodeVTYPE(VLMul, SEW,
6511                                      /*TailAgnostic*/ TailAgnostic,
6512                                      /*MaskAgnostic*/ false));
6513 
6514   // Remove (now) redundant operands from pseudo
6515   if (VLIndex >= 0 && MI.getOperand(VLIndex).isReg()) {
6516     MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
6517     MI.getOperand(VLIndex).setIsKill(false);
6518   }
6519 
6520   return BB;
6521 }
6522 
6523 MachineBasicBlock *
6524 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
6525                                                  MachineBasicBlock *BB) const {
6526   uint64_t TSFlags = MI.getDesc().TSFlags;
6527 
6528   if (RISCVII::hasSEWOp(TSFlags)) {
6529     unsigned NumOperands = MI.getNumExplicitOperands();
6530     int VLIndex = RISCVII::hasVLOp(TSFlags) ? NumOperands - 2 : -1;
6531     unsigned SEWIndex = NumOperands - 1;
6532     bool ForceTailAgnostic = RISCVII::doesForceTailAgnostic(TSFlags);
6533 
6534     RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);
6535     return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, ForceTailAgnostic);
6536   }
6537 
6538   switch (MI.getOpcode()) {
6539   default:
6540     llvm_unreachable("Unexpected instr type to insert");
6541   case RISCV::ReadCycleWide:
6542     assert(!Subtarget.is64Bit() &&
6543            "ReadCycleWrite is only to be used on riscv32");
6544     return emitReadCycleWidePseudo(MI, BB);
6545   case RISCV::Select_GPR_Using_CC_GPR:
6546   case RISCV::Select_FPR16_Using_CC_GPR:
6547   case RISCV::Select_FPR32_Using_CC_GPR:
6548   case RISCV::Select_FPR64_Using_CC_GPR:
6549     return emitSelectPseudo(MI, BB);
6550   case RISCV::BuildPairF64Pseudo:
6551     return emitBuildPairF64Pseudo(MI, BB);
6552   case RISCV::SplitF64Pseudo:
6553     return emitSplitF64Pseudo(MI, BB);
6554   }
6555 }
6556 
6557 // Calling Convention Implementation.
6558 // The expectations for frontend ABI lowering vary from target to target.
6559 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
6560 // details, but this is a longer term goal. For now, we simply try to keep the
6561 // role of the frontend as simple and well-defined as possible. The rules can
6562 // be summarised as:
6563 // * Never split up large scalar arguments. We handle them here.
6564 // * If a hardfloat calling convention is being used, and the struct may be
6565 // passed in a pair of registers (fp+fp, int+fp), and both registers are
6566 // available, then pass as two separate arguments. If either the GPRs or FPRs
6567 // are exhausted, then pass according to the rule below.
6568 // * If a struct could never be passed in registers or directly in a stack
6569 // slot (as it is larger than 2*XLEN and the floating point rules don't
6570 // apply), then pass it using a pointer with the byval attribute.
6571 // * If a struct is less than 2*XLEN, then coerce to either a two-element
6572 // word-sized array or a 2*XLEN scalar (depending on alignment).
6573 // * The frontend can determine whether a struct is returned by reference or
6574 // not based on its size and fields. If it will be returned by reference, the
6575 // frontend must modify the prototype so a pointer with the sret annotation is
6576 // passed as the first argument. This is not necessary for large scalar
6577 // returns.
6578 // * Struct return values and varargs should be coerced to structs containing
6579 // register-size fields in the same situations they would be for fixed
6580 // arguments.
6581 
6582 static const MCPhysReg ArgGPRs[] = {
6583   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
6584   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
6585 };
6586 static const MCPhysReg ArgFPR16s[] = {
6587   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
6588   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
6589 };
6590 static const MCPhysReg ArgFPR32s[] = {
6591   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
6592   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
6593 };
6594 static const MCPhysReg ArgFPR64s[] = {
6595   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
6596   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
6597 };
6598 // This is an interim calling convention and it may be changed in the future.
6599 static const MCPhysReg ArgVRs[] = {
6600     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
6601     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
6602     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
6603 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
6604                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
6605                                      RISCV::V20M2, RISCV::V22M2};
6606 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
6607                                      RISCV::V20M4};
6608 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
6609 
6610 // Pass a 2*XLEN argument that has been split into two XLEN values through
6611 // registers or the stack as necessary.
6612 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
6613                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
6614                                 MVT ValVT2, MVT LocVT2,
6615                                 ISD::ArgFlagsTy ArgFlags2) {
6616   unsigned XLenInBytes = XLen / 8;
6617   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6618     // At least one half can be passed via register.
6619     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
6620                                      VA1.getLocVT(), CCValAssign::Full));
6621   } else {
6622     // Both halves must be passed on the stack, with proper alignment.
6623     Align StackAlign =
6624         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
6625     State.addLoc(
6626         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
6627                             State.AllocateStack(XLenInBytes, StackAlign),
6628                             VA1.getLocVT(), CCValAssign::Full));
6629     State.addLoc(CCValAssign::getMem(
6630         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6631         LocVT2, CCValAssign::Full));
6632     return false;
6633   }
6634 
6635   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6636     // The second half can also be passed via register.
6637     State.addLoc(
6638         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
6639   } else {
6640     // The second half is passed via the stack, without additional alignment.
6641     State.addLoc(CCValAssign::getMem(
6642         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6643         LocVT2, CCValAssign::Full));
6644   }
6645 
6646   return false;
6647 }
6648 
6649 // Implements the RISC-V calling convention. Returns true upon failure.
6650 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
6651                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
6652                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
6653                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
6654                      Optional<unsigned> FirstMaskArgument) {
6655   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
6656   assert(XLen == 32 || XLen == 64);
6657   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
6658 
6659   // Any return value split in to more than two values can't be returned
6660   // directly. Vectors are returned via the available vector registers.
6661   if (!LocVT.isVector() && IsRet && ValNo > 1)
6662     return true;
6663 
6664   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
6665   // variadic argument, or if no F16/F32 argument registers are available.
6666   bool UseGPRForF16_F32 = true;
6667   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
6668   // variadic argument, or if no F64 argument registers are available.
6669   bool UseGPRForF64 = true;
6670 
6671   switch (ABI) {
6672   default:
6673     llvm_unreachable("Unexpected ABI");
6674   case RISCVABI::ABI_ILP32:
6675   case RISCVABI::ABI_LP64:
6676     break;
6677   case RISCVABI::ABI_ILP32F:
6678   case RISCVABI::ABI_LP64F:
6679     UseGPRForF16_F32 = !IsFixed;
6680     break;
6681   case RISCVABI::ABI_ILP32D:
6682   case RISCVABI::ABI_LP64D:
6683     UseGPRForF16_F32 = !IsFixed;
6684     UseGPRForF64 = !IsFixed;
6685     break;
6686   }
6687 
6688   // FPR16, FPR32, and FPR64 alias each other.
6689   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
6690     UseGPRForF16_F32 = true;
6691     UseGPRForF64 = true;
6692   }
6693 
6694   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
6695   // similar local variables rather than directly checking against the target
6696   // ABI.
6697 
6698   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
6699     LocVT = XLenVT;
6700     LocInfo = CCValAssign::BCvt;
6701   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
6702     LocVT = MVT::i64;
6703     LocInfo = CCValAssign::BCvt;
6704   }
6705 
6706   // If this is a variadic argument, the RISC-V calling convention requires
6707   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
6708   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
6709   // be used regardless of whether the original argument was split during
6710   // legalisation or not. The argument will not be passed by registers if the
6711   // original type is larger than 2*XLEN, so the register alignment rule does
6712   // not apply.
6713   unsigned TwoXLenInBytes = (2 * XLen) / 8;
6714   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
6715       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
6716     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
6717     // Skip 'odd' register if necessary.
6718     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
6719       State.AllocateReg(ArgGPRs);
6720   }
6721 
6722   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
6723   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
6724       State.getPendingArgFlags();
6725 
6726   assert(PendingLocs.size() == PendingArgFlags.size() &&
6727          "PendingLocs and PendingArgFlags out of sync");
6728 
6729   // Handle passing f64 on RV32D with a soft float ABI or when floating point
6730   // registers are exhausted.
6731   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
6732     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
6733            "Can't lower f64 if it is split");
6734     // Depending on available argument GPRS, f64 may be passed in a pair of
6735     // GPRs, split between a GPR and the stack, or passed completely on the
6736     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
6737     // cases.
6738     Register Reg = State.AllocateReg(ArgGPRs);
6739     LocVT = MVT::i32;
6740     if (!Reg) {
6741       unsigned StackOffset = State.AllocateStack(8, Align(8));
6742       State.addLoc(
6743           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
6744       return false;
6745     }
6746     if (!State.AllocateReg(ArgGPRs))
6747       State.AllocateStack(4, Align(4));
6748     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6749     return false;
6750   }
6751 
6752   // Fixed-length vectors are located in the corresponding scalable-vector
6753   // container types.
6754   if (ValVT.isFixedLengthVector())
6755     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
6756 
6757   // Split arguments might be passed indirectly, so keep track of the pending
6758   // values. Split vectors are passed via a mix of registers and indirectly, so
6759   // treat them as we would any other argument.
6760   if (!LocVT.isVector() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
6761     LocVT = XLenVT;
6762     LocInfo = CCValAssign::Indirect;
6763     PendingLocs.push_back(
6764         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
6765     PendingArgFlags.push_back(ArgFlags);
6766     if (!ArgFlags.isSplitEnd()) {
6767       return false;
6768     }
6769   }
6770 
6771   // If the split argument only had two elements, it should be passed directly
6772   // in registers or on the stack.
6773   if (!LocVT.isVector() && ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
6774     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
6775     // Apply the normal calling convention rules to the first half of the
6776     // split argument.
6777     CCValAssign VA = PendingLocs[0];
6778     ISD::ArgFlagsTy AF = PendingArgFlags[0];
6779     PendingLocs.clear();
6780     PendingArgFlags.clear();
6781     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
6782                                ArgFlags);
6783   }
6784 
6785   // Allocate to a register if possible, or else a stack slot.
6786   Register Reg;
6787   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
6788     Reg = State.AllocateReg(ArgFPR16s);
6789   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
6790     Reg = State.AllocateReg(ArgFPR32s);
6791   else if (ValVT == MVT::f64 && !UseGPRForF64)
6792     Reg = State.AllocateReg(ArgFPR64s);
6793   else if (ValVT.isVector()) {
6794     const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
6795     if (RC == &RISCV::VRRegClass) {
6796       // Assign the first mask argument to V0.
6797       // This is an interim calling convention and it may be changed in the
6798       // future.
6799       if (FirstMaskArgument.hasValue() &&
6800           ValNo == FirstMaskArgument.getValue()) {
6801         Reg = State.AllocateReg(RISCV::V0);
6802       } else {
6803         Reg = State.AllocateReg(ArgVRs);
6804       }
6805     } else if (RC == &RISCV::VRM2RegClass) {
6806       Reg = State.AllocateReg(ArgVRM2s);
6807     } else if (RC == &RISCV::VRM4RegClass) {
6808       Reg = State.AllocateReg(ArgVRM4s);
6809     } else if (RC == &RISCV::VRM8RegClass) {
6810       Reg = State.AllocateReg(ArgVRM8s);
6811     } else {
6812       llvm_unreachable("Unhandled class register for ValueType");
6813     }
6814     if (!Reg) {
6815       // For return values, the vector must be passed fully via registers or
6816       // via the stack.
6817       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
6818       // but we're using all of them.
6819       if (IsRet)
6820         return true;
6821       LocInfo = CCValAssign::Indirect;
6822       // Try using a GPR to pass the address
6823       Reg = State.AllocateReg(ArgGPRs);
6824       LocVT = XLenVT;
6825     }
6826   } else
6827     Reg = State.AllocateReg(ArgGPRs);
6828   unsigned StackOffset =
6829       Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
6830 
6831   // If we reach this point and PendingLocs is non-empty, we must be at the
6832   // end of a split argument that must be passed indirectly.
6833   if (!PendingLocs.empty()) {
6834     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
6835     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
6836 
6837     for (auto &It : PendingLocs) {
6838       if (Reg)
6839         It.convertToReg(Reg);
6840       else
6841         It.convertToMem(StackOffset);
6842       State.addLoc(It);
6843     }
6844     PendingLocs.clear();
6845     PendingArgFlags.clear();
6846     return false;
6847   }
6848 
6849   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
6850           (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
6851          "Expected an XLenVT or vector types at this stage");
6852 
6853   if (Reg) {
6854     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6855     return false;
6856   }
6857 
6858   // When a floating-point value is passed on the stack, no bit-conversion is
6859   // needed.
6860   if (ValVT.isFloatingPoint()) {
6861     LocVT = ValVT;
6862     LocInfo = CCValAssign::Full;
6863   }
6864   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
6865   return false;
6866 }
6867 
6868 template <typename ArgTy>
6869 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
6870   for (const auto &ArgIdx : enumerate(Args)) {
6871     MVT ArgVT = ArgIdx.value().VT;
6872     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
6873       return ArgIdx.index();
6874   }
6875   return None;
6876 }
6877 
6878 void RISCVTargetLowering::analyzeInputArgs(
6879     MachineFunction &MF, CCState &CCInfo,
6880     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
6881   unsigned NumArgs = Ins.size();
6882   FunctionType *FType = MF.getFunction().getFunctionType();
6883 
6884   Optional<unsigned> FirstMaskArgument;
6885   if (Subtarget.hasStdExtV())
6886     FirstMaskArgument = preAssignMask(Ins);
6887 
6888   for (unsigned i = 0; i != NumArgs; ++i) {
6889     MVT ArgVT = Ins[i].VT;
6890     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
6891 
6892     Type *ArgTy = nullptr;
6893     if (IsRet)
6894       ArgTy = FType->getReturnType();
6895     else if (Ins[i].isOrigArg())
6896       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
6897 
6898     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
6899     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
6900                  ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
6901                  FirstMaskArgument)) {
6902       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
6903                         << EVT(ArgVT).getEVTString() << '\n');
6904       llvm_unreachable(nullptr);
6905     }
6906   }
6907 }
6908 
6909 void RISCVTargetLowering::analyzeOutputArgs(
6910     MachineFunction &MF, CCState &CCInfo,
6911     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
6912     CallLoweringInfo *CLI) const {
6913   unsigned NumArgs = Outs.size();
6914 
6915   Optional<unsigned> FirstMaskArgument;
6916   if (Subtarget.hasStdExtV())
6917     FirstMaskArgument = preAssignMask(Outs);
6918 
6919   for (unsigned i = 0; i != NumArgs; i++) {
6920     MVT ArgVT = Outs[i].VT;
6921     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
6922     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
6923 
6924     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
6925     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
6926                  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
6927                  FirstMaskArgument)) {
6928       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
6929                         << EVT(ArgVT).getEVTString() << "\n");
6930       llvm_unreachable(nullptr);
6931     }
6932   }
6933 }
6934 
6935 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
6936 // values.
6937 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
6938                                    const CCValAssign &VA, const SDLoc &DL,
6939                                    const RISCVSubtarget &Subtarget) {
6940   switch (VA.getLocInfo()) {
6941   default:
6942     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6943   case CCValAssign::Full:
6944     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
6945       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
6946     break;
6947   case CCValAssign::BCvt:
6948     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
6949       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
6950     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
6951       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
6952     else
6953       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
6954     break;
6955   }
6956   return Val;
6957 }
6958 
6959 // The caller is responsible for loading the full value if the argument is
6960 // passed with CCValAssign::Indirect.
6961 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
6962                                 const CCValAssign &VA, const SDLoc &DL,
6963                                 const RISCVTargetLowering &TLI) {
6964   MachineFunction &MF = DAG.getMachineFunction();
6965   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6966   EVT LocVT = VA.getLocVT();
6967   SDValue Val;
6968   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
6969   Register VReg = RegInfo.createVirtualRegister(RC);
6970   RegInfo.addLiveIn(VA.getLocReg(), VReg);
6971   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
6972 
6973   if (VA.getLocInfo() == CCValAssign::Indirect)
6974     return Val;
6975 
6976   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
6977 }
6978 
6979 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
6980                                    const CCValAssign &VA, const SDLoc &DL,
6981                                    const RISCVSubtarget &Subtarget) {
6982   EVT LocVT = VA.getLocVT();
6983 
6984   switch (VA.getLocInfo()) {
6985   default:
6986     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6987   case CCValAssign::Full:
6988     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
6989       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
6990     break;
6991   case CCValAssign::BCvt:
6992     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
6993       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
6994     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
6995       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
6996     else
6997       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
6998     break;
6999   }
7000   return Val;
7001 }
7002 
7003 // The caller is responsible for loading the full value if the argument is
7004 // passed with CCValAssign::Indirect.
7005 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
7006                                 const CCValAssign &VA, const SDLoc &DL) {
7007   MachineFunction &MF = DAG.getMachineFunction();
7008   MachineFrameInfo &MFI = MF.getFrameInfo();
7009   EVT LocVT = VA.getLocVT();
7010   EVT ValVT = VA.getValVT();
7011   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
7012   int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
7013                                  VA.getLocMemOffset(), /*Immutable=*/true);
7014   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7015   SDValue Val;
7016 
7017   ISD::LoadExtType ExtType;
7018   switch (VA.getLocInfo()) {
7019   default:
7020     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7021   case CCValAssign::Full:
7022   case CCValAssign::Indirect:
7023   case CCValAssign::BCvt:
7024     ExtType = ISD::NON_EXTLOAD;
7025     break;
7026   }
7027   Val = DAG.getExtLoad(
7028       ExtType, DL, LocVT, Chain, FIN,
7029       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
7030   return Val;
7031 }
7032 
7033 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
7034                                        const CCValAssign &VA, const SDLoc &DL) {
7035   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
7036          "Unexpected VA");
7037   MachineFunction &MF = DAG.getMachineFunction();
7038   MachineFrameInfo &MFI = MF.getFrameInfo();
7039   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7040 
7041   if (VA.isMemLoc()) {
7042     // f64 is passed on the stack.
7043     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
7044     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7045     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
7046                        MachinePointerInfo::getFixedStack(MF, FI));
7047   }
7048 
7049   assert(VA.isRegLoc() && "Expected register VA assignment");
7050 
7051   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7052   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
7053   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
7054   SDValue Hi;
7055   if (VA.getLocReg() == RISCV::X17) {
7056     // Second half of f64 is passed on the stack.
7057     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
7058     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7059     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
7060                      MachinePointerInfo::getFixedStack(MF, FI));
7061   } else {
7062     // Second half of f64 is passed in another GPR.
7063     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7064     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
7065     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
7066   }
7067   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
7068 }
7069 
7070 // FastCC has less than 1% performance improvement for some particular
7071 // benchmark. But theoretically, it may has benenfit for some cases.
7072 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
7073                             CCValAssign::LocInfo LocInfo,
7074                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
7075 
7076   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7077     // X5 and X6 might be used for save-restore libcall.
7078     static const MCPhysReg GPRList[] = {
7079         RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
7080         RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
7081         RISCV::X29, RISCV::X30, RISCV::X31};
7082     if (unsigned Reg = State.AllocateReg(GPRList)) {
7083       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7084       return false;
7085     }
7086   }
7087 
7088   if (LocVT == MVT::f16) {
7089     static const MCPhysReg FPR16List[] = {
7090         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
7091         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
7092         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
7093         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
7094     if (unsigned Reg = State.AllocateReg(FPR16List)) {
7095       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7096       return false;
7097     }
7098   }
7099 
7100   if (LocVT == MVT::f32) {
7101     static const MCPhysReg FPR32List[] = {
7102         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
7103         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
7104         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
7105         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
7106     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7107       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7108       return false;
7109     }
7110   }
7111 
7112   if (LocVT == MVT::f64) {
7113     static const MCPhysReg FPR64List[] = {
7114         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
7115         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
7116         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
7117         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
7118     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7119       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7120       return false;
7121     }
7122   }
7123 
7124   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
7125     unsigned Offset4 = State.AllocateStack(4, Align(4));
7126     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
7127     return false;
7128   }
7129 
7130   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
7131     unsigned Offset5 = State.AllocateStack(8, Align(8));
7132     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
7133     return false;
7134   }
7135 
7136   return true; // CC didn't match.
7137 }
7138 
7139 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
7140                          CCValAssign::LocInfo LocInfo,
7141                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
7142 
7143   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7144     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
7145     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
7146     static const MCPhysReg GPRList[] = {
7147         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
7148         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
7149     if (unsigned Reg = State.AllocateReg(GPRList)) {
7150       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7151       return false;
7152     }
7153   }
7154 
7155   if (LocVT == MVT::f32) {
7156     // Pass in STG registers: F1, ..., F6
7157     //                        fs0 ... fs5
7158     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
7159                                           RISCV::F18_F, RISCV::F19_F,
7160                                           RISCV::F20_F, RISCV::F21_F};
7161     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7162       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7163       return false;
7164     }
7165   }
7166 
7167   if (LocVT == MVT::f64) {
7168     // Pass in STG registers: D1, ..., D6
7169     //                        fs6 ... fs11
7170     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
7171                                           RISCV::F24_D, RISCV::F25_D,
7172                                           RISCV::F26_D, RISCV::F27_D};
7173     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7174       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7175       return false;
7176     }
7177   }
7178 
7179   report_fatal_error("No registers left in GHC calling convention");
7180   return true;
7181 }
7182 
7183 // Transform physical registers into virtual registers.
7184 SDValue RISCVTargetLowering::LowerFormalArguments(
7185     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
7186     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
7187     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7188 
7189   MachineFunction &MF = DAG.getMachineFunction();
7190 
7191   switch (CallConv) {
7192   default:
7193     report_fatal_error("Unsupported calling convention");
7194   case CallingConv::C:
7195   case CallingConv::Fast:
7196     break;
7197   case CallingConv::GHC:
7198     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
7199         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
7200       report_fatal_error(
7201         "GHC calling convention requires the F and D instruction set extensions");
7202   }
7203 
7204   const Function &Func = MF.getFunction();
7205   if (Func.hasFnAttribute("interrupt")) {
7206     if (!Func.arg_empty())
7207       report_fatal_error(
7208         "Functions with the interrupt attribute cannot have arguments!");
7209 
7210     StringRef Kind =
7211       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7212 
7213     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
7214       report_fatal_error(
7215         "Function interrupt attribute argument not supported!");
7216   }
7217 
7218   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7219   MVT XLenVT = Subtarget.getXLenVT();
7220   unsigned XLenInBytes = Subtarget.getXLen() / 8;
7221   // Used with vargs to acumulate store chains.
7222   std::vector<SDValue> OutChains;
7223 
7224   // Assign locations to all of the incoming arguments.
7225   SmallVector<CCValAssign, 16> ArgLocs;
7226   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7227 
7228   if (CallConv == CallingConv::Fast)
7229     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
7230   else if (CallConv == CallingConv::GHC)
7231     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
7232   else
7233     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
7234 
7235   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7236     CCValAssign &VA = ArgLocs[i];
7237     SDValue ArgValue;
7238     // Passing f64 on RV32D with a soft float ABI must be handled as a special
7239     // case.
7240     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
7241       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
7242     else if (VA.isRegLoc())
7243       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
7244     else
7245       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
7246 
7247     if (VA.getLocInfo() == CCValAssign::Indirect) {
7248       // If the original argument was split and passed by reference (e.g. i128
7249       // on RV32), we need to load all parts of it here (using the same
7250       // address). Vectors may be partly split to registers and partly to the
7251       // stack, in which case the base address is partly offset and subsequent
7252       // stores are relative to that.
7253       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
7254                                    MachinePointerInfo()));
7255       unsigned ArgIndex = Ins[i].OrigArgIndex;
7256       unsigned ArgPartOffset = Ins[i].PartOffset;
7257       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7258       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
7259         CCValAssign &PartVA = ArgLocs[i + 1];
7260         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
7261         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
7262                                       DAG.getIntPtrConstant(PartOffset, DL));
7263         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
7264                                      MachinePointerInfo()));
7265         ++i;
7266       }
7267       continue;
7268     }
7269     InVals.push_back(ArgValue);
7270   }
7271 
7272   if (IsVarArg) {
7273     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
7274     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
7275     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
7276     MachineFrameInfo &MFI = MF.getFrameInfo();
7277     MachineRegisterInfo &RegInfo = MF.getRegInfo();
7278     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
7279 
7280     // Offset of the first variable argument from stack pointer, and size of
7281     // the vararg save area. For now, the varargs save area is either zero or
7282     // large enough to hold a0-a7.
7283     int VaArgOffset, VarArgsSaveSize;
7284 
7285     // If all registers are allocated, then all varargs must be passed on the
7286     // stack and we don't need to save any argregs.
7287     if (ArgRegs.size() == Idx) {
7288       VaArgOffset = CCInfo.getNextStackOffset();
7289       VarArgsSaveSize = 0;
7290     } else {
7291       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
7292       VaArgOffset = -VarArgsSaveSize;
7293     }
7294 
7295     // Record the frame index of the first variable argument
7296     // which is a value necessary to VASTART.
7297     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7298     RVFI->setVarArgsFrameIndex(FI);
7299 
7300     // If saving an odd number of registers then create an extra stack slot to
7301     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
7302     // offsets to even-numbered registered remain 2*XLEN-aligned.
7303     if (Idx % 2) {
7304       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
7305       VarArgsSaveSize += XLenInBytes;
7306     }
7307 
7308     // Copy the integer registers that may have been used for passing varargs
7309     // to the vararg save area.
7310     for (unsigned I = Idx; I < ArgRegs.size();
7311          ++I, VaArgOffset += XLenInBytes) {
7312       const Register Reg = RegInfo.createVirtualRegister(RC);
7313       RegInfo.addLiveIn(ArgRegs[I], Reg);
7314       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
7315       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7316       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7317       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
7318                                    MachinePointerInfo::getFixedStack(MF, FI));
7319       cast<StoreSDNode>(Store.getNode())
7320           ->getMemOperand()
7321           ->setValue((Value *)nullptr);
7322       OutChains.push_back(Store);
7323     }
7324     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
7325   }
7326 
7327   // All stores are grouped in one node to allow the matching between
7328   // the size of Ins and InVals. This only happens for vararg functions.
7329   if (!OutChains.empty()) {
7330     OutChains.push_back(Chain);
7331     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
7332   }
7333 
7334   return Chain;
7335 }
7336 
7337 /// isEligibleForTailCallOptimization - Check whether the call is eligible
7338 /// for tail call optimization.
7339 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
7340 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
7341     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
7342     const SmallVector<CCValAssign, 16> &ArgLocs) const {
7343 
7344   auto &Callee = CLI.Callee;
7345   auto CalleeCC = CLI.CallConv;
7346   auto &Outs = CLI.Outs;
7347   auto &Caller = MF.getFunction();
7348   auto CallerCC = Caller.getCallingConv();
7349 
7350   // Exception-handling functions need a special set of instructions to
7351   // indicate a return to the hardware. Tail-calling another function would
7352   // probably break this.
7353   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
7354   // should be expanded as new function attributes are introduced.
7355   if (Caller.hasFnAttribute("interrupt"))
7356     return false;
7357 
7358   // Do not tail call opt if the stack is used to pass parameters.
7359   if (CCInfo.getNextStackOffset() != 0)
7360     return false;
7361 
7362   // Do not tail call opt if any parameters need to be passed indirectly.
7363   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
7364   // passed indirectly. So the address of the value will be passed in a
7365   // register, or if not available, then the address is put on the stack. In
7366   // order to pass indirectly, space on the stack often needs to be allocated
7367   // in order to store the value. In this case the CCInfo.getNextStackOffset()
7368   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
7369   // are passed CCValAssign::Indirect.
7370   for (auto &VA : ArgLocs)
7371     if (VA.getLocInfo() == CCValAssign::Indirect)
7372       return false;
7373 
7374   // Do not tail call opt if either caller or callee uses struct return
7375   // semantics.
7376   auto IsCallerStructRet = Caller.hasStructRetAttr();
7377   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
7378   if (IsCallerStructRet || IsCalleeStructRet)
7379     return false;
7380 
7381   // Externally-defined functions with weak linkage should not be
7382   // tail-called. The behaviour of branch instructions in this situation (as
7383   // used for tail calls) is implementation-defined, so we cannot rely on the
7384   // linker replacing the tail call with a return.
7385   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
7386     const GlobalValue *GV = G->getGlobal();
7387     if (GV->hasExternalWeakLinkage())
7388       return false;
7389   }
7390 
7391   // The callee has to preserve all registers the caller needs to preserve.
7392   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
7393   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
7394   if (CalleeCC != CallerCC) {
7395     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
7396     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
7397       return false;
7398   }
7399 
7400   // Byval parameters hand the function a pointer directly into the stack area
7401   // we want to reuse during a tail call. Working around this *is* possible
7402   // but less efficient and uglier in LowerCall.
7403   for (auto &Arg : Outs)
7404     if (Arg.Flags.isByVal())
7405       return false;
7406 
7407   return true;
7408 }
7409 
7410 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
7411   return DAG.getDataLayout().getPrefTypeAlign(
7412       VT.getTypeForEVT(*DAG.getContext()));
7413 }
7414 
7415 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
7416 // and output parameter nodes.
7417 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
7418                                        SmallVectorImpl<SDValue> &InVals) const {
7419   SelectionDAG &DAG = CLI.DAG;
7420   SDLoc &DL = CLI.DL;
7421   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
7422   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
7423   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
7424   SDValue Chain = CLI.Chain;
7425   SDValue Callee = CLI.Callee;
7426   bool &IsTailCall = CLI.IsTailCall;
7427   CallingConv::ID CallConv = CLI.CallConv;
7428   bool IsVarArg = CLI.IsVarArg;
7429   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7430   MVT XLenVT = Subtarget.getXLenVT();
7431 
7432   MachineFunction &MF = DAG.getMachineFunction();
7433 
7434   // Analyze the operands of the call, assigning locations to each operand.
7435   SmallVector<CCValAssign, 16> ArgLocs;
7436   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7437 
7438   if (CallConv == CallingConv::Fast)
7439     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
7440   else if (CallConv == CallingConv::GHC)
7441     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
7442   else
7443     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
7444 
7445   // Check if it's really possible to do a tail call.
7446   if (IsTailCall)
7447     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
7448 
7449   if (IsTailCall)
7450     ++NumTailCalls;
7451   else if (CLI.CB && CLI.CB->isMustTailCall())
7452     report_fatal_error("failed to perform tail call elimination on a call "
7453                        "site marked musttail");
7454 
7455   // Get a count of how many bytes are to be pushed on the stack.
7456   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
7457 
7458   // Create local copies for byval args
7459   SmallVector<SDValue, 8> ByValArgs;
7460   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7461     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7462     if (!Flags.isByVal())
7463       continue;
7464 
7465     SDValue Arg = OutVals[i];
7466     unsigned Size = Flags.getByValSize();
7467     Align Alignment = Flags.getNonZeroByValAlign();
7468 
7469     int FI =
7470         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
7471     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7472     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
7473 
7474     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
7475                           /*IsVolatile=*/false,
7476                           /*AlwaysInline=*/false, IsTailCall,
7477                           MachinePointerInfo(), MachinePointerInfo());
7478     ByValArgs.push_back(FIPtr);
7479   }
7480 
7481   if (!IsTailCall)
7482     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
7483 
7484   // Copy argument values to their designated locations.
7485   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
7486   SmallVector<SDValue, 8> MemOpChains;
7487   SDValue StackPtr;
7488   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
7489     CCValAssign &VA = ArgLocs[i];
7490     SDValue ArgValue = OutVals[i];
7491     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7492 
7493     // Handle passing f64 on RV32D with a soft float ABI as a special case.
7494     bool IsF64OnRV32DSoftABI =
7495         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
7496     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
7497       SDValue SplitF64 = DAG.getNode(
7498           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
7499       SDValue Lo = SplitF64.getValue(0);
7500       SDValue Hi = SplitF64.getValue(1);
7501 
7502       Register RegLo = VA.getLocReg();
7503       RegsToPass.push_back(std::make_pair(RegLo, Lo));
7504 
7505       if (RegLo == RISCV::X17) {
7506         // Second half of f64 is passed on the stack.
7507         // Work out the address of the stack slot.
7508         if (!StackPtr.getNode())
7509           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7510         // Emit the store.
7511         MemOpChains.push_back(
7512             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
7513       } else {
7514         // Second half of f64 is passed in another GPR.
7515         assert(RegLo < RISCV::X31 && "Invalid register pair");
7516         Register RegHigh = RegLo + 1;
7517         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
7518       }
7519       continue;
7520     }
7521 
7522     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
7523     // as any other MemLoc.
7524 
7525     // Promote the value if needed.
7526     // For now, only handle fully promoted and indirect arguments.
7527     if (VA.getLocInfo() == CCValAssign::Indirect) {
7528       // Store the argument in a stack slot and pass its address.
7529       Align StackAlign =
7530           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
7531                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
7532       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
7533       // If the original argument was split (e.g. i128), we need
7534       // to store the required parts of it here (and pass just one address).
7535       // Vectors may be partly split to registers and partly to the stack, in
7536       // which case the base address is partly offset and subsequent stores are
7537       // relative to that.
7538       unsigned ArgIndex = Outs[i].OrigArgIndex;
7539       unsigned ArgPartOffset = Outs[i].PartOffset;
7540       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7541       // Calculate the total size to store. We don't have access to what we're
7542       // actually storing other than performing the loop and collecting the
7543       // info.
7544       SmallVector<std::pair<SDValue, unsigned>> Parts;
7545       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
7546         SDValue PartValue = OutVals[i + 1];
7547         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
7548         EVT PartVT = PartValue.getValueType();
7549         StoredSize += PartVT.getStoreSize();
7550         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
7551         Parts.push_back(std::make_pair(PartValue, PartOffset));
7552         ++i;
7553       }
7554       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
7555       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
7556       MemOpChains.push_back(
7557           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
7558                        MachinePointerInfo::getFixedStack(MF, FI)));
7559       for (const auto &Part : Parts) {
7560         SDValue PartValue = Part.first;
7561         unsigned PartOffset = Part.second;
7562         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
7563                                       DAG.getIntPtrConstant(PartOffset, DL));
7564         MemOpChains.push_back(
7565             DAG.getStore(Chain, DL, PartValue, Address,
7566                          MachinePointerInfo::getFixedStack(MF, FI)));
7567       }
7568       ArgValue = SpillSlot;
7569     } else {
7570       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
7571     }
7572 
7573     // Use local copy if it is a byval arg.
7574     if (Flags.isByVal())
7575       ArgValue = ByValArgs[j++];
7576 
7577     if (VA.isRegLoc()) {
7578       // Queue up the argument copies and emit them at the end.
7579       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
7580     } else {
7581       assert(VA.isMemLoc() && "Argument not register or memory");
7582       assert(!IsTailCall && "Tail call not allowed if stack is used "
7583                             "for passing parameters");
7584 
7585       // Work out the address of the stack slot.
7586       if (!StackPtr.getNode())
7587         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7588       SDValue Address =
7589           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
7590                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
7591 
7592       // Emit the store.
7593       MemOpChains.push_back(
7594           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
7595     }
7596   }
7597 
7598   // Join the stores, which are independent of one another.
7599   if (!MemOpChains.empty())
7600     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
7601 
7602   SDValue Glue;
7603 
7604   // Build a sequence of copy-to-reg nodes, chained and glued together.
7605   for (auto &Reg : RegsToPass) {
7606     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
7607     Glue = Chain.getValue(1);
7608   }
7609 
7610   // Validate that none of the argument registers have been marked as
7611   // reserved, if so report an error. Do the same for the return address if this
7612   // is not a tailcall.
7613   validateCCReservedRegs(RegsToPass, MF);
7614   if (!IsTailCall &&
7615       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
7616     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7617         MF.getFunction(),
7618         "Return address register required, but has been reserved."});
7619 
7620   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
7621   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
7622   // split it and then direct call can be matched by PseudoCALL.
7623   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
7624     const GlobalValue *GV = S->getGlobal();
7625 
7626     unsigned OpFlags = RISCVII::MO_CALL;
7627     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
7628       OpFlags = RISCVII::MO_PLT;
7629 
7630     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
7631   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
7632     unsigned OpFlags = RISCVII::MO_CALL;
7633 
7634     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
7635                                                  nullptr))
7636       OpFlags = RISCVII::MO_PLT;
7637 
7638     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
7639   }
7640 
7641   // The first call operand is the chain and the second is the target address.
7642   SmallVector<SDValue, 8> Ops;
7643   Ops.push_back(Chain);
7644   Ops.push_back(Callee);
7645 
7646   // Add argument registers to the end of the list so that they are
7647   // known live into the call.
7648   for (auto &Reg : RegsToPass)
7649     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
7650 
7651   if (!IsTailCall) {
7652     // Add a register mask operand representing the call-preserved registers.
7653     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
7654     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
7655     assert(Mask && "Missing call preserved mask for calling convention");
7656     Ops.push_back(DAG.getRegisterMask(Mask));
7657   }
7658 
7659   // Glue the call to the argument copies, if any.
7660   if (Glue.getNode())
7661     Ops.push_back(Glue);
7662 
7663   // Emit the call.
7664   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7665 
7666   if (IsTailCall) {
7667     MF.getFrameInfo().setHasTailCall();
7668     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
7669   }
7670 
7671   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
7672   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
7673   Glue = Chain.getValue(1);
7674 
7675   // Mark the end of the call, which is glued to the call itself.
7676   Chain = DAG.getCALLSEQ_END(Chain,
7677                              DAG.getConstant(NumBytes, DL, PtrVT, true),
7678                              DAG.getConstant(0, DL, PtrVT, true),
7679                              Glue, DL);
7680   Glue = Chain.getValue(1);
7681 
7682   // Assign locations to each value returned by this call.
7683   SmallVector<CCValAssign, 16> RVLocs;
7684   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
7685   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
7686 
7687   // Copy all of the result registers out of their specified physreg.
7688   for (auto &VA : RVLocs) {
7689     // Copy the value out
7690     SDValue RetValue =
7691         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
7692     // Glue the RetValue to the end of the call sequence
7693     Chain = RetValue.getValue(1);
7694     Glue = RetValue.getValue(2);
7695 
7696     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
7697       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
7698       SDValue RetValue2 =
7699           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
7700       Chain = RetValue2.getValue(1);
7701       Glue = RetValue2.getValue(2);
7702       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
7703                              RetValue2);
7704     }
7705 
7706     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
7707 
7708     InVals.push_back(RetValue);
7709   }
7710 
7711   return Chain;
7712 }
7713 
7714 bool RISCVTargetLowering::CanLowerReturn(
7715     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
7716     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
7717   SmallVector<CCValAssign, 16> RVLocs;
7718   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
7719 
7720   Optional<unsigned> FirstMaskArgument;
7721   if (Subtarget.hasStdExtV())
7722     FirstMaskArgument = preAssignMask(Outs);
7723 
7724   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7725     MVT VT = Outs[i].VT;
7726     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
7727     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7728     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
7729                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
7730                  *this, FirstMaskArgument))
7731       return false;
7732   }
7733   return true;
7734 }
7735 
7736 SDValue
7737 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7738                                  bool IsVarArg,
7739                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
7740                                  const SmallVectorImpl<SDValue> &OutVals,
7741                                  const SDLoc &DL, SelectionDAG &DAG) const {
7742   const MachineFunction &MF = DAG.getMachineFunction();
7743   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
7744 
7745   // Stores the assignment of the return value to a location.
7746   SmallVector<CCValAssign, 16> RVLocs;
7747 
7748   // Info about the registers and stack slot.
7749   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
7750                  *DAG.getContext());
7751 
7752   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
7753                     nullptr);
7754 
7755   if (CallConv == CallingConv::GHC && !RVLocs.empty())
7756     report_fatal_error("GHC functions return void only");
7757 
7758   SDValue Glue;
7759   SmallVector<SDValue, 4> RetOps(1, Chain);
7760 
7761   // Copy the result values into the output registers.
7762   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
7763     SDValue Val = OutVals[i];
7764     CCValAssign &VA = RVLocs[i];
7765     assert(VA.isRegLoc() && "Can only return in registers!");
7766 
7767     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
7768       // Handle returning f64 on RV32D with a soft float ABI.
7769       assert(VA.isRegLoc() && "Expected return via registers");
7770       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
7771                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
7772       SDValue Lo = SplitF64.getValue(0);
7773       SDValue Hi = SplitF64.getValue(1);
7774       Register RegLo = VA.getLocReg();
7775       assert(RegLo < RISCV::X31 && "Invalid register pair");
7776       Register RegHi = RegLo + 1;
7777 
7778       if (STI.isRegisterReservedByUser(RegLo) ||
7779           STI.isRegisterReservedByUser(RegHi))
7780         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7781             MF.getFunction(),
7782             "Return value register required, but has been reserved."});
7783 
7784       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
7785       Glue = Chain.getValue(1);
7786       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
7787       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
7788       Glue = Chain.getValue(1);
7789       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
7790     } else {
7791       // Handle a 'normal' return.
7792       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
7793       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
7794 
7795       if (STI.isRegisterReservedByUser(VA.getLocReg()))
7796         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7797             MF.getFunction(),
7798             "Return value register required, but has been reserved."});
7799 
7800       // Guarantee that all emitted copies are stuck together.
7801       Glue = Chain.getValue(1);
7802       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7803     }
7804   }
7805 
7806   RetOps[0] = Chain; // Update chain.
7807 
7808   // Add the glue node if we have it.
7809   if (Glue.getNode()) {
7810     RetOps.push_back(Glue);
7811   }
7812 
7813   // Interrupt service routines use different return instructions.
7814   const Function &Func = DAG.getMachineFunction().getFunction();
7815   if (Func.hasFnAttribute("interrupt")) {
7816     if (!Func.getReturnType()->isVoidTy())
7817       report_fatal_error(
7818           "Functions with the interrupt attribute must have void return type!");
7819 
7820     MachineFunction &MF = DAG.getMachineFunction();
7821     StringRef Kind =
7822       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7823 
7824     unsigned RetOpc;
7825     if (Kind == "user")
7826       RetOpc = RISCVISD::URET_FLAG;
7827     else if (Kind == "supervisor")
7828       RetOpc = RISCVISD::SRET_FLAG;
7829     else
7830       RetOpc = RISCVISD::MRET_FLAG;
7831 
7832     return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
7833   }
7834 
7835   return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
7836 }
7837 
7838 void RISCVTargetLowering::validateCCReservedRegs(
7839     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
7840     MachineFunction &MF) const {
7841   const Function &F = MF.getFunction();
7842   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
7843 
7844   if (llvm::any_of(Regs, [&STI](auto Reg) {
7845         return STI.isRegisterReservedByUser(Reg.first);
7846       }))
7847     F.getContext().diagnose(DiagnosticInfoUnsupported{
7848         F, "Argument register required, but has been reserved."});
7849 }
7850 
7851 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
7852   return CI->isTailCall();
7853 }
7854 
7855 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
7856 #define NODE_NAME_CASE(NODE)                                                   \
7857   case RISCVISD::NODE:                                                         \
7858     return "RISCVISD::" #NODE;
7859   // clang-format off
7860   switch ((RISCVISD::NodeType)Opcode) {
7861   case RISCVISD::FIRST_NUMBER:
7862     break;
7863   NODE_NAME_CASE(RET_FLAG)
7864   NODE_NAME_CASE(URET_FLAG)
7865   NODE_NAME_CASE(SRET_FLAG)
7866   NODE_NAME_CASE(MRET_FLAG)
7867   NODE_NAME_CASE(CALL)
7868   NODE_NAME_CASE(SELECT_CC)
7869   NODE_NAME_CASE(BR_CC)
7870   NODE_NAME_CASE(BuildPairF64)
7871   NODE_NAME_CASE(SplitF64)
7872   NODE_NAME_CASE(TAIL)
7873   NODE_NAME_CASE(MULHSU)
7874   NODE_NAME_CASE(SLLW)
7875   NODE_NAME_CASE(SRAW)
7876   NODE_NAME_CASE(SRLW)
7877   NODE_NAME_CASE(DIVW)
7878   NODE_NAME_CASE(DIVUW)
7879   NODE_NAME_CASE(REMUW)
7880   NODE_NAME_CASE(ROLW)
7881   NODE_NAME_CASE(RORW)
7882   NODE_NAME_CASE(CLZW)
7883   NODE_NAME_CASE(CTZW)
7884   NODE_NAME_CASE(FSLW)
7885   NODE_NAME_CASE(FSRW)
7886   NODE_NAME_CASE(FSL)
7887   NODE_NAME_CASE(FSR)
7888   NODE_NAME_CASE(FMV_H_X)
7889   NODE_NAME_CASE(FMV_X_ANYEXTH)
7890   NODE_NAME_CASE(FMV_W_X_RV64)
7891   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
7892   NODE_NAME_CASE(READ_CYCLE_WIDE)
7893   NODE_NAME_CASE(GREV)
7894   NODE_NAME_CASE(GREVW)
7895   NODE_NAME_CASE(GORC)
7896   NODE_NAME_CASE(GORCW)
7897   NODE_NAME_CASE(SHFL)
7898   NODE_NAME_CASE(SHFLW)
7899   NODE_NAME_CASE(UNSHFL)
7900   NODE_NAME_CASE(UNSHFLW)
7901   NODE_NAME_CASE(BCOMPRESS)
7902   NODE_NAME_CASE(BCOMPRESSW)
7903   NODE_NAME_CASE(BDECOMPRESS)
7904   NODE_NAME_CASE(BDECOMPRESSW)
7905   NODE_NAME_CASE(VMV_V_X_VL)
7906   NODE_NAME_CASE(VFMV_V_F_VL)
7907   NODE_NAME_CASE(VMV_X_S)
7908   NODE_NAME_CASE(VMV_S_X_VL)
7909   NODE_NAME_CASE(VFMV_S_F_VL)
7910   NODE_NAME_CASE(SPLAT_VECTOR_I64)
7911   NODE_NAME_CASE(READ_VLENB)
7912   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
7913   NODE_NAME_CASE(VSLIDEUP_VL)
7914   NODE_NAME_CASE(VSLIDE1UP_VL)
7915   NODE_NAME_CASE(VSLIDEDOWN_VL)
7916   NODE_NAME_CASE(VSLIDE1DOWN_VL)
7917   NODE_NAME_CASE(VID_VL)
7918   NODE_NAME_CASE(VFNCVT_ROD_VL)
7919   NODE_NAME_CASE(VECREDUCE_ADD_VL)
7920   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
7921   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
7922   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
7923   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
7924   NODE_NAME_CASE(VECREDUCE_AND_VL)
7925   NODE_NAME_CASE(VECREDUCE_OR_VL)
7926   NODE_NAME_CASE(VECREDUCE_XOR_VL)
7927   NODE_NAME_CASE(VECREDUCE_FADD_VL)
7928   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
7929   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
7930   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
7931   NODE_NAME_CASE(ADD_VL)
7932   NODE_NAME_CASE(AND_VL)
7933   NODE_NAME_CASE(MUL_VL)
7934   NODE_NAME_CASE(OR_VL)
7935   NODE_NAME_CASE(SDIV_VL)
7936   NODE_NAME_CASE(SHL_VL)
7937   NODE_NAME_CASE(SREM_VL)
7938   NODE_NAME_CASE(SRA_VL)
7939   NODE_NAME_CASE(SRL_VL)
7940   NODE_NAME_CASE(SUB_VL)
7941   NODE_NAME_CASE(UDIV_VL)
7942   NODE_NAME_CASE(UREM_VL)
7943   NODE_NAME_CASE(XOR_VL)
7944   NODE_NAME_CASE(FADD_VL)
7945   NODE_NAME_CASE(FSUB_VL)
7946   NODE_NAME_CASE(FMUL_VL)
7947   NODE_NAME_CASE(FDIV_VL)
7948   NODE_NAME_CASE(FNEG_VL)
7949   NODE_NAME_CASE(FABS_VL)
7950   NODE_NAME_CASE(FSQRT_VL)
7951   NODE_NAME_CASE(FMA_VL)
7952   NODE_NAME_CASE(FCOPYSIGN_VL)
7953   NODE_NAME_CASE(SMIN_VL)
7954   NODE_NAME_CASE(SMAX_VL)
7955   NODE_NAME_CASE(UMIN_VL)
7956   NODE_NAME_CASE(UMAX_VL)
7957   NODE_NAME_CASE(FMINNUM_VL)
7958   NODE_NAME_CASE(FMAXNUM_VL)
7959   NODE_NAME_CASE(MULHS_VL)
7960   NODE_NAME_CASE(MULHU_VL)
7961   NODE_NAME_CASE(FP_TO_SINT_VL)
7962   NODE_NAME_CASE(FP_TO_UINT_VL)
7963   NODE_NAME_CASE(SINT_TO_FP_VL)
7964   NODE_NAME_CASE(UINT_TO_FP_VL)
7965   NODE_NAME_CASE(FP_EXTEND_VL)
7966   NODE_NAME_CASE(FP_ROUND_VL)
7967   NODE_NAME_CASE(SETCC_VL)
7968   NODE_NAME_CASE(VSELECT_VL)
7969   NODE_NAME_CASE(VMAND_VL)
7970   NODE_NAME_CASE(VMOR_VL)
7971   NODE_NAME_CASE(VMXOR_VL)
7972   NODE_NAME_CASE(VMCLR_VL)
7973   NODE_NAME_CASE(VMSET_VL)
7974   NODE_NAME_CASE(VRGATHER_VX_VL)
7975   NODE_NAME_CASE(VRGATHER_VV_VL)
7976   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
7977   NODE_NAME_CASE(VSEXT_VL)
7978   NODE_NAME_CASE(VZEXT_VL)
7979   NODE_NAME_CASE(VPOPC_VL)
7980   NODE_NAME_CASE(VLE_VL)
7981   NODE_NAME_CASE(VSE_VL)
7982   NODE_NAME_CASE(READ_CSR)
7983   NODE_NAME_CASE(WRITE_CSR)
7984   NODE_NAME_CASE(SWAP_CSR)
7985   }
7986   // clang-format on
7987   return nullptr;
7988 #undef NODE_NAME_CASE
7989 }
7990 
7991 /// getConstraintType - Given a constraint letter, return the type of
7992 /// constraint it is for this target.
7993 RISCVTargetLowering::ConstraintType
7994 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
7995   if (Constraint.size() == 1) {
7996     switch (Constraint[0]) {
7997     default:
7998       break;
7999     case 'f':
8000     case 'v':
8001       return C_RegisterClass;
8002     case 'I':
8003     case 'J':
8004     case 'K':
8005       return C_Immediate;
8006     case 'A':
8007       return C_Memory;
8008     }
8009   }
8010   return TargetLowering::getConstraintType(Constraint);
8011 }
8012 
8013 std::pair<unsigned, const TargetRegisterClass *>
8014 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
8015                                                   StringRef Constraint,
8016                                                   MVT VT) const {
8017   // First, see if this is a constraint that directly corresponds to a
8018   // RISCV register class.
8019   if (Constraint.size() == 1) {
8020     switch (Constraint[0]) {
8021     case 'r':
8022       return std::make_pair(0U, &RISCV::GPRRegClass);
8023     case 'f':
8024       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
8025         return std::make_pair(0U, &RISCV::FPR16RegClass);
8026       if (Subtarget.hasStdExtF() && VT == MVT::f32)
8027         return std::make_pair(0U, &RISCV::FPR32RegClass);
8028       if (Subtarget.hasStdExtD() && VT == MVT::f64)
8029         return std::make_pair(0U, &RISCV::FPR64RegClass);
8030       break;
8031     case 'v':
8032       for (const auto *RC :
8033            {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
8034             &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8035         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
8036           return std::make_pair(0U, RC);
8037       }
8038       break;
8039     default:
8040       break;
8041     }
8042   }
8043 
8044   // Clang will correctly decode the usage of register name aliases into their
8045   // official names. However, other frontends like `rustc` do not. This allows
8046   // users of these frontends to use the ABI names for registers in LLVM-style
8047   // register constraints.
8048   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
8049                                .Case("{zero}", RISCV::X0)
8050                                .Case("{ra}", RISCV::X1)
8051                                .Case("{sp}", RISCV::X2)
8052                                .Case("{gp}", RISCV::X3)
8053                                .Case("{tp}", RISCV::X4)
8054                                .Case("{t0}", RISCV::X5)
8055                                .Case("{t1}", RISCV::X6)
8056                                .Case("{t2}", RISCV::X7)
8057                                .Cases("{s0}", "{fp}", RISCV::X8)
8058                                .Case("{s1}", RISCV::X9)
8059                                .Case("{a0}", RISCV::X10)
8060                                .Case("{a1}", RISCV::X11)
8061                                .Case("{a2}", RISCV::X12)
8062                                .Case("{a3}", RISCV::X13)
8063                                .Case("{a4}", RISCV::X14)
8064                                .Case("{a5}", RISCV::X15)
8065                                .Case("{a6}", RISCV::X16)
8066                                .Case("{a7}", RISCV::X17)
8067                                .Case("{s2}", RISCV::X18)
8068                                .Case("{s3}", RISCV::X19)
8069                                .Case("{s4}", RISCV::X20)
8070                                .Case("{s5}", RISCV::X21)
8071                                .Case("{s6}", RISCV::X22)
8072                                .Case("{s7}", RISCV::X23)
8073                                .Case("{s8}", RISCV::X24)
8074                                .Case("{s9}", RISCV::X25)
8075                                .Case("{s10}", RISCV::X26)
8076                                .Case("{s11}", RISCV::X27)
8077                                .Case("{t3}", RISCV::X28)
8078                                .Case("{t4}", RISCV::X29)
8079                                .Case("{t5}", RISCV::X30)
8080                                .Case("{t6}", RISCV::X31)
8081                                .Default(RISCV::NoRegister);
8082   if (XRegFromAlias != RISCV::NoRegister)
8083     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
8084 
8085   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
8086   // TableGen record rather than the AsmName to choose registers for InlineAsm
8087   // constraints, plus we want to match those names to the widest floating point
8088   // register type available, manually select floating point registers here.
8089   //
8090   // The second case is the ABI name of the register, so that frontends can also
8091   // use the ABI names in register constraint lists.
8092   if (Subtarget.hasStdExtF()) {
8093     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
8094                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
8095                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
8096                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
8097                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
8098                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
8099                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
8100                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
8101                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
8102                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
8103                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
8104                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
8105                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
8106                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
8107                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
8108                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
8109                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
8110                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
8111                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
8112                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
8113                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
8114                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
8115                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
8116                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
8117                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
8118                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
8119                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
8120                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
8121                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
8122                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
8123                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
8124                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
8125                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
8126                         .Default(RISCV::NoRegister);
8127     if (FReg != RISCV::NoRegister) {
8128       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
8129       if (Subtarget.hasStdExtD()) {
8130         unsigned RegNo = FReg - RISCV::F0_F;
8131         unsigned DReg = RISCV::F0_D + RegNo;
8132         return std::make_pair(DReg, &RISCV::FPR64RegClass);
8133       }
8134       return std::make_pair(FReg, &RISCV::FPR32RegClass);
8135     }
8136   }
8137 
8138   if (Subtarget.hasStdExtV()) {
8139     Register VReg = StringSwitch<Register>(Constraint.lower())
8140                         .Case("{v0}", RISCV::V0)
8141                         .Case("{v1}", RISCV::V1)
8142                         .Case("{v2}", RISCV::V2)
8143                         .Case("{v3}", RISCV::V3)
8144                         .Case("{v4}", RISCV::V4)
8145                         .Case("{v5}", RISCV::V5)
8146                         .Case("{v6}", RISCV::V6)
8147                         .Case("{v7}", RISCV::V7)
8148                         .Case("{v8}", RISCV::V8)
8149                         .Case("{v9}", RISCV::V9)
8150                         .Case("{v10}", RISCV::V10)
8151                         .Case("{v11}", RISCV::V11)
8152                         .Case("{v12}", RISCV::V12)
8153                         .Case("{v13}", RISCV::V13)
8154                         .Case("{v14}", RISCV::V14)
8155                         .Case("{v15}", RISCV::V15)
8156                         .Case("{v16}", RISCV::V16)
8157                         .Case("{v17}", RISCV::V17)
8158                         .Case("{v18}", RISCV::V18)
8159                         .Case("{v19}", RISCV::V19)
8160                         .Case("{v20}", RISCV::V20)
8161                         .Case("{v21}", RISCV::V21)
8162                         .Case("{v22}", RISCV::V22)
8163                         .Case("{v23}", RISCV::V23)
8164                         .Case("{v24}", RISCV::V24)
8165                         .Case("{v25}", RISCV::V25)
8166                         .Case("{v26}", RISCV::V26)
8167                         .Case("{v27}", RISCV::V27)
8168                         .Case("{v28}", RISCV::V28)
8169                         .Case("{v29}", RISCV::V29)
8170                         .Case("{v30}", RISCV::V30)
8171                         .Case("{v31}", RISCV::V31)
8172                         .Default(RISCV::NoRegister);
8173     if (VReg != RISCV::NoRegister) {
8174       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
8175         return std::make_pair(VReg, &RISCV::VMRegClass);
8176       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
8177         return std::make_pair(VReg, &RISCV::VRRegClass);
8178       for (const auto *RC :
8179            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8180         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
8181           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
8182           return std::make_pair(VReg, RC);
8183         }
8184       }
8185     }
8186   }
8187 
8188   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
8189 }
8190 
8191 unsigned
8192 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
8193   // Currently only support length 1 constraints.
8194   if (ConstraintCode.size() == 1) {
8195     switch (ConstraintCode[0]) {
8196     case 'A':
8197       return InlineAsm::Constraint_A;
8198     default:
8199       break;
8200     }
8201   }
8202 
8203   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
8204 }
8205 
8206 void RISCVTargetLowering::LowerAsmOperandForConstraint(
8207     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
8208     SelectionDAG &DAG) const {
8209   // Currently only support length 1 constraints.
8210   if (Constraint.length() == 1) {
8211     switch (Constraint[0]) {
8212     case 'I':
8213       // Validate & create a 12-bit signed immediate operand.
8214       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8215         uint64_t CVal = C->getSExtValue();
8216         if (isInt<12>(CVal))
8217           Ops.push_back(
8218               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8219       }
8220       return;
8221     case 'J':
8222       // Validate & create an integer zero operand.
8223       if (auto *C = dyn_cast<ConstantSDNode>(Op))
8224         if (C->getZExtValue() == 0)
8225           Ops.push_back(
8226               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
8227       return;
8228     case 'K':
8229       // Validate & create a 5-bit unsigned immediate operand.
8230       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8231         uint64_t CVal = C->getZExtValue();
8232         if (isUInt<5>(CVal))
8233           Ops.push_back(
8234               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8235       }
8236       return;
8237     default:
8238       break;
8239     }
8240   }
8241   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
8242 }
8243 
8244 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
8245                                                    Instruction *Inst,
8246                                                    AtomicOrdering Ord) const {
8247   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
8248     return Builder.CreateFence(Ord);
8249   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
8250     return Builder.CreateFence(AtomicOrdering::Release);
8251   return nullptr;
8252 }
8253 
8254 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
8255                                                     Instruction *Inst,
8256                                                     AtomicOrdering Ord) const {
8257   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
8258     return Builder.CreateFence(AtomicOrdering::Acquire);
8259   return nullptr;
8260 }
8261 
8262 TargetLowering::AtomicExpansionKind
8263 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
8264   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
8265   // point operations can't be used in an lr/sc sequence without breaking the
8266   // forward-progress guarantee.
8267   if (AI->isFloatingPointOperation())
8268     return AtomicExpansionKind::CmpXChg;
8269 
8270   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
8271   if (Size == 8 || Size == 16)
8272     return AtomicExpansionKind::MaskedIntrinsic;
8273   return AtomicExpansionKind::None;
8274 }
8275 
8276 static Intrinsic::ID
8277 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
8278   if (XLen == 32) {
8279     switch (BinOp) {
8280     default:
8281       llvm_unreachable("Unexpected AtomicRMW BinOp");
8282     case AtomicRMWInst::Xchg:
8283       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
8284     case AtomicRMWInst::Add:
8285       return Intrinsic::riscv_masked_atomicrmw_add_i32;
8286     case AtomicRMWInst::Sub:
8287       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
8288     case AtomicRMWInst::Nand:
8289       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
8290     case AtomicRMWInst::Max:
8291       return Intrinsic::riscv_masked_atomicrmw_max_i32;
8292     case AtomicRMWInst::Min:
8293       return Intrinsic::riscv_masked_atomicrmw_min_i32;
8294     case AtomicRMWInst::UMax:
8295       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
8296     case AtomicRMWInst::UMin:
8297       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
8298     }
8299   }
8300 
8301   if (XLen == 64) {
8302     switch (BinOp) {
8303     default:
8304       llvm_unreachable("Unexpected AtomicRMW BinOp");
8305     case AtomicRMWInst::Xchg:
8306       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
8307     case AtomicRMWInst::Add:
8308       return Intrinsic::riscv_masked_atomicrmw_add_i64;
8309     case AtomicRMWInst::Sub:
8310       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
8311     case AtomicRMWInst::Nand:
8312       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
8313     case AtomicRMWInst::Max:
8314       return Intrinsic::riscv_masked_atomicrmw_max_i64;
8315     case AtomicRMWInst::Min:
8316       return Intrinsic::riscv_masked_atomicrmw_min_i64;
8317     case AtomicRMWInst::UMax:
8318       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
8319     case AtomicRMWInst::UMin:
8320       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
8321     }
8322   }
8323 
8324   llvm_unreachable("Unexpected XLen\n");
8325 }
8326 
8327 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
8328     IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
8329     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
8330   unsigned XLen = Subtarget.getXLen();
8331   Value *Ordering =
8332       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
8333   Type *Tys[] = {AlignedAddr->getType()};
8334   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
8335       AI->getModule(),
8336       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
8337 
8338   if (XLen == 64) {
8339     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
8340     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8341     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
8342   }
8343 
8344   Value *Result;
8345 
8346   // Must pass the shift amount needed to sign extend the loaded value prior
8347   // to performing a signed comparison for min/max. ShiftAmt is the number of
8348   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
8349   // is the number of bits to left+right shift the value in order to
8350   // sign-extend.
8351   if (AI->getOperation() == AtomicRMWInst::Min ||
8352       AI->getOperation() == AtomicRMWInst::Max) {
8353     const DataLayout &DL = AI->getModule()->getDataLayout();
8354     unsigned ValWidth =
8355         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
8356     Value *SextShamt =
8357         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
8358     Result = Builder.CreateCall(LrwOpScwLoop,
8359                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
8360   } else {
8361     Result =
8362         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
8363   }
8364 
8365   if (XLen == 64)
8366     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8367   return Result;
8368 }
8369 
8370 TargetLowering::AtomicExpansionKind
8371 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
8372     AtomicCmpXchgInst *CI) const {
8373   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
8374   if (Size == 8 || Size == 16)
8375     return AtomicExpansionKind::MaskedIntrinsic;
8376   return AtomicExpansionKind::None;
8377 }
8378 
8379 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
8380     IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
8381     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
8382   unsigned XLen = Subtarget.getXLen();
8383   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
8384   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
8385   if (XLen == 64) {
8386     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
8387     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
8388     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8389     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
8390   }
8391   Type *Tys[] = {AlignedAddr->getType()};
8392   Function *MaskedCmpXchg =
8393       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
8394   Value *Result = Builder.CreateCall(
8395       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
8396   if (XLen == 64)
8397     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8398   return Result;
8399 }
8400 
8401 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
8402   return false;
8403 }
8404 
8405 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
8406                                                      EVT VT) const {
8407   VT = VT.getScalarType();
8408 
8409   if (!VT.isSimple())
8410     return false;
8411 
8412   switch (VT.getSimpleVT().SimpleTy) {
8413   case MVT::f16:
8414     return Subtarget.hasStdExtZfh();
8415   case MVT::f32:
8416     return Subtarget.hasStdExtF();
8417   case MVT::f64:
8418     return Subtarget.hasStdExtD();
8419   default:
8420     break;
8421   }
8422 
8423   return false;
8424 }
8425 
8426 Register RISCVTargetLowering::getExceptionPointerRegister(
8427     const Constant *PersonalityFn) const {
8428   return RISCV::X10;
8429 }
8430 
8431 Register RISCVTargetLowering::getExceptionSelectorRegister(
8432     const Constant *PersonalityFn) const {
8433   return RISCV::X11;
8434 }
8435 
8436 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
8437   // Return false to suppress the unnecessary extensions if the LibCall
8438   // arguments or return value is f32 type for LP64 ABI.
8439   RISCVABI::ABI ABI = Subtarget.getTargetABI();
8440   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
8441     return false;
8442 
8443   return true;
8444 }
8445 
8446 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
8447   if (Subtarget.is64Bit() && Type == MVT::i32)
8448     return true;
8449 
8450   return IsSigned;
8451 }
8452 
8453 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
8454                                                  SDValue C) const {
8455   // Check integral scalar types.
8456   if (VT.isScalarInteger()) {
8457     // Omit the optimization if the sub target has the M extension and the data
8458     // size exceeds XLen.
8459     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
8460       return false;
8461     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
8462       // Break the MUL to a SLLI and an ADD/SUB.
8463       const APInt &Imm = ConstNode->getAPIntValue();
8464       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
8465           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
8466         return true;
8467       // Omit the following optimization if the sub target has the M extension
8468       // and the data size >= XLen.
8469       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
8470         return false;
8471       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
8472       // a pair of LUI/ADDI.
8473       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
8474         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
8475         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
8476             (1 - ImmS).isPowerOf2())
8477         return true;
8478       }
8479     }
8480   }
8481 
8482   return false;
8483 }
8484 
8485 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
8486     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
8487     bool *Fast) const {
8488   if (!VT.isScalableVector())
8489     return false;
8490 
8491   EVT ElemVT = VT.getVectorElementType();
8492   if (Alignment >= ElemVT.getStoreSize()) {
8493     if (Fast)
8494       *Fast = true;
8495     return true;
8496   }
8497 
8498   return false;
8499 }
8500 
8501 bool RISCVTargetLowering::splitValueIntoRegisterParts(
8502     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
8503     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
8504   bool IsABIRegCopy = CC.hasValue();
8505   EVT ValueVT = Val.getValueType();
8506   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8507     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
8508     // and cast to f32.
8509     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
8510     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
8511     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
8512                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
8513     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
8514     Parts[0] = Val;
8515     return true;
8516   }
8517 
8518   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8519     LLVMContext &Context = *DAG.getContext();
8520     EVT ValueEltVT = ValueVT.getVectorElementType();
8521     EVT PartEltVT = PartVT.getVectorElementType();
8522     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8523     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8524     if (PartVTBitSize % ValueVTBitSize == 0) {
8525       // If the element types are different, bitcast to the same element type of
8526       // PartVT first.
8527       if (ValueEltVT != PartEltVT) {
8528         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
8529         assert(Count != 0 && "The number of element should not be zero.");
8530         EVT SameEltTypeVT =
8531             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8532         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
8533       }
8534       Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
8535                         Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8536       Parts[0] = Val;
8537       return true;
8538     }
8539   }
8540   return false;
8541 }
8542 
8543 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
8544     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
8545     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
8546   bool IsABIRegCopy = CC.hasValue();
8547   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8548     SDValue Val = Parts[0];
8549 
8550     // Cast the f32 to i32, truncate to i16, and cast back to f16.
8551     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
8552     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
8553     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
8554     return Val;
8555   }
8556 
8557   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8558     LLVMContext &Context = *DAG.getContext();
8559     SDValue Val = Parts[0];
8560     EVT ValueEltVT = ValueVT.getVectorElementType();
8561     EVT PartEltVT = PartVT.getVectorElementType();
8562     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8563     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8564     if (PartVTBitSize % ValueVTBitSize == 0) {
8565       EVT SameEltTypeVT = ValueVT;
8566       // If the element types are different, convert it to the same element type
8567       // of PartVT.
8568       if (ValueEltVT != PartEltVT) {
8569         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
8570         assert(Count != 0 && "The number of element should not be zero.");
8571         SameEltTypeVT =
8572             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8573       }
8574       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
8575                         DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8576       if (ValueEltVT != PartEltVT)
8577         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
8578       return Val;
8579     }
8580   }
8581   return SDValue();
8582 }
8583 
8584 #define GET_REGISTER_MATCHER
8585 #include "RISCVGenAsmMatcher.inc"
8586 
8587 Register
8588 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
8589                                        const MachineFunction &MF) const {
8590   Register Reg = MatchRegisterAltName(RegName);
8591   if (Reg == RISCV::NoRegister)
8592     Reg = MatchRegisterName(RegName);
8593   if (Reg == RISCV::NoRegister)
8594     report_fatal_error(
8595         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
8596   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
8597   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
8598     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
8599                              StringRef(RegName) + "\"."));
8600   return Reg;
8601 }
8602 
8603 namespace llvm {
8604 namespace RISCVVIntrinsicsTable {
8605 
8606 #define GET_RISCVVIntrinsicsTable_IMPL
8607 #include "RISCVGenSearchableTables.inc"
8608 
8609 } // namespace RISCVVIntrinsicsTable
8610 
8611 } // namespace llvm
8612