//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtZfh())
    addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  static const MVT::SimpleValueType BoolVecVTs[] = {
      MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
      MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
  static const MVT::SimpleValueType IntVecVTs[] = {
      MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
      MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
      MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
      MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
      MVT::nxv4i64, MVT::nxv8i64};
  static const MVT::SimpleValueType F16VecVTs[] = {
      MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
      MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
  static const MVT::SimpleValueType F32VecVTs[] = {
      MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
  static const MVT::SimpleValueType F64VecVTs[] = {
      MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};

  if (Subtarget.hasStdExtV()) {
    auto addRegClassForRVV = [this](MVT VT) {
      unsigned Size = VT.getSizeInBits().getKnownMinValue();
      assert(Size <= 512 && isPowerOf2_32(Size));
      const TargetRegisterClass *RC;
      if (Size <= 64)
        RC = &RISCV::VRRegClass;
      else if (Size == 128)
        RC = &RISCV::VRM2RegClass;
      else if (Size == 256)
        RC = &RISCV::VRM4RegClass;
      else
        RC = &RISCV::VRM8RegClass;

      addRegisterClass(VT, RC);
    };
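    // Illustrative mapping by known minimum size: nxv1i8 (8 bits) -> VR,
    // nxv4i32 (128 bits) -> VRM2, nxv16i32 (512 bits) -> VRM8.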

    for (MVT VT : BoolVecVTs)
      addRegClassForRVV(VT);
    for (MVT VT : IntVecVTs)
      addRegClassForRVV(VT);

    if (Subtarget.hasStdExtZfh())
      for (MVT VT : F16VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasStdExtF())
      for (MVT VT : F32VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasStdExtD())
      for (MVT VT : F64VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.useRVVForFixedLengthVectors()) {
      auto addRegClassForFixedVectors = [this](MVT VT) {
        MVT ContainerVT = getContainerForFixedLengthVector(VT);
        unsigned RCID = getRegClassIDForVecVT(ContainerVT);
        const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
        addRegisterClass(VT, TRI.getRegClass(RCID));
      };
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);

    setOperationAction(ISD::UADDO, MVT::i32, Custom);
    setOperationAction(ISD::USUBO, MVT::i32, Custom);
    setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
    setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  } else {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::MUL, MVT::i32, Custom);
      setOperationAction(ISD::MUL, MVT::i128, Custom);

      setOperationAction(ISD::SDIV, MVT::i8, Custom);
      setOperationAction(ISD::UDIV, MVT::i8, Custom);
      setOperationAction(ISD::UREM, MVT::i8, Custom);
      setOperationAction(ISD::SDIV, MVT::i16, Custom);
      setOperationAction(ISD::UDIV, MVT::i16, Custom);
      setOperationAction(ISD::UREM, MVT::i16, Custom);
      setOperationAction(ISD::SDIV, MVT::i32, Custom);
      setOperationAction(ISD::UDIV, MVT::i32, Custom);
      setOperationAction(ISD::UREM, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::MUL, MVT::i64, Custom);
    }
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::ROTL, MVT::i32, Custom);
      setOperationAction(ISD::ROTR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::ROTL, XLenVT, Expand);
    setOperationAction(ISD::ROTR, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbp()) {
    // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
    // more combining.
    setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
    setOperationAction(ISD::BSWAP, XLenVT, Custom);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
      setOperationAction(ISD::BSWAP, MVT::i32, Custom);
    }
  } else {
    // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
    // pattern match it directly in isel.
    setOperationAction(ISD::BSWAP, XLenVT,
                       Subtarget.hasStdExtZbb() ? Legal : Expand);
  }

  if (Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SMIN, XLenVT, Legal);
    setOperationAction(ISD::SMAX, XLenVT, Legal);
    setOperationAction(ISD::UMIN, XLenVT, Legal);
    setOperationAction(ISD::UMAX, XLenVT, Legal);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ, MVT::i32, Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
      setOperationAction(ISD::CTLZ, MVT::i32, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::CTTZ, XLenVT, Expand);
    setOperationAction(ISD::CTLZ, XLenVT, Expand);
    setOperationAction(ISD::CTPOP, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbt()) {
    setOperationAction(ISD::FSHL, XLenVT, Custom);
    setOperationAction(ISD::FSHR, XLenVT, Custom);
    setOperationAction(ISD::SELECT, XLenVT, Legal);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::FSHL, MVT::i32, Custom);
      setOperationAction(ISD::FSHR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SELECT, XLenVT, Custom);
  }

  ISD::CondCode FPCCToExpand[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};

  ISD::NodeType FPOpToExpand[] = {
      ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
      ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtZfh())
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);

  if (Subtarget.hasStdExtZfh()) {
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
    setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
  }

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);
  setOperationAction(ISD::JumpTable, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasStdExtV()) {
    setBooleanVectorContents(ZeroOrOneBooleanContent);

    setOperationAction(ISD::VSCALE, XLenVT, Custom);

    // RVV intrinsics may have illegal operands.
    // We also need to custom legalize vmv.x.s.
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
    }

    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

    static unsigned IntegerVPOps[] = {
        ISD::VP_ADD,  ISD::VP_SUB,  ISD::VP_MUL, ISD::VP_SDIV, ISD::VP_UDIV,
        ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,   ISD::VP_XOR,
        ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL};

    if (!Subtarget.is64Bit()) {
      // We must custom-lower certain vXi64 operations on RV32 due to the vector
      // element type being illegal.
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);

      setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
    }

    for (MVT VT : BoolVecVTs) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);

      // Mask VTs are custom-expanded into a series of standard nodes.
      setOperationAction(ISD::TRUNCATE, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

      setOperationAction(ISD::SELECT, VT, Expand);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

      // Expand all extending loads to types larger than this, and truncating
      // stores from types larger than this.
      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(OtherVT, VT, Expand);
        setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
      }
    }

    for (MVT VT : IntVecVTs) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);

      setOperationAction(ISD::SMIN, VT, Legal);
      setOperationAction(ISD::SMAX, VT, Legal);
      setOperationAction(ISD::UMIN, VT, Legal);
      setOperationAction(ISD::UMAX, VT, Legal);

      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Custom-lower extensions and truncations from/to mask types.
      setOperationAction(ISD::ANY_EXTEND, VT, Custom);
      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(ISD::SINT_TO_FP, VT, Custom);
      setOperationAction(ISD::UINT_TO_FP, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);
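      // Illustrative: converting nxv2i8 to nxv2f64 spans more than one
      // power of two (8 -> 64 bits), so it is staged through intermediate
      // types that narrow the gap one step at a time (a sketch of the
      // strategy, not the exact node sequence).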

      // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
      // nodes which truncate by one power of two at a time.
      setOperationAction(ISD::TRUNCATE, VT, Custom);
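      // e.g. truncating nxv2i64 to nxv2i8 emits three such stages:
      // i64 -> i32 -> i16 -> i8.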

      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

      // Custom-lower reduction operations to set up the corresponding custom
      // nodes' operands.
      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);

      for (unsigned VPOpc : IntegerVPOps) {
        setOperationAction(VPOpc, VT, Custom);
        // RV64 must custom-legalize the i32 EVL parameter.
        if (Subtarget.is64Bit())
          setOperationAction(VPOpc, MVT::i32, Custom);
      }

      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::MSTORE, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);

      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::SELECT, VT, Expand);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction(ISD::STEP_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);

      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(VT, OtherVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
      }
    }

    // Expand various CCs to best match the RVV ISA, which natively supports UNE
    // but no other unordered comparisons, and supports all ordered comparisons
    // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
    // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
    // and we pattern-match those back to the "original", swapping operands once
    // more. This way we catch both operations and both "vf" and "fv" forms with
    // fewer patterns.
    ISD::CondCode VFPCCToExpand[] = {
        ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
        ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
        ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
    };
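    // e.g. (setogt x, y) is first rewritten as (setolt y, x); isel then
    // recognizes the swapped form and emits the natural greater-than compare
    // (illustrative; the exact instruction choice is left to the patterns).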

    // Sets common operation actions on RVV floating-point vector types.
    const auto SetCommonVFPActions = [&](MVT VT) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
      // sizes are within one power-of-two of each other. Therefore conversions
      // between vXf16 and vXf64 must be lowered as sequences which convert via
      // vXf32.
      setOperationAction(ISD::FP_ROUND, VT, Custom);
      setOperationAction(ISD::FP_EXTEND, VT, Custom);
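      // e.g. extending nxv2f16 to nxv2f64 goes nxv2f16 -> nxv2f32 -> nxv2f64;
      // rounding in the opposite direction is staged the same way.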
      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      // Expand various condition codes (explained above).
      for (auto CC : VFPCCToExpand)
        setCondCodeAction(CC, VT, Expand);

      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);

      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
      setOperationAction(ISD::FCOPYSIGN, VT, Legal);

      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::MSTORE, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);

      setOperationAction(ISD::SELECT, VT, Expand);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
    };

    // Sets common extload/truncstore actions on RVV floating-point vector
    // types.
    const auto SetCommonVFPExtLoadTruncStoreActions =
        [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
          for (auto SmallVT : SmallerVTs) {
            setTruncStoreAction(VT, SmallVT, Expand);
            setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
          }
        };

    if (Subtarget.hasStdExtZfh())
      for (MVT VT : F16VecVTs)
        SetCommonVFPActions(VT);

    for (MVT VT : F32VecVTs) {
      if (Subtarget.hasStdExtF())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
    }

    for (MVT VT : F64VecVTs) {
      if (Subtarget.hasStdExtD())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
      SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
    }

    if (Subtarget.useRVVForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
          setTruncStoreAction(VT, OtherVT, Expand);
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
          setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);

        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);

        setOperationAction(ISD::SETCC, VT, Custom);

        setOperationAction(ISD::TRUNCATE, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
        setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
        setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

        // Operations below differ between mask vectors and other vectors.
        if (VT.getVectorElementType() == MVT::i1) {
          setOperationAction(ISD::AND, VT, Custom);
          setOperationAction(ISD::OR, VT, Custom);
          setOperationAction(ISD::XOR, VT, Custom);
          continue;
        }

        // Use SPLAT_VECTOR to prevent type legalization from destroying the
        // splats when type legalizing i64 scalar on RV32.
        // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
        // improvements first.
        if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
          setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
          setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
        }

        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::MLOAD, VT, Custom);
        setOperationAction(ISD::MSTORE, VT, Custom);
        setOperationAction(ISD::MGATHER, VT, Custom);
        setOperationAction(ISD::MSCATTER, VT, Custom);
        setOperationAction(ISD::ADD, VT, Custom);
        setOperationAction(ISD::MUL, VT, Custom);
        setOperationAction(ISD::SUB, VT, Custom);
        setOperationAction(ISD::AND, VT, Custom);
        setOperationAction(ISD::OR, VT, Custom);
        setOperationAction(ISD::XOR, VT, Custom);
        setOperationAction(ISD::SDIV, VT, Custom);
        setOperationAction(ISD::SREM, VT, Custom);
        setOperationAction(ISD::UDIV, VT, Custom);
        setOperationAction(ISD::UREM, VT, Custom);
        setOperationAction(ISD::SHL, VT, Custom);
        setOperationAction(ISD::SRA, VT, Custom);
        setOperationAction(ISD::SRL, VT, Custom);

        setOperationAction(ISD::SMIN, VT, Custom);
        setOperationAction(ISD::SMAX, VT, Custom);
        setOperationAction(ISD::UMIN, VT, Custom);
        setOperationAction(ISD::UMAX, VT, Custom);
        setOperationAction(ISD::ABS, VT, Custom);

        setOperationAction(ISD::MULHS, VT, Custom);
        setOperationAction(ISD::MULHU, VT, Custom);

        setOperationAction(ISD::SINT_TO_FP, VT, Custom);
        setOperationAction(ISD::UINT_TO_FP, VT, Custom);
        setOperationAction(ISD::FP_TO_SINT, VT, Custom);
        setOperationAction(ISD::FP_TO_UINT, VT, Custom);

        setOperationAction(ISD::VSELECT, VT, Custom);
        setOperationAction(ISD::SELECT, VT, Expand);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(ISD::ANY_EXTEND, VT, Custom);
        setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
        setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

        // Custom-lower reduction operations to set up the corresponding custom
        // nodes' operands.
        setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
        setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
        setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);

        for (unsigned VPOpc : IntegerVPOps) {
          setOperationAction(VPOpc, VT, Custom);
          // RV64 must custom-legalize the i32 EVL parameter.
          if (Subtarget.is64Bit())
            setOperationAction(VPOpc, MVT::i32, Custom);
        }
      }

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setTruncStoreAction(VT, OtherVT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);
        setOperationAction(ISD::MLOAD, VT, Custom);
        setOperationAction(ISD::MSTORE, VT, Custom);
        setOperationAction(ISD::MGATHER, VT, Custom);
        setOperationAction(ISD::MSCATTER, VT, Custom);
        setOperationAction(ISD::FADD, VT, Custom);
        setOperationAction(ISD::FSUB, VT, Custom);
        setOperationAction(ISD::FMUL, VT, Custom);
        setOperationAction(ISD::FDIV, VT, Custom);
        setOperationAction(ISD::FNEG, VT, Custom);
        setOperationAction(ISD::FABS, VT, Custom);
        setOperationAction(ISD::FCOPYSIGN, VT, Custom);
        setOperationAction(ISD::FSQRT, VT, Custom);
        setOperationAction(ISD::FMA, VT, Custom);
        setOperationAction(ISD::FMINNUM, VT, Custom);
        setOperationAction(ISD::FMAXNUM, VT, Custom);

        setOperationAction(ISD::FP_ROUND, VT, Custom);
        setOperationAction(ISD::FP_EXTEND, VT, Custom);

        for (auto CC : VFPCCToExpand)
          setCondCodeAction(CC, VT, Expand);

        setOperationAction(ISD::VSELECT, VT, Custom);
        setOperationAction(ISD::SELECT, VT, Expand);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
        setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
      }

      // Custom-legalize bitcasts from fixed-length vectors to scalar types.
      setOperationAction(ISD::BITCAST, MVT::i8, Custom);
      setOperationAction(ISD::BITCAST, MVT::i16, Custom);
      setOperationAction(ISD::BITCAST, MVT::i32, Custom);
      setOperationAction(ISD::BITCAST, MVT::i64, Custom);
      setOperationAction(ISD::BITCAST, MVT::f16, Custom);
      setOperationAction(ISD::BITCAST, MVT::f32, Custom);
      setOperationAction(ISD::BITCAST, MVT::f64, Custom);
    }
  }

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  setMinimumJumpTableEntries(5);

  // Jumps are expensive, compared to logic.
  setJumpIsExpensive();

  // We can use any register for comparisons.
  setHasMultipleConditionRegisters();

  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  if (Subtarget.hasStdExtV()) {
    setTargetDAGCombine(ISD::FCOPYSIGN);
    setTargetDAGCombine(ISD::MGATHER);
    setTargetDAGCombine(ISD::MSCATTER);
  }
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
                                            LLVMContext &Context,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  if (Subtarget.hasStdExtV() &&
      (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
    return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {
  if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
    return false;
  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
    return false;
  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
    return false;
  if (Imm.isNegZero())
    return false;
  return Imm.isZero();
}

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
         (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                       CallingConv::ID CC,
                                                       EVT VT) const {
  // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
  // end up using a GPR but that will be decided based on ABI.
  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
    return MVT::f32;

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned RISCVTargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
  // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
  // end up using a GPR but that will be decided based on ABI.
  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
    return 1;

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly by branches
// in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
// with 1/-1.
static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
                                    ISD::CondCode &CC, SelectionDAG &DAG) {
  // Convert X > -1 to X >= 0.
  if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
    RHS = DAG.getConstant(0, DL, RHS.getValueType());
    CC = ISD::SETGE;
    return;
  }
  // Convert X < 1 to 0 >= X.
  if (CC == ISD::SETLT && isOneConstant(RHS)) {
    RHS = LHS;
    LHS = DAG.getConstant(0, DL, RHS.getValueType());
    CC = ISD::SETGE;
    return;
  }

  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}
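// Illustrative rewrites performed above (not exhaustive):
//   (setgt X, -1) --> (setge X, 0)   favors comparison against zero
//   (setlt X, 1)  --> (setge 0, X)   likewise
//   (setgt X, Y)  --> (setlt Y, X)   swapped operands, natively supported CC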

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see translateSetCCForBranch).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
  assert(VT.isScalableVector() && "Expecting a scalable vector type");
  unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
  if (VT.getVectorElementType() == MVT::i1)
    KnownSize *= 8;
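  // Illustrative mapping (ELEN=64): nxv1i32 -> 32 bits -> LMUL_F2;
  // nxv4i32 -> 128 bits -> LMUL_2; nxv8i1 -> 8*8 = 64 bits -> LMUL_1.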

  switch (KnownSize) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case 8:
    return RISCVII::VLMUL::LMUL_F8;
  case 16:
    return RISCVII::VLMUL::LMUL_F4;
  case 32:
    return RISCVII::VLMUL::LMUL_F2;
  case 64:
    return RISCVII::VLMUL::LMUL_1;
  case 128:
    return RISCVII::VLMUL::LMUL_2;
  case 256:
    return RISCVII::VLMUL::LMUL_4;
  case 512:
    return RISCVII::VLMUL::LMUL_8;
  }
}

unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
  switch (LMul) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case RISCVII::VLMUL::LMUL_F8:
  case RISCVII::VLMUL::LMUL_F4:
  case RISCVII::VLMUL::LMUL_F2:
  case RISCVII::VLMUL::LMUL_1:
    return RISCV::VRRegClassID;
  case RISCVII::VLMUL::LMUL_2:
    return RISCV::VRM2RegClassID;
  case RISCVII::VLMUL::LMUL_4:
    return RISCV::VRM4RegClassID;
  case RISCVII::VLMUL::LMUL_8:
    return RISCV::VRM8RegClassID;
  }
}

unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
  RISCVII::VLMUL LMUL = getLMUL(VT);
  if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
      LMUL == RISCVII::VLMUL::LMUL_F4 ||
      LMUL == RISCVII::VLMUL::LMUL_F2 ||
      LMUL == RISCVII::VLMUL::LMUL_1) {
    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm1_0 + Index;
  }
  if (LMUL == RISCVII::VLMUL::LMUL_2) {
    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm2_0 + Index;
  }
  if (LMUL == RISCVII::VLMUL::LMUL_4) {
    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm4_0 + Index;
  }
  llvm_unreachable("Invalid vector type.");
}

unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
  if (VT.getVectorElementType() == MVT::i1)
    return RISCV::VRRegClassID;
  return getRegClassIDForLMUL(getLMUL(VT));
}

// Attempt to decompose a subvector insert/extract between VecVT and
// SubVecVT via subregister indices. Returns the subregister index that
// can perform the subvector insert/extract with the given element index, as
// well as the index corresponding to any leftover subvectors that must be
// further inserted/extracted within the register class for SubVecVT.
std::pair<unsigned, unsigned>
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
    MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
    const RISCVRegisterInfo *TRI) {
  static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
                 RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
                 RISCV::VRM2RegClassID > RISCV::VRRegClassID),
                "Register classes not ordered");
  unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
  unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
  // Try to compose a subregister index that takes us from the incoming
  // LMUL>1 register class down to the outgoing one. At each step we halve
  // the LMUL:
  //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
  // Note that this is not guaranteed to find a subregister index, such as
  // when we are extracting from one VR type to another.
  unsigned SubRegIdx = RISCV::NoSubRegister;
  for (const unsigned RCID :
       {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
    if (VecRegClassID > RCID && SubRegClassID <= RCID) {
      VecVT = VecVT.getHalfNumVectorElementsVT();
      bool IsHi =
          InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
      SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
                                            getSubregIndexByMVT(VecVT, IsHi));
      if (IsHi)
        InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
    }
  return {SubRegIdx, InsertExtractIdx};
}

static bool useRVVForFixedLengthVectorVT(MVT VT,
                                         const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
  if (!Subtarget.useRVVForFixedLengthVectors())
    return false;

  // We only support a set of vector types with an equivalent number of
  // elements to avoid legalization issues. Therefore -- since we don't have
  // v512i8/v512i16/etc -- the longest fixed-length vector type we support has
  // 256 elements.
  if (VT.getVectorNumElements() > 256)
    return false;

  unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();

  // Don't use RVV for vectors we cannot scalarize if required.
  switch (VT.getVectorElementType().SimpleTy) {
  // i1 is supported but has different rules.
  default:
    return false;
  case MVT::i1:
    // Masks can only use a single register.
    if (VT.getVectorNumElements() > MinVLen)
      return false;
    MinVLen /= 8;
    break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
    break;
  case MVT::f16:
    if (!Subtarget.hasStdExtZfh())
      return false;
    break;
  case MVT::f32:
    if (!Subtarget.hasStdExtF())
      return false;
    break;
  case MVT::f64:
    if (!Subtarget.hasStdExtD())
      return false;
    break;
  }

  unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
  // Don't use RVV for types that don't fit.
  if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
    return false;
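  // Illustrative (MinVLen = 128): v8i32 is 256 bits -> LMUL 2; v64i8 is
  // 512 bits -> LMUL 4; either is rejected if it exceeds the configured
  // maximum LMUL.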

  // TODO: Perhaps an artificial restriction, but worth having whilst getting
  // the base fixed length RVV support in place.
  if (!VT.isPow2VectorType())
    return false;

  return true;
}

bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
  return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
}

// Return the largest legal scalable vector type that matches VT's element type.
static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
                                            const RISCVSubtarget &Subtarget) {
  // This may be called before legal types are set up.
  assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
          useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
         "Expected legal fixed length vector!");

  unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();

  MVT EltVT = VT.getVectorElementType();
  switch (EltVT.SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for RVV container");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f16:
  case MVT::f32:
  case MVT::f64: {
    // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
    // narrower types, but we can't have a fractional LMUL with denominator
    // greater than 64/SEW.
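    // Illustrative (MinVLen = 128, i.e. two 64-bit blocks): v8i32 maps to
    // nxv4i32 and v4i16 maps to nxv2i16.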
    unsigned NumElts =
        divideCeil(VT.getVectorNumElements(), MinVLen / RISCV::RVVBitsPerBlock);
    return MVT::getScalableVectorVT(EltVT, NumElts);
  }
  }
}

static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
                                            const RISCVSubtarget &Subtarget) {
  return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
                                          Subtarget);
}

MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
  return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
}

// Grow V to consume an entire RVV register.
static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                       const RISCVSubtarget &Subtarget) {
  assert(VT.isScalableVector() &&
         "Expected to convert into a scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected a fixed length vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
}

// Shrink V so it's just big enough to maintain a VT's worth of data.
static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                         const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() &&
         "Expected to convert into a fixed length vector!");
  assert(V.getValueType().isScalableVector() &&
         "Expected a scalable vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
}

// Gets the two common "VL" operands: an all-ones mask and the vector length.
// VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
// the vector type that it is contained in.
static std::pair<SDValue, SDValue>
getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
                const RISCVSubtarget &Subtarget) {
  assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
  MVT XLenVT = Subtarget.getXLenVT();
  SDValue VL = VecVT.isFixedLengthVector()
                   ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
                   : DAG.getRegister(RISCV::X0, XLenVT);
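  // Note: for scalable types, X0 as the VL operand selects VLMAX (as in
  // "vsetvli x0"); fixed-length types use their exact element count, e.g. a
  // constant 4 for v4i32.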
1286   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1287   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1288   return {Mask, VL};
1289 }
1290 
1291 // As above but assuming the given type is a scalable vector type.
1292 static std::pair<SDValue, SDValue>
1293 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1294                         const RISCVSubtarget &Subtarget) {
1295   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1296   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1297 }
1298 
1299 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1300 // of either is (currently) supported. This can get us into an infinite loop
1301 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1302 // as a ..., etc.
1303 // Until either (or both) of these can reliably lower any node, reporting that
1304 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1305 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1306 // which is not desirable.
1307 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1308     EVT VT, unsigned DefinedValues) const {
1309   return false;
1310 }
1311 
1312 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1313   // Only splats are currently supported.
1314   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1315     return true;
1316 
1317   return false;
1318 }
1319 
1320 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1321                                  const RISCVSubtarget &Subtarget) {
1322   MVT VT = Op.getSimpleValueType();
1323   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1324 
1325   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1326 
1327   SDLoc DL(Op);
1328   SDValue Mask, VL;
1329   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1330 
1331   unsigned Opc =
1332       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1333   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1334   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1335 }
1336 
1337 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1338                                  const RISCVSubtarget &Subtarget) {
1339   MVT VT = Op.getSimpleValueType();
1340   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1341 
1342   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1343 
1344   SDLoc DL(Op);
1345   SDValue Mask, VL;
1346   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1347 
1348   MVT XLenVT = Subtarget.getXLenVT();
1349   unsigned NumElts = Op.getNumOperands();
1350 
1351   if (VT.getVectorElementType() == MVT::i1) {
1352     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1353       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1354       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1355     }
1356 
1357     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1358       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1359       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1360     }
1361 
1362     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1363     // scalar integer chunks whose bit-width depends on the number of mask
1364     // bits and XLEN.
1365     // First, determine the most appropriate scalar integer type to use. This
1366     // is at most XLenVT, but may be shrunk to a smaller vector element type
1367     // according to the size of the final vector - use i8 chunks rather than
1368     // XLenVT if we're producing a v8i1. This results in more consistent
1369     // codegen across RV32 and RV64.
    // If we have to use more than one INSERT_VECTOR_ELT then this optimization
    // is likely to increase code size; avoid performing it in such a case.
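    // For example (illustrative), a constant v4i1 <1,0,1,1> uses 8-bit chunks:
    // the bits are packed LSB-first into 0b1101 (13) and inserted into a v1i8,
    // which is then bitcast to v8i1 and shrunk back down to v4i1 below.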
1372     unsigned NumViaIntegerBits =
1373         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1374     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1375         (!DAG.shouldOptForSize() || NumElts <= NumViaIntegerBits)) {
1376       // Now we can create our integer vector type. Note that it may be larger
1377       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1378       MVT IntegerViaVecVT =
1379           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1380                            divideCeil(NumElts, NumViaIntegerBits));
1381 
1382       uint64_t Bits = 0;
1383       unsigned BitPos = 0, IntegerEltIdx = 0;
1384       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1385 
1386       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1387         // Once we accumulate enough bits to fill our scalar type, insert into
1388         // our vector and clear our accumulated data.
1389         if (I != 0 && I % NumViaIntegerBits == 0) {
1390           if (NumViaIntegerBits <= 32)
1391             Bits = SignExtend64(Bits, 32);
1392           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1393           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1394                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1395           Bits = 0;
1396           BitPos = 0;
1397           IntegerEltIdx++;
1398         }
1399         SDValue V = Op.getOperand(I);
1400         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1401         Bits |= ((uint64_t)BitValue << BitPos);
1402       }
1403 
1404       // Insert the (remaining) scalar value into position in our integer
1405       // vector type.
1406       if (NumViaIntegerBits <= 32)
1407         Bits = SignExtend64(Bits, 32);
1408       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1409       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1410                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1411 
1412       if (NumElts < NumViaIntegerBits) {
1413         // If we're producing a smaller vector than our minimum legal integer
1414         // type, bitcast to the equivalent (known-legal) mask type, and extract
1415         // our final mask.
1416         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1417         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1418         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1419                           DAG.getConstant(0, DL, XLenVT));
1420       } else {
1421         // Else we must have produced an integer type with the same size as the
1422         // mask type; bitcast for the final result.
1423         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1424         Vec = DAG.getBitcast(VT, Vec);
1425       }
1426 
1427       return Vec;
1428     }
1429 
1430     // A splat can be lowered as a SETCC. For each fixed-length mask vector
1431     // type, we have a legal equivalently-sized i8 type, so we can use that.
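    // For example (illustrative), splatting scalar %x as v4i1 is emitted as a
    // v4i8 splat of (and %x, 1) compared setne against a v4i8 zero splat.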
1432     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1433       assert(Splat.getValueType() == XLenVT &&
1434              "Unexpected type for i1 splat value");
1435       MVT InterVT = VT.changeVectorElementType(MVT::i8);
1436       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
1437                           DAG.getConstant(1, DL, XLenVT));
1438       Splat = DAG.getSplatBuildVector(InterVT, DL, Splat);
1439       SDValue Zero = DAG.getConstant(0, DL, InterVT);
1440       return DAG.getSetCC(DL, VT, Splat, Zero, ISD::SETNE);
1441     }
1442 
1443     return SDValue();
1444   }
1445 
1446   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1447     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1448                                         : RISCVISD::VMV_V_X_VL;
1449     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1450     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1451   }
1452 
1453   // Try and match an index sequence, which we can lower directly to the vid
1454   // instruction. An all-undef vector is matched by getSplatValue, above.
1455   if (VT.isInteger()) {
1456     bool IsVID = true;
1457     for (unsigned I = 0; I < NumElts && IsVID; I++)
1458       IsVID &= Op.getOperand(I).isUndef() ||
1459                (isa<ConstantSDNode>(Op.getOperand(I)) &&
1460                 Op.getConstantOperandVal(I) == I);
1461 
1462     if (IsVID) {
1463       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1464       return convertFromScalableVector(VT, VID, DAG, Subtarget);
1465     }
1466   }
1467 
1468   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1469   // when re-interpreted as a vector with a larger element type. For example,
1470   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
  // could instead be splat as
1472   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1473   // TODO: This optimization could also work on non-constant splats, but it
1474   // would require bit-manipulation instructions to construct the splat value.
1475   SmallVector<SDValue> Sequence;
1476   unsigned EltBitSize = VT.getScalarSizeInBits();
1477   const auto *BV = cast<BuildVectorSDNode>(Op);
1478   if (VT.isInteger() && EltBitSize < 64 &&
1479       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1480       BV->getRepeatedSequence(Sequence) &&
1481       (Sequence.size() * EltBitSize) <= 64) {
1482     unsigned SeqLen = Sequence.size();
1483     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1484     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1485     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1486             ViaIntVT == MVT::i64) &&
1487            "Unexpected sequence type");
1488 
1489     unsigned EltIdx = 0;
1490     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1491     uint64_t SplatValue = 0;
1492     // Construct the amalgamated value which can be splatted as this larger
1493     // vector type.
1494     for (const auto &SeqV : Sequence) {
1495       if (!SeqV.isUndef())
1496         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1497                        << (EltIdx * EltBitSize));
1498       EltIdx++;
1499     }
1500 
    // On RV64, sign-extend from 32 to 64 bits where possible in order to
    // achieve better constant materialization.
1503     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1504       SplatValue = SignExtend64(SplatValue, 32);
1505 
1506     // Since we can't introduce illegal i64 types at this stage, we can only
1507     // perform an i64 splat on RV32 if it is its own sign-extended value. That
1508     // way we can use RVV instructions to splat.
1509     assert((ViaIntVT.bitsLE(XLenVT) ||
1510             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1511            "Unexpected bitcast sequence");
1512     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1513       SDValue ViaVL =
1514           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1515       MVT ViaContainerVT =
1516           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
1517       SDValue Splat =
1518           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1519                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1520       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1521       return DAG.getBitcast(VT, Splat);
1522     }
1523   }
1524 
1525   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1526   // which constitute a large proportion of the elements. In such cases we can
1527   // splat a vector with the dominant element and make up the shortfall with
1528   // INSERT_VECTOR_ELTs.
  // Note that this also covers 2-element vectors: the upper element is
  // treated as the "dominant" one, allowing us to use a splat to "insert" the
  // upper element, followed by an insert of the lower element at position 0,
  // which improves codegen.
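  // For example (illustrative), v4i32 <1, 7, 7, 7> splats 7 and then inserts
  // the lone 1 at index 0 with a single INSERT_VECTOR_ELT.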
1533   SDValue DominantValue;
1534   unsigned MostCommonCount = 0;
1535   DenseMap<SDValue, unsigned> ValueCounts;
1536   unsigned NumUndefElts =
1537       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1538 
1539   for (SDValue V : Op->op_values()) {
1540     if (V.isUndef())
1541       continue;
1542 
1543     ValueCounts.insert(std::make_pair(V, 0));
1544     unsigned &Count = ValueCounts[V];
1545 
1546     // Is this value dominant? In case of a tie, prefer the highest element as
1547     // it's cheaper to insert near the beginning of a vector than it is at the
1548     // end.
1549     if (++Count >= MostCommonCount) {
1550       DominantValue = V;
1551       MostCommonCount = Count;
1552     }
1553   }
1554 
1555   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
1556   unsigned NumDefElts = NumElts - NumUndefElts;
1557   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1558 
1559   // Don't perform this optimization when optimizing for size, since
1560   // materializing elements and inserting them tends to cause code bloat.
1561   if (!DAG.shouldOptForSize() &&
1562       ((MostCommonCount > DominantValueCountThreshold) ||
1563        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
1564     // Start by splatting the most common element.
1565     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1566 
1567     DenseSet<SDValue> Processed{DominantValue};
1568     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
1569     for (const auto &OpIdx : enumerate(Op->ops())) {
1570       const SDValue &V = OpIdx.value();
1571       if (V.isUndef() || !Processed.insert(V).second)
1572         continue;
1573       if (ValueCounts[V] == 1) {
1574         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
1575                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
1576       } else {
1577         // Blend in all instances of this value using a VSELECT, using a
1578         // mask where each bit signals whether that element is the one
1579         // we're after.
1580         SmallVector<SDValue> Ops;
1581         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
1582           return DAG.getConstant(V == V1, DL, XLenVT);
1583         });
1584         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
1585                           DAG.getBuildVector(SelMaskTy, DL, Ops),
1586                           DAG.getSplatBuildVector(VT, DL, V), Vec);
1587       }
1588     }
1589 
1590     return Vec;
1591   }
1592 
1593   return SDValue();
1594 }
1595 
// Use a stack slot to splat the two i32 values in Lo/Hi to the desired
// scalable vector type nxvXi64 VT.
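// The emitted sequence (a sketch) is: store Lo to the slot, store Hi at
// offset 4, then broadcast the combined 8-byte value to every element with a
// zero-strided vlse64 from the slot.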
1598 static SDValue splatPartsI64ThroughStack(const SDLoc &DL, MVT VT, SDValue Lo,
1599                                          SDValue Hi, SDValue VL,
1600                                          SelectionDAG &DAG) {
1601   assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
1602          Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
1603          "Unexpected VTs!");
1604   MachineFunction &MF = DAG.getMachineFunction();
1605   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
1606 
  // We use the same frame index we use for moving two i32s into a 64-bit FPR.
  // This is an analogous operation.
1609   int FI = FuncInfo->getMoveF64FrameIndex(MF);
1610   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
1611   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1612   SDValue StackSlot =
1613       DAG.getFrameIndex(FI, TLI.getPointerTy(DAG.getDataLayout()));
1614 
1615   SDValue Chain = DAG.getEntryNode();
1616   Lo = DAG.getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
1617 
1618   SDValue OffsetSlot =
1619       DAG.getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
1620   Hi = DAG.getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4), Align(8));
1621 
1622   Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
1623 
1624   SDVTList VTs = DAG.getVTList({VT, MVT::Other});
1625   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
1626   SDValue Ops[] = {Chain, IntID, StackSlot,
1627                    DAG.getRegister(RISCV::X0, MVT::i64), VL};
1628 
1629   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64,
1630                                  MPI, Align(8), MachineMemOperand::MOLoad);
1631 }
1632 
1633 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
1634                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
1635   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
1636     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
1637     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If every bit of Hi matches Lo's sign bit (i.e. the Lo/Hi pair is the
    // sign-extension of Lo), lower this as a custom node in order to try and
    // match RVV vector/scalar instructions.
1640     if ((LoC >> 31) == HiC)
1641       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
1642   }
1643 
1644   // Fall back to a stack store and stride x0 vector load.
1645   return splatPartsI64ThroughStack(DL, VT, Lo, Hi, VL, DAG);
1646 }
1647 
1648 // Called by type legalization to handle splat of i64 on RV32.
1649 // FIXME: We can optimize this when the type has sign or zero bits in one
1650 // of the halves.
1651 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
1652                                    SDValue VL, SelectionDAG &DAG) {
1653   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
1654   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1655                            DAG.getConstant(0, DL, MVT::i32));
1656   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1657                            DAG.getConstant(1, DL, MVT::i32));
1658   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
1659 }
1660 
// This function lowers a splat of the scalar operand Scalar with the vector
// length VL. It ensures the final sequence is type legal, which is useful when
// lowering a splat after type legalization.
1664 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
1665                                 SelectionDAG &DAG,
1666                                 const RISCVSubtarget &Subtarget) {
1667   if (VT.isFloatingPoint())
1668     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
1669 
1670   MVT XLenVT = Subtarget.getXLenVT();
1671 
1672   // Simplest case is that the operand needs to be promoted to XLenVT.
1673   if (Scalar.getValueType().bitsLE(XLenVT)) {
    // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
1677     // FIXME: Should we ignore the upper bits in isel instead?
1678     unsigned ExtOpc =
1679         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
1680     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
1681     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
1682   }
1683 
1684   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
1685          "Unexpected scalar for splat lowering!");
1686 
1687   // Otherwise use the more complicated splatting algorithm.
1688   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
1689 }
1690 
1691 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1692                                    const RISCVSubtarget &Subtarget) {
1693   SDValue V1 = Op.getOperand(0);
1694   SDValue V2 = Op.getOperand(1);
1695   SDLoc DL(Op);
1696   MVT XLenVT = Subtarget.getXLenVT();
1697   MVT VT = Op.getSimpleValueType();
1698   unsigned NumElts = VT.getVectorNumElements();
1699   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1700 
1701   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1702 
1703   SDValue TrueMask, VL;
1704   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1705 
1706   if (SVN->isSplat()) {
1707     const int Lane = SVN->getSplatIndex();
1708     if (Lane >= 0) {
1709       MVT SVT = VT.getVectorElementType();
1710 
1711       // Turn splatted vector load into a strided load with an X0 stride.
1712       SDValue V = V1;
1713       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
1714       // with undef.
1715       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
1716       int Offset = Lane;
1717       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
1718         int OpElements =
1719             V.getOperand(0).getSimpleValueType().getVectorNumElements();
1720         V = V.getOperand(Offset / OpElements);
1721         Offset %= OpElements;
1722       }
1723 
1724       // We need to ensure the load isn't atomic or volatile.
1725       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
1726         auto *Ld = cast<LoadSDNode>(V);
1727         Offset *= SVT.getStoreSize();
1728         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
1729                                                    TypeSize::Fixed(Offset), DL);
1730 
1731         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
1732         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
1733           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
1734           SDValue IntID =
1735               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1736           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
1737                            DAG.getRegister(RISCV::X0, XLenVT), VL};
1738           SDValue NewLoad = DAG.getMemIntrinsicNode(
1739               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
1740               DAG.getMachineFunction().getMachineMemOperand(
1741                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
1742           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
1743           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
1744         }
1745 
1746         // Otherwise use a scalar load and splat. This will give the best
1747         // opportunity to fold a splat into the operation. ISel can turn it into
1748         // the x0 strided load if we aren't able to fold away the select.
1749         if (SVT.isFloatingPoint())
1750           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
1751                           Ld->getPointerInfo().getWithOffset(Offset),
1752                           Ld->getOriginalAlign(),
1753                           Ld->getMemOperand()->getFlags());
1754         else
1755           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
1756                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
1757                              Ld->getOriginalAlign(),
1758                              Ld->getMemOperand()->getFlags());
1759         DAG.makeEquivalentMemoryOrdering(Ld, V);
1760 
1761         unsigned Opc =
1762             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1763         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
1764         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1765       }
1766 
1767       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1768       assert(Lane < (int)NumElts && "Unexpected lane!");
1769       SDValue Gather =
1770           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1771                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
1772       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1773     }
1774   }
1775 
  // Detect shuffles which can be re-expressed as vector selects; these are
  // shuffles in which each element in the destination is taken from an element
  // at the corresponding index in either source vector.
1779   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
1780     int MaskIndex = MaskIdx.value();
1781     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
1782   });
1783 
1784   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
1785 
1786   SmallVector<SDValue> MaskVals;
1787   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
1788   // merged with a second vrgather.
1789   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
1790 
1791   // By default we preserve the original operand order, and use a mask to
1792   // select LHS as true and RHS as false. However, since RVV vector selects may
1793   // feature splats but only on the LHS, we may choose to invert our mask and
1794   // instead select between RHS and LHS.
1795   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
1796   bool InvertMask = IsSelect == SwapOps;
1797 
1798   // Now construct the mask that will be used by the vselect or blended
1799   // vrgather operation. For vrgathers, construct the appropriate indices into
1800   // each vector.
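  // For example (illustrative), the mask <1, 0, 7, 6> with NumElts=4 yields
  // GatherIndicesLHS = <1, 0, 0, 0> and GatherIndicesRHS = <0, 0, 3, 2>; the
  // select mask then blends lanes 0 and 1 from the LHS gather with lanes 2
  // and 3 from the RHS gather.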
1801   for (int MaskIndex : SVN->getMask()) {
1802     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
1803     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
1804     if (!IsSelect) {
1805       bool IsLHS = MaskIndex < (int)NumElts;
1806       // For "undef" elements of -1, shuffle in element 0 instead.
1807       GatherIndicesLHS.push_back(
1808           DAG.getConstant(IsLHS ? std::max(MaskIndex, 0) : 0, DL, XLenVT));
1809       // TODO: If we're masking out unused elements anyway, it might produce
1810       // better code if we use the most-common element index instead of 0.
1811       GatherIndicesRHS.push_back(
1812           DAG.getConstant(IsLHS ? 0 : MaskIndex - NumElts, DL, XLenVT));
1813     }
1814   }
1815 
1816   if (SwapOps) {
1817     std::swap(V1, V2);
1818     std::swap(GatherIndicesLHS, GatherIndicesRHS);
1819   }
1820 
1821   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
1822   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
1823   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
1824 
1825   if (IsSelect)
1826     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
1827 
1828   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
1829     // On such a large vector we're unable to use i8 as the index type.
1830     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
1831     // may involve vector splitting if we're already at LMUL=8, or our
1832     // user-supplied maximum fixed-length LMUL.
1833     return SDValue();
1834   }
1835 
1836   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
1837   MVT IndexVT = VT.changeTypeToInteger();
1838   // Since we can't introduce illegal index types at this stage, use i16 and
1839   // vrgatherei16 if the corresponding index type for plain vrgather is greater
1840   // than XLenVT.
1841   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
1842     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
1843     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
1844   }
1845 
1846   MVT IndexContainerVT =
1847       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
1848 
1849   SDValue Gather;
1850   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
1851   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
1852   if (SDValue SplatValue = DAG.getSplatValue(V1)) {
1853     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
1854   } else {
1855     SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
1856     LHSIndices =
1857         convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
1858 
1859     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1860     Gather =
1861         DAG.getNode(GatherOpc, DL, ContainerVT, V1, LHSIndices, TrueMask, VL);
1862   }
1863 
1864   // If a second vector operand is used by this shuffle, blend it in with an
1865   // additional vrgather.
1866   if (!V2.isUndef()) {
1867     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
1868     SelectMask =
1869         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
1870 
1871     SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
1872     RHSIndices =
1873         convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
1874 
1875     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
1876     V2 = DAG.getNode(GatherOpc, DL, ContainerVT, V2, RHSIndices, TrueMask, VL);
1877     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
1878                          Gather, VL);
1879   }
1880 
1881   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1882 }
1883 
1884 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
1885                                      SDLoc DL, SelectionDAG &DAG,
1886                                      const RISCVSubtarget &Subtarget) {
1887   if (VT.isScalableVector())
1888     return DAG.getFPExtendOrRound(Op, DL, VT);
1889   assert(VT.isFixedLengthVector() &&
1890          "Unexpected value type for RVV FP extend/round lowering");
1891   SDValue Mask, VL;
1892   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1893   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
1894                         ? RISCVISD::FP_EXTEND_VL
1895                         : RISCVISD::FP_ROUND_VL;
1896   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
1897 }
1898 
1899 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
1900                                             SelectionDAG &DAG) const {
1901   switch (Op.getOpcode()) {
1902   default:
1903     report_fatal_error("unimplemented operand");
1904   case ISD::GlobalAddress:
1905     return lowerGlobalAddress(Op, DAG);
1906   case ISD::BlockAddress:
1907     return lowerBlockAddress(Op, DAG);
1908   case ISD::ConstantPool:
1909     return lowerConstantPool(Op, DAG);
1910   case ISD::JumpTable:
1911     return lowerJumpTable(Op, DAG);
1912   case ISD::GlobalTLSAddress:
1913     return lowerGlobalTLSAddress(Op, DAG);
1914   case ISD::SELECT:
1915     return lowerSELECT(Op, DAG);
1916   case ISD::BRCOND:
1917     return lowerBRCOND(Op, DAG);
1918   case ISD::VASTART:
1919     return lowerVASTART(Op, DAG);
1920   case ISD::FRAMEADDR:
1921     return lowerFRAMEADDR(Op, DAG);
1922   case ISD::RETURNADDR:
1923     return lowerRETURNADDR(Op, DAG);
1924   case ISD::SHL_PARTS:
1925     return lowerShiftLeftParts(Op, DAG);
1926   case ISD::SRA_PARTS:
1927     return lowerShiftRightParts(Op, DAG, true);
1928   case ISD::SRL_PARTS:
1929     return lowerShiftRightParts(Op, DAG, false);
1930   case ISD::BITCAST: {
1931     SDLoc DL(Op);
1932     EVT VT = Op.getValueType();
1933     SDValue Op0 = Op.getOperand(0);
1934     EVT Op0VT = Op0.getValueType();
1935     MVT XLenVT = Subtarget.getXLenVT();
1936     if (VT.isFixedLengthVector()) {
1937       // We can handle fixed length vector bitcasts with a simple replacement
1938       // in isel.
1939       if (Op0VT.isFixedLengthVector())
1940         return Op;
1941       // When bitcasting from scalar to fixed-length vector, insert the scalar
1942       // into a one-element vector of the result type, and perform a vector
1943       // bitcast.
1944       if (!Op0VT.isVector()) {
1945         auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
1946         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
1947                                               DAG.getUNDEF(BVT), Op0,
1948                                               DAG.getConstant(0, DL, XLenVT)));
1949       }
1950       return SDValue();
1951     }
1952     // Custom-legalize bitcasts from fixed-length vector types to scalar types
1953     // thus: bitcast the vector to a one-element vector type whose element type
1954     // is the same as the result type, and extract the first element.
1955     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
1956       LLVMContext &Context = *DAG.getContext();
1957       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
1958       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
1959                          DAG.getConstant(0, DL, XLenVT));
1960     }
1961     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
1962       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
1963       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
1964       return FPConv;
1965     }
1966     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
1967         Subtarget.hasStdExtF()) {
1968       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
1969       SDValue FPConv =
1970           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
1971       return FPConv;
1972     }
1973     return SDValue();
1974   }
1975   case ISD::INTRINSIC_WO_CHAIN:
1976     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
1977   case ISD::INTRINSIC_W_CHAIN:
1978     return LowerINTRINSIC_W_CHAIN(Op, DAG);
1979   case ISD::BSWAP:
1980   case ISD::BITREVERSE: {
    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
1982     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
1983     MVT VT = Op.getSimpleValueType();
1984     SDLoc DL(Op);
1985     // Start with the maximum immediate value which is the bitwidth - 1.
1986     unsigned Imm = VT.getSizeInBits() - 1;
1987     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
1988     if (Op.getOpcode() == ISD::BSWAP)
1989       Imm &= ~0x7U;
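    // e.g. (illustrative) an i32 BITREVERSE emits GREV with immediate 31,
    // while an i32 BSWAP's immediate becomes 31 & ~0x7 = 24.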
1990     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
1991                        DAG.getConstant(Imm, DL, VT));
1992   }
1993   case ISD::FSHL:
1994   case ISD::FSHR: {
1995     MVT VT = Op.getSimpleValueType();
1996     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
1997     SDLoc DL(Op);
1998     if (Op.getOperand(2).getOpcode() == ISD::Constant)
1999       return Op;
    // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
    // use log2(XLen) bits. Mask the shift amount accordingly.
2002     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
2003     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
2004                                 DAG.getConstant(ShAmtWidth, DL, VT));
2005     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
2006     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
2007   }
2008   case ISD::TRUNCATE: {
2009     SDLoc DL(Op);
2010     MVT VT = Op.getSimpleValueType();
    // Only custom-lower vector truncates.
2012     if (!VT.isVector())
2013       return Op;
2014 
    // Truncates to mask types are handled differently.
2016     if (VT.getVectorElementType() == MVT::i1)
2017       return lowerVectorMaskTrunc(Op, DAG);
2018 
2019     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2020     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2021     // truncate by one power of two at a time.
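    // For example, an i32->i8 element truncate is emitted as two such nodes:
    // i32->i16 followed by i16->i8.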
2022     MVT DstEltVT = VT.getVectorElementType();
2023 
2024     SDValue Src = Op.getOperand(0);
2025     MVT SrcVT = Src.getSimpleValueType();
2026     MVT SrcEltVT = SrcVT.getVectorElementType();
2027 
2028     assert(DstEltVT.bitsLT(SrcEltVT) &&
2029            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2030            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2031            "Unexpected vector truncate lowering");
2032 
2033     MVT ContainerVT = SrcVT;
2034     if (SrcVT.isFixedLengthVector()) {
2035       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2036       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2037     }
2038 
2039     SDValue Result = Src;
2040     SDValue Mask, VL;
2041     std::tie(Mask, VL) =
2042         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2043     LLVMContext &Context = *DAG.getContext();
2044     const ElementCount Count = ContainerVT.getVectorElementCount();
2045     do {
2046       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2047       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2048       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2049                            Mask, VL);
2050     } while (SrcEltVT != DstEltVT);
2051 
2052     if (SrcVT.isFixedLengthVector())
2053       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2054 
2055     return Result;
2056   }
2057   case ISD::ANY_EXTEND:
2058   case ISD::ZERO_EXTEND:
2059     if (Op.getOperand(0).getValueType().isVector() &&
2060         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2061       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2062     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2063   case ISD::SIGN_EXTEND:
2064     if (Op.getOperand(0).getValueType().isVector() &&
2065         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2066       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2067     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2068   case ISD::SPLAT_VECTOR_PARTS:
2069     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2070   case ISD::INSERT_VECTOR_ELT:
2071     return lowerINSERT_VECTOR_ELT(Op, DAG);
2072   case ISD::EXTRACT_VECTOR_ELT:
2073     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2074   case ISD::VSCALE: {
2075     MVT VT = Op.getSimpleValueType();
2076     SDLoc DL(Op);
2077     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
2078     // We define our scalable vector types for lmul=1 to use a 64 bit known
2079     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
2080     // vscale as VLENB / 8.
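    // e.g. (illustrative) with VLEN=128, VLENB is 16 and vscale is 2, so
    // <vscale x 2 x i32> holds 4 i32 elements.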
2081     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
2082     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2083                                  DAG.getConstant(3, DL, VT));
2084     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2085   }
2086   case ISD::FP_EXTEND: {
    // RVV can only do fp_extend to types double the size of the source. We
    // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
    // via f32.
2090     SDLoc DL(Op);
2091     MVT VT = Op.getSimpleValueType();
2092     SDValue Src = Op.getOperand(0);
2093     MVT SrcVT = Src.getSimpleValueType();
2094 
2095     // Prepare any fixed-length vector operands.
2096     MVT ContainerVT = VT;
2097     if (SrcVT.isFixedLengthVector()) {
2098       ContainerVT = getContainerForFixedLengthVector(VT);
2099       MVT SrcContainerVT =
2100           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2101       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2102     }
2103 
2104     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2105         SrcVT.getVectorElementType() != MVT::f16) {
2106       // For scalable vectors, we only need to close the gap between
2107       // vXf16->vXf64.
2108       if (!VT.isFixedLengthVector())
2109         return Op;
2110       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2111       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2112       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2113     }
2114 
2115     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2116     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2117     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2118         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2119 
2120     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2121                                            DL, DAG, Subtarget);
2122     if (VT.isFixedLengthVector())
2123       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2124     return Extend;
2125   }
2126   case ISD::FP_ROUND: {
    // RVV can only do fp_round to types half the size of the source. We
    // custom-lower f64->f16 rounds via RVV's round-to-odd float
    // conversion instruction.
2130     SDLoc DL(Op);
2131     MVT VT = Op.getSimpleValueType();
2132     SDValue Src = Op.getOperand(0);
2133     MVT SrcVT = Src.getSimpleValueType();
2134 
2135     // Prepare any fixed-length vector operands.
2136     MVT ContainerVT = VT;
2137     if (VT.isFixedLengthVector()) {
2138       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2139       ContainerVT =
2140           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2141       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2142     }
2143 
2144     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2145         SrcVT.getVectorElementType() != MVT::f64) {
2146       // For scalable vectors, we only need to close the gap between
2147       // vXf64<->vXf16.
2148       if (!VT.isFixedLengthVector())
2149         return Op;
2150       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2151       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2152       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2153     }
2154 
2155     SDValue Mask, VL;
2156     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2157 
2158     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2159     SDValue IntermediateRound =
2160         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2161     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2162                                           DL, DAG, Subtarget);
2163 
2164     if (VT.isFixedLengthVector())
2165       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2166     return Round;
2167   }
2168   case ISD::FP_TO_SINT:
2169   case ISD::FP_TO_UINT:
2170   case ISD::SINT_TO_FP:
2171   case ISD::UINT_TO_FP: {
    // RVV can only do fp<->int conversions to types half/double the size of
    // the source. We custom-lower any conversions that do two hops into
    // sequences.
2175     MVT VT = Op.getSimpleValueType();
2176     if (!VT.isVector())
2177       return Op;
2178     SDLoc DL(Op);
2179     SDValue Src = Op.getOperand(0);
2180     MVT EltVT = VT.getVectorElementType();
2181     MVT SrcVT = Src.getSimpleValueType();
2182     MVT SrcEltVT = SrcVT.getVectorElementType();
2183     unsigned EltSize = EltVT.getSizeInBits();
2184     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2185     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2186            "Unexpected vector element types");
2187 
2188     bool IsInt2FP = SrcEltVT.isInteger();
2189     // Widening conversions
2190     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2191       if (IsInt2FP) {
2192         // Do a regular integer sign/zero extension then convert to float.
2193         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2194                                       VT.getVectorElementCount());
2195         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2196                                  ? ISD::ZERO_EXTEND
2197                                  : ISD::SIGN_EXTEND;
2198         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2199         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2200       }
2201       // FP2Int
2202       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2203       // Do one doubling fp_extend then complete the operation by converting
2204       // to int.
2205       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2206       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2207       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2208     }
2209 
2210     // Narrowing conversions
2211     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2212       if (IsInt2FP) {
2213         // One narrowing int_to_fp, then an fp_round.
2214         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2215         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2216         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2217         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2218       }
2219       // FP2Int
2220       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2221       // representable by the integer, the result is poison.
2222       MVT IVecVT =
2223           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2224                            VT.getVectorElementCount());
2225       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2226       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2227     }
2228 
    // Scalable vectors can exit here. Patterns will handle equally-sized
    // conversions as well as the halving/doubling ones.
2231     if (!VT.isFixedLengthVector())
2232       return Op;
2233 
2234     // For fixed-length vectors we lower to a custom "VL" node.
2235     unsigned RVVOpc = 0;
2236     switch (Op.getOpcode()) {
2237     default:
2238       llvm_unreachable("Impossible opcode");
2239     case ISD::FP_TO_SINT:
2240       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2241       break;
2242     case ISD::FP_TO_UINT:
2243       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2244       break;
2245     case ISD::SINT_TO_FP:
2246       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2247       break;
2248     case ISD::UINT_TO_FP:
2249       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2250       break;
2251     }
2252 
2253     MVT ContainerVT, SrcContainerVT;
2254     // Derive the reference container type from the larger vector type.
2255     if (SrcEltSize > EltSize) {
2256       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2257       ContainerVT =
2258           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2259     } else {
2260       ContainerVT = getContainerForFixedLengthVector(VT);
2261       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2262     }
2263 
2264     SDValue Mask, VL;
2265     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2266 
2267     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2268     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2269     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2270   }
2271   case ISD::VECREDUCE_ADD:
2272   case ISD::VECREDUCE_UMAX:
2273   case ISD::VECREDUCE_SMAX:
2274   case ISD::VECREDUCE_UMIN:
2275   case ISD::VECREDUCE_SMIN:
2276     return lowerVECREDUCE(Op, DAG);
2277   case ISD::VECREDUCE_AND:
2278   case ISD::VECREDUCE_OR:
2279   case ISD::VECREDUCE_XOR:
2280     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2281       return lowerVectorMaskVECREDUCE(Op, DAG);
2282     return lowerVECREDUCE(Op, DAG);
2283   case ISD::VECREDUCE_FADD:
2284   case ISD::VECREDUCE_SEQ_FADD:
2285   case ISD::VECREDUCE_FMIN:
2286   case ISD::VECREDUCE_FMAX:
2287     return lowerFPVECREDUCE(Op, DAG);
2288   case ISD::INSERT_SUBVECTOR:
2289     return lowerINSERT_SUBVECTOR(Op, DAG);
2290   case ISD::EXTRACT_SUBVECTOR:
2291     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2292   case ISD::STEP_VECTOR:
2293     return lowerSTEP_VECTOR(Op, DAG);
2294   case ISD::VECTOR_REVERSE:
2295     return lowerVECTOR_REVERSE(Op, DAG);
2296   case ISD::BUILD_VECTOR:
2297     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2298   case ISD::SPLAT_VECTOR:
2299     if (Op.getValueType().getVectorElementType() == MVT::i1)
2300       return lowerVectorMaskSplat(Op, DAG);
2301     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
2302   case ISD::VECTOR_SHUFFLE:
2303     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2304   case ISD::CONCAT_VECTORS: {
2305     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2306     // better than going through the stack, as the default expansion does.
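    // For example (illustrative), concatenating two v2i32 operands emits two
    // INSERT_SUBVECTOR nodes into an undef v4i32 at element indices 0 and 2.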
2307     SDLoc DL(Op);
2308     MVT VT = Op.getSimpleValueType();
2309     unsigned NumOpElts =
2310         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2311     SDValue Vec = DAG.getUNDEF(VT);
2312     for (const auto &OpIdx : enumerate(Op->ops()))
2313       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2314                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2315     return Vec;
2316   }
2317   case ISD::LOAD:
2318     return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2319   case ISD::STORE:
2320     return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2321   case ISD::MLOAD:
2322     return lowerMLOAD(Op, DAG);
2323   case ISD::MSTORE:
2324     return lowerMSTORE(Op, DAG);
2325   case ISD::SETCC:
2326     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2327   case ISD::ADD:
2328     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2329   case ISD::SUB:
2330     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2331   case ISD::MUL:
2332     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2333   case ISD::MULHS:
2334     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2335   case ISD::MULHU:
2336     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2337   case ISD::AND:
2338     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2339                                               RISCVISD::AND_VL);
2340   case ISD::OR:
2341     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2342                                               RISCVISD::OR_VL);
2343   case ISD::XOR:
2344     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2345                                               RISCVISD::XOR_VL);
2346   case ISD::SDIV:
2347     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2348   case ISD::SREM:
2349     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2350   case ISD::UDIV:
2351     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2352   case ISD::UREM:
2353     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2354   case ISD::SHL:
2355     return lowerToScalableOp(Op, DAG, RISCVISD::SHL_VL);
2356   case ISD::SRA:
2357     return lowerToScalableOp(Op, DAG, RISCVISD::SRA_VL);
2358   case ISD::SRL:
2359     return lowerToScalableOp(Op, DAG, RISCVISD::SRL_VL);
2360   case ISD::FADD:
2361     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2362   case ISD::FSUB:
2363     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2364   case ISD::FMUL:
2365     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2366   case ISD::FDIV:
2367     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2368   case ISD::FNEG:
2369     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2370   case ISD::FABS:
2371     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2372   case ISD::FSQRT:
2373     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2374   case ISD::FMA:
2375     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2376   case ISD::SMIN:
2377     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2378   case ISD::SMAX:
2379     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2380   case ISD::UMIN:
2381     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2382   case ISD::UMAX:
2383     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2384   case ISD::FMINNUM:
2385     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
2386   case ISD::FMAXNUM:
2387     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
2388   case ISD::ABS:
2389     return lowerABS(Op, DAG);
2390   case ISD::VSELECT:
2391     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2392   case ISD::FCOPYSIGN:
2393     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2394   case ISD::MGATHER:
2395     return lowerMGATHER(Op, DAG);
2396   case ISD::MSCATTER:
2397     return lowerMSCATTER(Op, DAG);
2398   case ISD::FLT_ROUNDS_:
2399     return lowerGET_ROUNDING(Op, DAG);
2400   case ISD::SET_ROUNDING:
2401     return lowerSET_ROUNDING(Op, DAG);
2402   case ISD::VP_ADD:
2403     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
2404   case ISD::VP_SUB:
2405     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
2406   case ISD::VP_MUL:
2407     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
2408   case ISD::VP_SDIV:
2409     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
2410   case ISD::VP_UDIV:
2411     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
2412   case ISD::VP_SREM:
2413     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
2414   case ISD::VP_UREM:
2415     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
2416   case ISD::VP_AND:
2417     return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
2418   case ISD::VP_OR:
2419     return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
2420   case ISD::VP_XOR:
2421     return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
2422   case ISD::VP_ASHR:
2423     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
2424   case ISD::VP_LSHR:
2425     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
2426   case ISD::VP_SHL:
2427     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
2428   }
2429 }
2430 
2431 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2432                              SelectionDAG &DAG, unsigned Flags) {
2433   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2434 }
2435 
2436 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2437                              SelectionDAG &DAG, unsigned Flags) {
2438   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2439                                    Flags);
2440 }
2441 
2442 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2443                              SelectionDAG &DAG, unsigned Flags) {
2444   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2445                                    N->getOffset(), Flags);
2446 }
2447 
2448 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2449                              SelectionDAG &DAG, unsigned Flags) {
2450   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2451 }
2452 
2453 template <class NodeTy>
2454 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2455                                      bool IsLocal) const {
2456   SDLoc DL(N);
2457   EVT Ty = getPointerTy(DAG.getDataLayout());
2458 
2459   if (isPositionIndependent()) {
2460     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2461     if (IsLocal)
2462       // Use PC-relative addressing to access the symbol. This generates the
2463       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
2464       // %pcrel_lo(auipc)).
2465       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2466 
2467     // Use PC-relative addressing to access the GOT for this symbol, then load
2468     // the address from the GOT. This generates the pattern (PseudoLA sym),
2469     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
2470     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
2471   }
2472 
2473   switch (getTargetMachine().getCodeModel()) {
2474   default:
2475     report_fatal_error("Unsupported code model for lowering");
2476   case CodeModel::Small: {
2477     // Generate a sequence for accessing addresses within the first 2 GiB of
2478     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
2479     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
2480     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
2481     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2482     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
2483   }
2484   case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2 GiB range of
    // the address space. This generates the pattern (PseudoLLA sym), which
2487     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
2488     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2489     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2490   }
2491   }
2492 }
2493 
2494 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
2495                                                 SelectionDAG &DAG) const {
2496   SDLoc DL(Op);
2497   EVT Ty = Op.getValueType();
2498   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2499   int64_t Offset = N->getOffset();
2500   MVT XLenVT = Subtarget.getXLenVT();
2501 
2502   const GlobalValue *GV = N->getGlobal();
2503   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
2504   SDValue Addr = getAddr(N, DAG, IsLocal);
2505 
2506   // In order to maximise the opportunity for common subexpression elimination,
2507   // emit a separate ADD node for the global address offset instead of folding
2508   // it in the global address node. Later peephole optimisations may choose to
2509   // fold it back in when profitable.
2510   if (Offset != 0)
2511     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2512                        DAG.getConstant(Offset, DL, XLenVT));
2513   return Addr;
2514 }
2515 
2516 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
2517                                                SelectionDAG &DAG) const {
2518   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2519 
2520   return getAddr(N, DAG);
2521 }
2522 
2523 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
2524                                                SelectionDAG &DAG) const {
2525   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2526 
2527   return getAddr(N, DAG);
2528 }
2529 
2530 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
2531                                             SelectionDAG &DAG) const {
2532   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2533 
2534   return getAddr(N, DAG);
2535 }
2536 
2537 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
2538                                               SelectionDAG &DAG,
2539                                               bool UseGOT) const {
2540   SDLoc DL(N);
2541   EVT Ty = getPointerTy(DAG.getDataLayout());
2542   const GlobalValue *GV = N->getGlobal();
2543   MVT XLenVT = Subtarget.getXLenVT();
2544 
2545   if (UseGOT) {
2546     // Use PC-relative addressing to access the GOT for this TLS symbol, then
2547     // load the address from the GOT and add the thread pointer. This generates
2548     // the pattern (PseudoLA_TLS_IE sym), which expands to
2549     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
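    // For example (illustrative register choice and label name; lw rather
    // than ld on RV32):
    //   .Lpcrel_hi0: auipc a0, %tls_ie_pcrel_hi(sym)
    //                ld    a0, %pcrel_lo(.Lpcrel_hi0)(a0)
    //   add a0, a0, tp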
2550     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2551     SDValue Load =
2552         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
2553 
2554     // Add the thread pointer.
2555     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2556     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
2557   }
2558 
2559   // Generate a sequence for accessing the address relative to the thread
2560   // pointer, with the appropriate adjustment for the thread pointer offset.
2561   // This generates the pattern
2562   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
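  // For example (illustrative register choice):
  //   lui  a0, %tprel_hi(sym)
  //   add  a0, a0, tp, %tprel_add(sym)
  //   addi a0, a0, %tprel_lo(sym)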
2563   SDValue AddrHi =
2564       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
2565   SDValue AddrAdd =
2566       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
2567   SDValue AddrLo =
2568       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
2569 
2570   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2571   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2572   SDValue MNAdd = SDValue(
2573       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
2574       0);
2575   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
2576 }
2577 
2578 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
2579                                                SelectionDAG &DAG) const {
2580   SDLoc DL(N);
2581   EVT Ty = getPointerTy(DAG.getDataLayout());
2582   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
2583   const GlobalValue *GV = N->getGlobal();
2584 
2585   // Use a PC-relative addressing mode to access the global dynamic GOT address.
2586   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
2587   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
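  // For example (illustrative register choice and label name):
  //   .Lpcrel_hi0: auipc a0, %tls_gd_pcrel_hi(sym)
  //                addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)
  //   call __tls_get_addr@plt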
2588   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2589   SDValue Load =
2590       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
2591 
2592   // Prepare argument list to generate call.
2593   ArgListTy Args;
2594   ArgListEntry Entry;
2595   Entry.Node = Load;
2596   Entry.Ty = CallTy;
2597   Args.push_back(Entry);
2598 
  // Set up the call to __tls_get_addr.

2600   TargetLowering::CallLoweringInfo CLI(DAG);
2601   CLI.setDebugLoc(DL)
2602       .setChain(DAG.getEntryNode())
2603       .setLibCallee(CallingConv::C, CallTy,
2604                     DAG.getExternalSymbol("__tls_get_addr", Ty),
2605                     std::move(Args));
2606 
2607   return LowerCallTo(CLI).first;
2608 }
2609 
2610 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2611                                                    SelectionDAG &DAG) const {
2612   SDLoc DL(Op);
2613   EVT Ty = Op.getValueType();
2614   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2615   int64_t Offset = N->getOffset();
2616   MVT XLenVT = Subtarget.getXLenVT();
2617 
2618   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2619 
2620   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2621       CallingConv::GHC)
2622     report_fatal_error("In GHC calling convention TLS is not supported");
2623 
2624   SDValue Addr;
2625   switch (Model) {
2626   case TLSModel::LocalExec:
2627     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2628     break;
2629   case TLSModel::InitialExec:
2630     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2631     break;
2632   case TLSModel::LocalDynamic:
2633   case TLSModel::GeneralDynamic:
2634     Addr = getDynamicTLSAddr(N, DAG);
2635     break;
2636   }
2637 
2638   // In order to maximise the opportunity for common subexpression elimination,
2639   // emit a separate ADD node for the global address offset instead of folding
2640   // it in the global address node. Later peephole optimisations may choose to
2641   // fold it back in when profitable.
2642   if (Offset != 0)
2643     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2644                        DAG.getConstant(Offset, DL, XLenVT));
2645   return Addr;
2646 }
2647 
2648 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2649   SDValue CondV = Op.getOperand(0);
2650   SDValue TrueV = Op.getOperand(1);
2651   SDValue FalseV = Op.getOperand(2);
2652   SDLoc DL(Op);
2653   MVT XLenVT = Subtarget.getXLenVT();
2654 
2655   // If the result type is XLenVT and CondV is the output of a SETCC node
2656   // which also operated on XLenVT inputs, then merge the SETCC node into the
2657   // lowered RISCVISD::SELECT_CC to take advantage of the integer
2658   // compare+branch instructions. i.e.:
2659   // (select (setcc lhs, rhs, cc), truev, falsev)
2660   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
2661   if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
2662       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
2663     SDValue LHS = CondV.getOperand(0);
2664     SDValue RHS = CondV.getOperand(1);
2665     auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
2666     ISD::CondCode CCVal = CC->get();
2667 
    // Special case for a select of 2 constants that have a difference of 1.
2669     // Normally this is done by DAGCombine, but if the select is introduced by
2670     // type legalization or op legalization, we miss it. Restricting to SETLT
2671     // case for now because that is what signed saturating add/sub need.
2672     // FIXME: We don't need the condition to be SETLT or even a SETCC,
2673     // but we would probably want to swap the true/false values if the condition
2674     // is SETGE/SETLE to avoid an XORI.
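    // For example, (select (setlt a, b), 3, 2) becomes (add (setlt a, b), 2),
    // since the setcc produces 0 or 1.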
2675     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
2676         CCVal == ISD::SETLT) {
2677       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
2678       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
2679       if (TrueVal - 1 == FalseVal)
2680         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
2681       if (TrueVal + 1 == FalseVal)
2682         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
2683     }
2684 
2685     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2686 
2687     SDValue TargetCC = DAG.getTargetConstant(CCVal, DL, XLenVT);
2688     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
2689     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2690   }
2691 
2692   // Otherwise:
2693   // (select condv, truev, falsev)
2694   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
2695   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
2696   SDValue SetNE = DAG.getTargetConstant(ISD::SETNE, DL, XLenVT);
2697 
2698   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
2699 
2700   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2701 }
2702 
2703 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2704   SDValue CondV = Op.getOperand(1);
2705   SDLoc DL(Op);
2706   MVT XLenVT = Subtarget.getXLenVT();
2707 
2708   if (CondV.getOpcode() == ISD::SETCC &&
2709       CondV.getOperand(0).getValueType() == XLenVT) {
2710     SDValue LHS = CondV.getOperand(0);
2711     SDValue RHS = CondV.getOperand(1);
2712     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
2713 
2714     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2715 
2716     SDValue TargetCC = DAG.getCondCode(CCVal);
2717     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2718                        LHS, RHS, TargetCC, Op.getOperand(2));
2719   }
2720 
2721   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2722                      CondV, DAG.getConstant(0, DL, XLenVT),
2723                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
2724 }
2725 
2726 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2727   MachineFunction &MF = DAG.getMachineFunction();
2728   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
2729 
2730   SDLoc DL(Op);
2731   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2732                                  getPointerTy(MF.getDataLayout()));
2733 
2734   // vastart just stores the address of the VarArgsFrameIndex slot into the
2735   // memory location argument.
2736   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2737   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2738                       MachinePointerInfo(SV));
2739 }
2740 
2741 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
2742                                             SelectionDAG &DAG) const {
2743   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2744   MachineFunction &MF = DAG.getMachineFunction();
2745   MachineFrameInfo &MFI = MF.getFrameInfo();
2746   MFI.setFrameAddressIsTaken(true);
2747   Register FrameReg = RI.getFrameRegister(MF);
2748   int XLenInBytes = Subtarget.getXLen() / 8;
2749 
2750   EVT VT = Op.getValueType();
2751   SDLoc DL(Op);
2752   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
2753   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
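  // Each loop iteration loads the saved frame pointer of the current frame,
  // which the prologue stores at FrameAddr - 2*XLenInBytes (the slot below
  // the saved return address).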
2754   while (Depth--) {
2755     int Offset = -(XLenInBytes * 2);
2756     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
2757                               DAG.getIntPtrConstant(Offset, DL));
2758     FrameAddr =
2759         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2760   }
2761   return FrameAddr;
2762 }
2763 
2764 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
2765                                              SelectionDAG &DAG) const {
2766   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2767   MachineFunction &MF = DAG.getMachineFunction();
2768   MachineFrameInfo &MFI = MF.getFrameInfo();
2769   MFI.setReturnAddressIsTaken(true);
2770   MVT XLenVT = Subtarget.getXLenVT();
2771   int XLenInBytes = Subtarget.getXLen() / 8;
2772 
2773   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2774     return SDValue();
2775 
2776   EVT VT = Op.getValueType();
2777   SDLoc DL(Op);
2778   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2779   if (Depth) {
2780     int Off = -XLenInBytes;
2781     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
2782     SDValue Offset = DAG.getConstant(Off, DL, VT);
2783     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
2784                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
2785                        MachinePointerInfo());
2786   }
2787 
2788   // Return the value of the return address register, marking it an implicit
2789   // live-in.
2790   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
2791   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
2792 }
2793 
2794 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
2795                                                  SelectionDAG &DAG) const {
2796   SDLoc DL(Op);
2797   SDValue Lo = Op.getOperand(0);
2798   SDValue Hi = Op.getOperand(1);
2799   SDValue Shamt = Op.getOperand(2);
2800   EVT VT = Lo.getValueType();
2801 
2802   // if Shamt-XLEN < 0: // Shamt < XLEN
2803   //   Lo = Lo << Shamt
2804   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
2805   // else:
  //   Hi = Lo << (Shamt-XLEN)
  //   Lo = 0
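  // For example, with XLEN=32 and Shamt=40: Hi = Lo << 8 and Lo = 0.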
2808 
2809   SDValue Zero = DAG.getConstant(0, DL, VT);
2810   SDValue One = DAG.getConstant(1, DL, VT);
2811   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2812   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2813   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2814   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2815 
2816   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2817   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
2818   SDValue ShiftRightLo =
2819       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
2820   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2821   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2822   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
2823 
2824   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2825 
2826   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
2827   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2828 
2829   SDValue Parts[2] = {Lo, Hi};
2830   return DAG.getMergeValues(Parts, DL);
2831 }
2832 
2833 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2834                                                   bool IsSRA) const {
2835   SDLoc DL(Op);
2836   SDValue Lo = Op.getOperand(0);
2837   SDValue Hi = Op.getOperand(1);
2838   SDValue Shamt = Op.getOperand(2);
2839   EVT VT = Lo.getValueType();
2840 
2841   // SRA expansion:
2842   //   if Shamt-XLEN < 0: // Shamt < XLEN
2843   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2844   //     Hi = Hi >>s Shamt
2845   //   else:
2846   //     Lo = Hi >>s (Shamt-XLEN);
2847   //     Hi = Hi >>s (XLEN-1)
2848   //
2849   // SRL expansion:
2850   //   if Shamt-XLEN < 0: // Shamt < XLEN
2851   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2852   //     Hi = Hi >>u Shamt
2853   //   else:
2854   //     Lo = Hi >>u (Shamt-XLEN);
2855   //     Hi = 0;
2856 
2857   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
2858 
2859   SDValue Zero = DAG.getConstant(0, DL, VT);
2860   SDValue One = DAG.getConstant(1, DL, VT);
2861   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2862   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2863   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2864   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2865 
2866   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2867   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
2868   SDValue ShiftLeftHi =
2869       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
2870   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
2871   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
2872   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
2873   SDValue HiFalse =
2874       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
2875 
2876   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2877 
2878   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
2879   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2880 
2881   SDValue Parts[2] = {Lo, Hi};
2882   return DAG.getMergeValues(Parts, DL);
2883 }
2884 
2885 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
2886 // legal equivalently-sized i8 type, so we can use that as a go-between.
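// For example, splatting an i1 value %x to v8i1 proceeds roughly as
// (setcc (splat_v8i8 (and %x, 1)), (splat_v8i8 0), ne).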
2887 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
2888                                                   SelectionDAG &DAG) const {
2889   SDLoc DL(Op);
2890   MVT VT = Op.getSimpleValueType();
2891   SDValue SplatVal = Op.getOperand(0);
2892   // All-zeros or all-ones splats are handled specially.
2893   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
2894     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
2895     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
2896   }
2897   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
2898     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
2899     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
2900   }
2901   MVT XLenVT = Subtarget.getXLenVT();
2902   assert(SplatVal.getValueType() == XLenVT &&
2903          "Unexpected type for i1 splat value");
2904   MVT InterVT = VT.changeVectorElementType(MVT::i8);
2905   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
2906                          DAG.getConstant(1, DL, XLenVT));
2907   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
2908   SDValue Zero = DAG.getConstant(0, DL, InterVT);
2909   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
2910 }
2911 
2912 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
2913 // illegal (currently only vXi64 RV32).
2914 // FIXME: We could also catch non-constant sign-extended i32 values and lower
2915 // them to SPLAT_VECTOR_I64
2916 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
2917                                                      SelectionDAG &DAG) const {
2918   SDLoc DL(Op);
2919   MVT VecVT = Op.getSimpleValueType();
2920   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
2921          "Unexpected SPLAT_VECTOR_PARTS lowering");
2922 
2923   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
2924   SDValue Lo = Op.getOperand(0);
2925   SDValue Hi = Op.getOperand(1);
2926 
2927   if (VecVT.isFixedLengthVector()) {
2928     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
2930     SDValue Mask, VL;
2931     std::tie(Mask, VL) =
2932         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2933 
2934     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
2935     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
2936   }
2937 
2938   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2939     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2940     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If the Hi constant is just the sign bit of Lo replicated (i.e. the pair
    // is a sign-extended 32-bit value), lower this as a custom node in order
    // to try and match RVV vector/scalar instructions.
2943     if ((LoC >> 31) == HiC)
2944       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2945   }
2946 
  // Detect cases where Hi is (SRA Lo, 31), which means Hi is the sign
  // extension of Lo.
2948   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
2949       isa<ConstantSDNode>(Hi.getOperand(1)) &&
2950       Hi.getConstantOperandVal(1) == 31)
2951     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2952 
  // Fall back to using a stack store and a stride-x0 vector load. Use X0 as
  // the VL.
2954   return splatPartsI64ThroughStack(DL, VecVT, Lo, Hi,
2955                                    DAG.getRegister(RISCV::X0, MVT::i64), DAG);
2956 }
2957 
2958 // Custom-lower extensions from mask vectors by using a vselect either with 1
2959 // for zero/any-extension or -1 for sign-extension:
2960 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
2961 // Note that any-extension is lowered identically to zero-extension.
2962 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
2963                                                 int64_t ExtTrueVal) const {
2964   SDLoc DL(Op);
2965   MVT VecVT = Op.getSimpleValueType();
2966   SDValue Src = Op.getOperand(0);
2967   // Only custom-lower extensions from mask types
2968   assert(Src.getValueType().isVector() &&
2969          Src.getValueType().getVectorElementType() == MVT::i1);
2970 
2971   MVT XLenVT = Subtarget.getXLenVT();
2972   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
2973   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
2974 
2975   if (VecVT.isScalableVector()) {
    // Be careful not to introduce illegal scalar types at this stage, and be
    // careful also about splatting constants: on RV32, vXi64 SPLAT_VECTOR is
    // illegal and must be expanded. Since we know that the constants are
    // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
2980     bool IsRV32E64 =
2981         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
2982 
2983     if (!IsRV32E64) {
2984       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
2985       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
2986     } else {
2987       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
2988       SplatTrueVal =
2989           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
2990     }
2991 
2992     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
2993   }
2994 
2995   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
2996   MVT I1ContainerVT =
2997       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
2998 
2999   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
3000 
3001   SDValue Mask, VL;
3002   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3003 
3004   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3005   SplatTrueVal =
3006       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3007   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3008                                SplatTrueVal, SplatZero, VL);
3009 
3010   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3011 }
3012 
3013 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3014     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3015   MVT ExtVT = Op.getSimpleValueType();
3016   // Only custom-lower extensions from fixed-length vector types.
3017   if (!ExtVT.isFixedLengthVector())
3018     return Op;
3019   MVT VT = Op.getOperand(0).getSimpleValueType();
3020   // Grab the canonical container type for the extended type. Infer the smaller
3021   // type from that to ensure the same number of vector elements, as we know
3022   // the LMUL will be sufficient to hold the smaller type.
3023   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
  // Construct the narrower container type for the source manually to ensure
  // the same number of vector elements between source and dest.
3026   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3027                                      ContainerExtVT.getVectorElementCount());
3028 
3029   SDValue Op1 =
3030       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3031 
3032   SDLoc DL(Op);
3033   SDValue Mask, VL;
3034   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3035 
3036   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3037 
3038   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3039 }
3040 
3041 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3042 // setcc operation:
3043 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3044 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3045                                                   SelectionDAG &DAG) const {
3046   SDLoc DL(Op);
3047   EVT MaskVT = Op.getValueType();
3048   // Only expect to custom-lower truncations to mask types
3049   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3050          "Unexpected type for vector mask lowering");
3051   SDValue Src = Op.getOperand(0);
3052   MVT VecVT = Src.getSimpleValueType();
3053 
3054   // If this is a fixed vector, we need to convert it to a scalable vector.
3055   MVT ContainerVT = VecVT;
3056   if (VecVT.isFixedLengthVector()) {
3057     ContainerVT = getContainerForFixedLengthVector(VecVT);
3058     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3059   }
3060 
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);

  SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
  SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());

  SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne, VL);
  SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);

  if (VecVT.isScalableVector()) {
    SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
    return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
  }
3074 
3075   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3076   SDValue Trunc =
3077       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3078   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3079                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3080   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3081 }
3082 
3083 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3084 // first position of a vector, and that vector is slid up to the insert index.
3085 // By limiting the active vector length to index+1 and merging with the
3086 // original vector (with an undisturbed tail policy for elements >= VL), we
3087 // achieve the desired result of leaving all elements untouched except the one
3088 // at VL-1, which is replaced with the desired value.
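// For example, an insert at index 2 might be realised as (illustrative):
//   vmv.s.x      v26, a0      # place the value at element 0 of a temporary
//   vsetivli     zero, 3, ... # limit VL to index + 1
//   vslideup.vi  v8, v26, 2   # slide into place; tail stays undisturbed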
3089 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3090                                                     SelectionDAG &DAG) const {
3091   SDLoc DL(Op);
3092   MVT VecVT = Op.getSimpleValueType();
3093   SDValue Vec = Op.getOperand(0);
3094   SDValue Val = Op.getOperand(1);
3095   SDValue Idx = Op.getOperand(2);
3096 
3097   if (VecVT.getVectorElementType() == MVT::i1) {
3098     // FIXME: For now we just promote to an i8 vector and insert into that,
3099     // but this is probably not optimal.
3100     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3101     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3102     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
3103     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
3104   }
3105 
3106   MVT ContainerVT = VecVT;
3107   // If the operand is a fixed-length vector, convert to a scalable one.
3108   if (VecVT.isFixedLengthVector()) {
3109     ContainerVT = getContainerForFixedLengthVector(VecVT);
3110     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3111   }
3112 
3113   MVT XLenVT = Subtarget.getXLenVT();
3114 
3115   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3116   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
3117   // Even i64-element vectors on RV32 can be lowered without scalar
  // legalization if the value is a constant whose most-significant 32 bits
  // match the sign-extension of the lower 32 bits.
3120   // TODO: We could also catch sign extensions of a 32-bit value.
3121   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3122     const auto *CVal = cast<ConstantSDNode>(Val);
3123     if (isInt<32>(CVal->getSExtValue())) {
3124       IsLegalInsert = true;
3125       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3126     }
3127   }
3128 
3129   SDValue Mask, VL;
3130   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3131 
3132   SDValue ValInVec;
3133 
3134   if (IsLegalInsert) {
3135     unsigned Opc =
3136         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3137     if (isNullConstant(Idx)) {
3138       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3139       if (!VecVT.isFixedLengthVector())
3140         return Vec;
3141       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3142     }
3143     ValInVec =
3144         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3145   } else {
3146     // On RV32, i64-element vectors must be specially handled to place the
3147     // value at element 0, by using two vslide1up instructions in sequence on
3148     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3149     // this.
3150     SDValue One = DAG.getConstant(1, DL, XLenVT);
3151     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3152     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3153     MVT I32ContainerVT =
3154         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3155     SDValue I32Mask =
3156         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3157     // Limit the active VL to two.
3158     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
    // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
3160     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3161     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3162                            InsertI64VL);
    // First slide in the hi value, then slide the lo value in underneath it.
3164     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3165                            ValHi, I32Mask, InsertI64VL);
3166     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3167                            ValLo, I32Mask, InsertI64VL);
3168     // Bitcast back to the right container type.
3169     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3170   }
3171 
3172   // Now that the value is in a vector, slide it into position.
3173   SDValue InsertVL =
3174       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3175   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3176                                 ValInVec, Idx, Mask, InsertVL);
3177   if (!VecVT.isFixedLengthVector())
3178     return Slideup;
3179   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3180 }
3181 
3182 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3183 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3184 // types this is done using VMV_X_S to allow us to glean information about the
3185 // sign bits of the result.
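// For example, a non-zero-index integer extract might be realised as
// (illustrative):
//   vslidedown.vx v8, v8, a0
//   vmv.x.s       a0, v8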
3186 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3187                                                      SelectionDAG &DAG) const {
3188   SDLoc DL(Op);
3189   SDValue Idx = Op.getOperand(1);
3190   SDValue Vec = Op.getOperand(0);
3191   EVT EltVT = Op.getValueType();
3192   MVT VecVT = Vec.getSimpleValueType();
3193   MVT XLenVT = Subtarget.getXLenVT();
3194 
3195   if (VecVT.getVectorElementType() == MVT::i1) {
3196     // FIXME: For now we just promote to an i8 vector and extract from that,
3197     // but this is probably not optimal.
3198     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3199     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3200     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3201   }
3202 
3203   // If this is a fixed vector, we need to convert it to a scalable vector.
3204   MVT ContainerVT = VecVT;
3205   if (VecVT.isFixedLengthVector()) {
3206     ContainerVT = getContainerForFixedLengthVector(VecVT);
3207     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3208   }
3209 
3210   // If the index is 0, the vector is already in the right position.
3211   if (!isNullConstant(Idx)) {
3212     // Use a VL of 1 to avoid processing more elements than we need.
3213     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3214     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3215     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3216     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3217                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3218   }
3219 
3220   if (!EltVT.isInteger()) {
3221     // Floating-point extracts are handled in TableGen.
3222     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3223                        DAG.getConstant(0, DL, XLenVT));
3224   }
3225 
3226   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3227   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3228 }
3229 
3230 // Some RVV intrinsics may claim that they want an integer operand to be
3231 // promoted or expanded.
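// For example (illustrative), an i8 scalar operand is promoted to XLenVT,
// while an i64 scalar operand on RV32 is either truncated to i32 (if it is a
// sign-extended constant) or expanded into a splat vector.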
3232 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3233                                           const RISCVSubtarget &Subtarget) {
3234   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3235           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3236          "Unexpected opcode");
3237 
3238   if (!Subtarget.hasStdExtV())
3239     return SDValue();
3240 
3241   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3242   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3243   SDLoc DL(Op);
3244 
3245   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3246       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3247   if (!II || !II->SplatOperand)
3248     return SDValue();
3249 
3250   unsigned SplatOp = II->SplatOperand + HasChain;
3251   assert(SplatOp < Op.getNumOperands());
3252 
3253   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3254   SDValue &ScalarOp = Operands[SplatOp];
3255   MVT OpVT = ScalarOp.getSimpleValueType();
3256   MVT XLenVT = Subtarget.getXLenVT();
3257 
3258   // If this isn't a scalar, or its type is XLenVT we're done.
3259   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3260     return SDValue();
3261 
  // The simplest case is that the operand needs to be promoted to XLenVT.
  if (OpVT.bitsLT(XLenVT)) {
    // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
3267     // FIXME: Should we ignore the upper bits in isel instead?
3268     unsigned ExtOpc =
3269         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3270     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3271     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3272   }
3273 
3274   // Use the previous operand to get the vXi64 VT. The result might be a mask
3275   // VT for compares. Using the previous operand assumes that the previous
3276   // operand will never have a smaller element size than a scalar operand and
3277   // that a widening operation never uses SEW=64.
3278   // NOTE: If this fails the below assert, we can probably just find the
3279   // element count from any operand or result and use it to construct the VT.
3280   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3281   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3282 
3283   // The more complex case is when the scalar is larger than XLenVT.
3284   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3285          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3286 
3287   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3288   // on the instruction to sign-extend since SEW>XLEN.
3289   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3290     if (isInt<32>(CVal->getSExtValue())) {
3291       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3292       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3293     }
3294   }
3295 
3296   // We need to convert the scalar to a splat vector.
3297   // FIXME: Can we implicitly truncate the scalar if it is known to
3298   // be sign extended?
3299   // VL should be the last operand.
3300   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3301   assert(VL.getValueType() == XLenVT);
3302   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3303   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3304 }
3305 
3306 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3307                                                      SelectionDAG &DAG) const {
3308   unsigned IntNo = Op.getConstantOperandVal(0);
3309   SDLoc DL(Op);
3310   MVT XLenVT = Subtarget.getXLenVT();
3311 
3312   switch (IntNo) {
3313   default:
3314     break; // Don't custom lower most intrinsics.
3315   case Intrinsic::thread_pointer: {
3316     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3317     return DAG.getRegister(RISCV::X4, PtrVT);
3318   }
3319   case Intrinsic::riscv_orc_b:
3320     // Lower to the GORCI encoding for orc.b.
3321     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3322                        DAG.getConstant(7, DL, XLenVT));
3323   case Intrinsic::riscv_grev:
3324   case Intrinsic::riscv_gorc: {
3325     unsigned Opc =
3326         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3327     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3328   }
3329   case Intrinsic::riscv_shfl:
3330   case Intrinsic::riscv_unshfl: {
3331     unsigned Opc =
3332         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3333     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3334   }
3335   case Intrinsic::riscv_bcompress:
3336   case Intrinsic::riscv_bdecompress: {
3337     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3338                                                        : RISCVISD::BDECOMPRESS;
3339     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3340   }
3341   case Intrinsic::riscv_vmv_x_s:
3342     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3343     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3344                        Op.getOperand(1));
3345   case Intrinsic::riscv_vmv_v_x:
3346     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3347                             Op.getSimpleValueType(), DL, DAG, Subtarget);
3348   case Intrinsic::riscv_vfmv_v_f:
3349     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3350                        Op.getOperand(1), Op.getOperand(2));
3351   case Intrinsic::riscv_vmv_s_x: {
3352     SDValue Scalar = Op.getOperand(2);
3353 
3354     if (Scalar.getValueType().bitsLE(XLenVT)) {
3355       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3356       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3357                          Op.getOperand(1), Scalar, Op.getOperand(3));
3358     }
3359 
3360     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3361 
3362     // This is an i64 value that lives in two scalar registers. We have to
    // insert this in a convoluted way. First we build a vXi64 splat containing
    // the two values, assembled using some bit math. Next we'll use
3365     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
3366     // to merge element 0 from our splat into the source vector.
3367     // FIXME: This is probably not the best way to do this, but it is
3368     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3369     // point.
3370     //   sw lo, (a0)
3371     //   sw hi, 4(a0)
3372     //   vlse vX, (a0)
3373     //
3374     //   vid.v      vVid
3375     //   vmseq.vx   mMask, vVid, 0
3376     //   vmerge.vvm vDest, vSrc, vVal, mMask
3377     MVT VT = Op.getSimpleValueType();
3378     SDValue Vec = Op.getOperand(1);
3379     SDValue VL = Op.getOperand(3);
3380 
3381     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
3382     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3383                                       DAG.getConstant(0, DL, MVT::i32), VL);
3384 
3385     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3386     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3387     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3388     SDValue SelectCond =
3389         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3390                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
3391     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3392                        Vec, VL);
3393   }
3394   case Intrinsic::riscv_vslide1up:
3395   case Intrinsic::riscv_vslide1down:
3396   case Intrinsic::riscv_vslide1up_mask:
3397   case Intrinsic::riscv_vslide1down_mask: {
3398     // We need to special case these when the scalar is larger than XLen.
3399     unsigned NumOps = Op.getNumOperands();
3400     bool IsMasked = NumOps == 6;
3401     unsigned OpOffset = IsMasked ? 1 : 0;
3402     SDValue Scalar = Op.getOperand(2 + OpOffset);
3403     if (Scalar.getValueType().bitsLE(XLenVT))
3404       break;
3405 
3406     // Splatting a sign extended constant is fine.
3407     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3408       if (isInt<32>(CVal->getSExtValue()))
3409         break;
3410 
3411     MVT VT = Op.getSimpleValueType();
3412     assert(VT.getVectorElementType() == MVT::i64 &&
3413            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3414 
3415     // Convert the vector source to the equivalent nxvXi32 vector.
3416     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3417     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3418 
3419     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3420                                    DAG.getConstant(0, DL, XLenVT));
3421     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3422                                    DAG.getConstant(1, DL, XLenVT));
3423 
3424     // Double the VL since we halved SEW.
3425     SDValue VL = Op.getOperand(NumOps - 1);
3426     SDValue I32VL =
3427         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3428 
3429     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
    SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, I32VL);
3431 
3432     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3433     // instructions.
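    // For vslide1up the sequence is (illustrative register choice):
    //   vslide1up.vx v26, v25, a1  # slide in the hi word
    //   vslide1up.vx v25, v26, a0  # then slide in the lo word underneath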
3434     if (IntNo == Intrinsic::riscv_vslide1up ||
3435         IntNo == Intrinsic::riscv_vslide1up_mask) {
3436       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3437                         I32Mask, I32VL);
3438       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3439                         I32Mask, I32VL);
3440     } else {
3441       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3442                         I32Mask, I32VL);
3443       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3444                         I32Mask, I32VL);
3445     }
3446 
3447     // Convert back to nxvXi64.
3448     Vec = DAG.getBitcast(VT, Vec);
3449 
3450     if (!IsMasked)
3451       return Vec;
3452 
3453     // Apply mask after the operation.
3454     SDValue Mask = Op.getOperand(NumOps - 2);
3455     SDValue MaskedOff = Op.getOperand(1);
3456     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
3457   }
3458   }
3459 
3460   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3461 }
3462 
3463 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
3464                                                     SelectionDAG &DAG) const {
3465   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3466 }
3467 
3468 static MVT getLMUL1VT(MVT VT) {
3469   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
3470          "Unexpected vector MVT");
3471   return MVT::getScalableVectorVT(
3472       VT.getVectorElementType(),
3473       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
3474 }
3475 
3476 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
3477   switch (ISDOpcode) {
3478   default:
3479     llvm_unreachable("Unhandled reduction");
3480   case ISD::VECREDUCE_ADD:
3481     return RISCVISD::VECREDUCE_ADD_VL;
3482   case ISD::VECREDUCE_UMAX:
3483     return RISCVISD::VECREDUCE_UMAX_VL;
3484   case ISD::VECREDUCE_SMAX:
3485     return RISCVISD::VECREDUCE_SMAX_VL;
3486   case ISD::VECREDUCE_UMIN:
3487     return RISCVISD::VECREDUCE_UMIN_VL;
3488   case ISD::VECREDUCE_SMIN:
3489     return RISCVISD::VECREDUCE_SMIN_VL;
3490   case ISD::VECREDUCE_AND:
3491     return RISCVISD::VECREDUCE_AND_VL;
3492   case ISD::VECREDUCE_OR:
3493     return RISCVISD::VECREDUCE_OR_VL;
3494   case ISD::VECREDUCE_XOR:
3495     return RISCVISD::VECREDUCE_XOR_VL;
3496   }
3497 }
3498 
3499 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op,
3500                                                       SelectionDAG &DAG) const {
3501   SDLoc DL(Op);
3502   SDValue Vec = Op.getOperand(0);
3503   MVT VecVT = Vec.getSimpleValueType();
3504   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
3505           Op.getOpcode() == ISD::VECREDUCE_OR ||
3506           Op.getOpcode() == ISD::VECREDUCE_XOR) &&
3507          "Unexpected reduction lowering");
3508 
3509   MVT XLenVT = Subtarget.getXLenVT();
3510   assert(Op.getValueType() == XLenVT &&
3511          "Expected reduction output to be legalized to XLenVT");
3512 
3513   MVT ContainerVT = VecVT;
3514   if (VecVT.isFixedLengthVector()) {
3515     ContainerVT = getContainerForFixedLengthVector(VecVT);
3516     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3517   }
3518 
3519   SDValue Mask, VL;
3520   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3521   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3522 
3523   switch (Op.getOpcode()) {
3524   default:
3525     llvm_unreachable("Unhandled reduction");
3526   case ISD::VECREDUCE_AND:
3527     // vpopc ~x == 0
3528     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL);
3529     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3530     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ);
3531   case ISD::VECREDUCE_OR:
3532     // vpopc x != 0
3533     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3534     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3535   case ISD::VECREDUCE_XOR: {
3536     // ((vpopc x) & 1) != 0
3537     SDValue One = DAG.getConstant(1, DL, XLenVT);
3538     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3539     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
3540     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3541   }
3542   }
3543 }
3544 
3545 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
3546                                             SelectionDAG &DAG) const {
3547   SDLoc DL(Op);
3548   SDValue Vec = Op.getOperand(0);
3549   EVT VecEVT = Vec.getValueType();
3550 
3551   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
3552 
  // Due to the ordering of type legalization we may have a vector type that
3554   // be split. Do that manually so we can get down to a legal type.
3555   while (getTypeAction(*DAG.getContext(), VecEVT) ==
3556          TargetLowering::TypeSplitVector) {
3557     SDValue Lo, Hi;
3558     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
3559     VecEVT = Lo.getValueType();
3560     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
3561   }
3562 
3563   // TODO: The type may need to be widened rather than split. Or widened before
3564   // it can be split.
3565   if (!isTypeLegal(VecEVT))
3566     return SDValue();
3567 
3568   MVT VecVT = VecEVT.getSimpleVT();
3569   MVT VecEltVT = VecVT.getVectorElementType();
3570   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
3571 
3572   MVT ContainerVT = VecVT;
3573   if (VecVT.isFixedLengthVector()) {
3574     ContainerVT = getContainerForFixedLengthVector(VecVT);
3575     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3576   }
3577 
3578   MVT M1VT = getLMUL1VT(ContainerVT);
3579 
3580   SDValue Mask, VL;
3581   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3582 
3583   // FIXME: This is a VLMAX splat which might be too large and can prevent
3584   // vsetvli removal.
3585   SDValue NeutralElem =
3586       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
3587   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
3588   SDValue Reduction =
3589       DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
3590   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3591                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3592   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
3593 }
3594 
3595 // Given a reduction op, this function returns the matching reduction opcode,
3596 // the vector SDValue and the scalar SDValue required to lower this to a
3597 // RISCVISD node.
3598 static std::tuple<unsigned, SDValue, SDValue>
3599 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
3600   SDLoc DL(Op);
3601   auto Flags = Op->getFlags();
3602   unsigned Opcode = Op.getOpcode();
3603   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
3604   switch (Opcode) {
3605   default:
3606     llvm_unreachable("Unhandled reduction");
3607   case ISD::VECREDUCE_FADD:
3608     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
3609                            DAG.getConstantFP(0.0, DL, EltVT));
3610   case ISD::VECREDUCE_SEQ_FADD:
3611     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
3612                            Op.getOperand(0));
3613   case ISD::VECREDUCE_FMIN:
3614     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
3615                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3616   case ISD::VECREDUCE_FMAX:
3617     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
3618                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3619   }
3620 }
3621 
3622 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
3623                                               SelectionDAG &DAG) const {
3624   SDLoc DL(Op);
3625   MVT VecEltVT = Op.getSimpleValueType();
3626 
3627   unsigned RVVOpcode;
3628   SDValue VectorVal, ScalarVal;
3629   std::tie(RVVOpcode, VectorVal, ScalarVal) =
3630       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
3631   MVT VecVT = VectorVal.getSimpleValueType();
3632 
3633   MVT ContainerVT = VecVT;
3634   if (VecVT.isFixedLengthVector()) {
3635     ContainerVT = getContainerForFixedLengthVector(VecVT);
3636     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
3637   }
3638 
3639   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
3640 
3641   SDValue Mask, VL;
3642   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3643 
3644   // FIXME: This is a VLMAX splat which might be too large and can prevent
3645   // vsetvli removal.
3646   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
3647   SDValue Reduction =
3648       DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
3649   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3650                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3651 }
3652 
3653 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
3654                                                    SelectionDAG &DAG) const {
3655   SDValue Vec = Op.getOperand(0);
3656   SDValue SubVec = Op.getOperand(1);
3657   MVT VecVT = Vec.getSimpleValueType();
3658   MVT SubVecVT = SubVec.getSimpleValueType();
3659 
3660   SDLoc DL(Op);
3661   MVT XLenVT = Subtarget.getXLenVT();
3662   unsigned OrigIdx = Op.getConstantOperandVal(2);
3663   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3664 
3665   // We don't have the ability to slide mask vectors up indexed by their i1
3666   // elements; the smallest we can do is i8. Often we are able to bitcast to
3667   // equivalent i8 vectors. Note that when inserting a fixed-length vector
3668   // into a scalable one, we might not necessarily have enough scalable
3669   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
3670   if (SubVecVT.getVectorElementType() == MVT::i1 &&
3671       (OrigIdx != 0 || !Vec.isUndef())) {
3672     if (VecVT.getVectorMinNumElements() >= 8 &&
3673         SubVecVT.getVectorMinNumElements() >= 8) {
3674       assert(OrigIdx % 8 == 0 && "Invalid index");
3675       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3676              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3677              "Unexpected mask vector lowering");
3678       OrigIdx /= 8;
3679       SubVecVT =
3680           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3681                            SubVecVT.isScalableVector());
3682       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3683                                VecVT.isScalableVector());
3684       Vec = DAG.getBitcast(VecVT, Vec);
3685       SubVec = DAG.getBitcast(SubVecVT, SubVec);
3686     } else {
3687       // We can't slide this mask vector up indexed by its i1 elements.
3688       // This poses a problem when we wish to insert a scalable vector which
3689       // can't be re-expressed as a larger type. Just choose the slow path and
3690       // extend to a larger type, then truncate back down.
3691       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3692       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3693       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3694       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
3695       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
3696                         Op.getOperand(2));
3697       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
3698       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
3699     }
3700   }
3701 
  // If the subvector is a fixed-length type, we cannot use subregister
3703   // manipulation to simplify the codegen; we don't know which register of a
3704   // LMUL group contains the specific subvector as we only know the minimum
3705   // register size. Therefore we must slide the vector group up the full
3706   // amount.
3707   if (SubVecVT.isFixedLengthVector()) {
3708     if (OrigIdx == 0 && Vec.isUndef())
3709       return Op;
3710     MVT ContainerVT = VecVT;
3711     if (VecVT.isFixedLengthVector()) {
3712       ContainerVT = getContainerForFixedLengthVector(VecVT);
3713       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3714     }
3715     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
3716                          DAG.getUNDEF(ContainerVT), SubVec,
3717                          DAG.getConstant(0, DL, XLenVT));
3718     SDValue Mask =
3719         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3720     // Set the vector length to only the number of elements we care about. Note
3721     // that for slideup this includes the offset.
3722     SDValue VL =
3723         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
3724     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3725     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3726                                   SubVec, SlideupAmt, Mask, VL);
3727     if (VecVT.isFixedLengthVector())
3728       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3729     return DAG.getBitcast(Op.getValueType(), Slideup);
3730   }
3731 
3732   unsigned SubRegIdx, RemIdx;
3733   std::tie(SubRegIdx, RemIdx) =
3734       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3735           VecVT, SubVecVT, OrigIdx, TRI);
3736 
3737   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
3738   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
3739                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
3740                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
3741 
  // 1. If the Idx has been completely eliminated and this subvector's size is
  // that of a vector register or a multiple thereof, or the surrounding
  // elements are undef, then this is a subvector insert which naturally
  // aligns to a vector register. These can easily be handled using
  // subregister manipulation.
  // 2. If the subvector is smaller than a vector register, then the insertion
  // must preserve the undisturbed elements of the register. We do this by
  // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
  // (which resolves to a subregister copy), performing a VSLIDEUP to place the
  // subvector within the vector register, and an INSERT_SUBVECTOR of that
  // LMUL=1 type back into the larger vector (resolving to another subregister
  // operation). See below for how our VSLIDEUP works. We go via an LMUL=1 type
  // to avoid allocating a large register group to hold our subvector.
3754   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
3755     return Op;
3756 
  // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
  // OFFSET<=i<VL set to the "subvector", and elements VL<=i<VLMAX following
  // the tail policy (in our case undisturbed). This means we can set up a
  // subvector insertion where OFFSET is the insertion offset, and the VL is
  // the OFFSET plus the size of the subvector.
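  // For example (a sketch, where nxv2i32 is the LMUL=1 type): inserting
  // nxv1i32 at index 3 into nxv8i32 gives RemIdx=1; we extract the aligned
  // nxv2i32 register at index 2, slide the subvector up by 1*vscale with
  // VL=(1+1)*vscale, and insert the result back at index 2.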
3762   MVT InterSubVT = VecVT;
3763   SDValue AlignedExtract = Vec;
3764   unsigned AlignedIdx = OrigIdx - RemIdx;
3765   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3766     InterSubVT = getLMUL1VT(VecVT);
3767     // Extract a subvector equal to the nearest full vector register type. This
3768     // should resolve to a EXTRACT_SUBREG instruction.
3769     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3770                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
3771   }
3772 
3773   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3774   // For scalable vectors this must be further multiplied by vscale.
3775   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
3776 
3777   SDValue Mask, VL;
3778   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
3779 
3780   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
3781   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
3782   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
3783   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
3784 
3785   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
3786                        DAG.getUNDEF(InterSubVT), SubVec,
3787                        DAG.getConstant(0, DL, XLenVT));
3788 
3789   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
3790                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
3791 
3792   // If required, insert this subvector back into the correct vector register.
3793   // This should resolve to an INSERT_SUBREG instruction.
3794   if (VecVT.bitsGT(InterSubVT))
3795     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
3796                           DAG.getConstant(AlignedIdx, DL, XLenVT));
3797 
3798   // We might have bitcast from a mask type: cast back to the original type if
3799   // required.
3800   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
3801 }
3802 
3803 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
3804                                                     SelectionDAG &DAG) const {
3805   SDValue Vec = Op.getOperand(0);
3806   MVT SubVecVT = Op.getSimpleValueType();
3807   MVT VecVT = Vec.getSimpleValueType();
3808 
3809   SDLoc DL(Op);
3810   MVT XLenVT = Subtarget.getXLenVT();
3811   unsigned OrigIdx = Op.getConstantOperandVal(1);
3812   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3813 
3814   // We don't have the ability to slide mask vectors down indexed by their i1
3815   // elements; the smallest we can do is i8. Often we are able to bitcast to
3816   // equivalent i8 vectors. Note that when extracting a fixed-length vector
3817   // from a scalable one, we might not necessarily have enough scalable
3818   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
3819   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
3820     if (VecVT.getVectorMinNumElements() >= 8 &&
3821         SubVecVT.getVectorMinNumElements() >= 8) {
3822       assert(OrigIdx % 8 == 0 && "Invalid index");
3823       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3824              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3825              "Unexpected mask vector lowering");
3826       OrigIdx /= 8;
3827       SubVecVT =
3828           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3829                            SubVecVT.isScalableVector());
3830       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3831                                VecVT.isScalableVector());
3832       Vec = DAG.getBitcast(VecVT, Vec);
3833     } else {
      // We can't slide this mask vector down indexed by its i1 elements.
      // This poses a problem when we wish to extract a scalable vector which
      // can't be re-expressed as a larger type. Just choose the slow path and
      // extend to a larger type, then truncate back down.
      // TODO: We could probably improve this when extracting certain
      // fixed-length vectors from fixed-length vectors, where we can extract
      // as i8 and shift the correct element right to reach the desired
      // subvector.
3841       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3842       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3843       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3844       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
3845                         Op.getOperand(1));
3846       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
3847       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
3848     }
3849   }
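  // For example (a sketch of the retyping above): extracting v8i1 at index 8
  // from nxv64i1 is rewritten as extracting v1i8 at index 1 from nxv8i8.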
3850 
  // If the subvector is a fixed-length type, we cannot use subregister
  // manipulation to simplify the codegen; we don't know which register of an
  // LMUL group contains the specific subvector as we only know the minimum
  // register size. Therefore we must slide the vector group down the full
  // amount.
3856   if (SubVecVT.isFixedLengthVector()) {
    // With an index of 0 this is a cast-like subvector extract, which can be
    // performed with subregister operations.
3859     if (OrigIdx == 0)
3860       return Op;
3861     MVT ContainerVT = VecVT;
3862     if (VecVT.isFixedLengthVector()) {
3863       ContainerVT = getContainerForFixedLengthVector(VecVT);
3864       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3865     }
3866     SDValue Mask =
3867         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3868     // Set the vector length to only the number of elements we care about. This
3869     // avoids sliding down elements we're going to discard straight away.
3870     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
3871     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3872     SDValue Slidedown =
3873         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3874                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
3875     // Now we can use a cast-like subvector extract to get the result.
3876     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3877                             DAG.getConstant(0, DL, XLenVT));
3878     return DAG.getBitcast(Op.getValueType(), Slidedown);
3879   }
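  // As a concrete sketch of the fixed-length path above: extracting v2i64 at
  // index 2 from v4i64 slides the vector down by 2 with VL=2, then performs a
  // cast-like extract at index 0.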
3880 
3881   unsigned SubRegIdx, RemIdx;
3882   std::tie(SubRegIdx, RemIdx) =
3883       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3884           VecVT, SubVecVT, OrigIdx, TRI);
3885 
3886   // If the Idx has been completely eliminated then this is a subvector extract
3887   // which naturally aligns to a vector register. These can easily be handled
3888   // using subregister manipulation.
3889   if (RemIdx == 0)
3890     return Op;
3891 
3892   // Else we must shift our vector register directly to extract the subvector.
3893   // Do this using VSLIDEDOWN.
3894 
3895   // If the vector type is an LMUL-group type, extract a subvector equal to the
3896   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
3897   // instruction.
3898   MVT InterSubVT = VecVT;
3899   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3900     InterSubVT = getLMUL1VT(VecVT);
3901     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3902                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
3903   }
3904 
3905   // Slide this vector register down by the desired number of elements in order
3906   // to place the desired subvector starting at element 0.
3907   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3908   // For scalable vectors this must be further multiplied by vscale.
3909   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
3910 
3911   SDValue Mask, VL;
3912   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
3913   SDValue Slidedown =
3914       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
3915                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
3916 
3917   // Now the vector is in the right position, extract our final subvector. This
3918   // should resolve to a COPY.
3919   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3920                           DAG.getConstant(0, DL, XLenVT));
3921 
3922   // We might have bitcast from a mask type: cast back to the original type if
3923   // required.
3924   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
3925 }
3926 
3927 // Lower step_vector to the vid instruction. Any non-identity step value must
// be accounted for by manual expansion.
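// For example (a sketch): a step of 4 becomes (shl (vid), splat(2)) since 4
// is a power of two, while a step of 3 becomes (mul (vid), splat(3)).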
3929 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
3930                                               SelectionDAG &DAG) const {
3931   SDLoc DL(Op);
3932   MVT VT = Op.getSimpleValueType();
3933   MVT XLenVT = Subtarget.getXLenVT();
3934   SDValue Mask, VL;
3935   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
3936   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3937   uint64_t StepValImm = Op.getConstantOperandVal(0);
3938   if (StepValImm != 1) {
3939     assert(Op.getOperand(0).getValueType() == XLenVT &&
3940            "Unexpected step value type");
3941     if (isPowerOf2_64(StepValImm)) {
3942       SDValue StepVal =
3943           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3944                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
3945       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
3946     } else {
3947       SDValue StepVal =
3948           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Op.getOperand(0));
3949       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
3950     }
3951   }
3952   return StepVec;
3953 }
3954 
3955 // Implement vector_reverse using vrgather.vv with indices determined by
3956 // subtracting the id of each element from (VLMAX-1). This will convert
3957 // the indices like so:
3958 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
3959 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
3960 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
3961                                                  SelectionDAG &DAG) const {
3962   SDLoc DL(Op);
3963   MVT VecVT = Op.getSimpleValueType();
3964   unsigned EltSize = VecVT.getScalarSizeInBits();
3965   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
3966 
3967   unsigned MaxVLMAX = 0;
3968   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
3969   if (VectorBitsMax != 0)
3970     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
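  // For example (a sketch, with RVVBitsPerBlock == 64): for nxv16i8 (EltSize
  // 8, MinSize 128) and a 256-bit maximum vector size, MaxVLMAX =
  // ((256 / 8) * 128) / 64 = 64 elements.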
3971 
3972   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
3973   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
3974 
3975   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
3976   // to use vrgatherei16.vv.
3977   // TODO: It's also possible to use vrgatherei16.vv for other types to
3978   // decrease register width for the index calculation.
3979   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
    // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
    // Reverse each half, then reassemble them in reverse order.
    // NOTE: It's also possible that after splitting, VLMAX no longer requires
    // vrgatherei16.vv.
3984     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
3985       SDValue Lo, Hi;
3986       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3987       EVT LoVT, HiVT;
3988       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
3989       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
3990       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
3991       // Reassemble the low and high pieces reversed.
3992       // FIXME: This is a CONCAT_VECTORS.
3993       SDValue Res =
3994           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
3995                       DAG.getIntPtrConstant(0, DL));
3996       return DAG.getNode(
3997           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
3998           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
3999     }
4000 
4001     // Just promote the int type to i16 which will double the LMUL.
4002     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
4003     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
4004   }
4005 
4006   MVT XLenVT = Subtarget.getXLenVT();
4007   SDValue Mask, VL;
4008   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4009 
4010   // Calculate VLMAX-1 for the desired SEW.
4011   unsigned MinElts = VecVT.getVectorMinNumElements();
4012   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
4013                               DAG.getConstant(MinElts, DL, XLenVT));
4014   SDValue VLMinus1 =
4015       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
4016 
4017   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
4018   bool IsRV32E64 =
4019       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
4020   SDValue SplatVL;
4021   if (!IsRV32E64)
4022     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
4023   else
4024     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
4025 
4026   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
4027   SDValue Indices =
4028       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
4029 
4030   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
4031 }
4032 
4033 SDValue
4034 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
4035                                                      SelectionDAG &DAG) const {
4036   auto *Load = cast<LoadSDNode>(Op);
4037 
4038   SDLoc DL(Op);
4039   MVT VT = Op.getSimpleValueType();
4040   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4041 
4042   SDValue VL =
4043       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4044 
4045   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4046   SDValue NewLoad = DAG.getMemIntrinsicNode(
4047       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
4048       Load->getMemoryVT(), Load->getMemOperand());
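  // For example (a sketch, assuming a 128-bit minimum VLEN): a v4i32 load
  // becomes a VLE_VL of the nxv2i32 container type with VL=4; the result is
  // converted back to v4i32 below.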
4049 
4050   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4051   return DAG.getMergeValues({Result, Load->getChain()}, DL);
4052 }
4053 
4054 SDValue
4055 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
4056                                                       SelectionDAG &DAG) const {
4057   auto *Store = cast<StoreSDNode>(Op);
4058 
4059   SDLoc DL(Op);
4060   SDValue StoreVal = Store->getValue();
4061   MVT VT = StoreVal.getSimpleValueType();
4062 
  // If the size is less than a byte, we need to pad with zeros to make a full
  // byte.
4064   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
4065     VT = MVT::v8i1;
4066     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
4067                            DAG.getConstant(0, DL, VT), StoreVal,
4068                            DAG.getIntPtrConstant(0, DL));
4069   }
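  // For example (a sketch): a v4i1 store value is inserted at index 0 of a
  // zero v8i1 vector, so a full byte (with the high four bits zeroed) is
  // written to memory.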
4070 
4071   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4072 
4073   SDValue VL =
4074       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4075 
4076   SDValue NewValue =
4077       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
4078   return DAG.getMemIntrinsicNode(
4079       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
4080       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
4081       Store->getMemoryVT(), Store->getMemOperand());
4082 }
4083 
4084 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
4085   auto *Load = cast<MaskedLoadSDNode>(Op);
4086 
4087   SDLoc DL(Op);
4088   MVT VT = Op.getSimpleValueType();
4089   MVT XLenVT = Subtarget.getXLenVT();
4090 
4091   SDValue Mask = Load->getMask();
4092   SDValue PassThru = Load->getPassThru();
4093   SDValue VL;
4094 
4095   MVT ContainerVT = VT;
4096   if (VT.isFixedLengthVector()) {
4097     ContainerVT = getContainerForFixedLengthVector(VT);
4098     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4099 
4100     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4101     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4102     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4103   } else
4104     VL = DAG.getRegister(RISCV::X0, XLenVT);
4105 
4106   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4107   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
4108   SDValue Ops[] = {Load->getChain(),   IntID, PassThru,
4109                    Load->getBasePtr(), Mask,  VL};
4110   SDValue Result =
4111       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4112                               Load->getMemoryVT(), Load->getMemOperand());
4113   SDValue Chain = Result.getValue(1);
4114 
4115   if (VT.isFixedLengthVector())
4116     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4117 
4118   return DAG.getMergeValues({Result, Chain}, DL);
4119 }
4120 
4121 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const {
4122   auto *Store = cast<MaskedStoreSDNode>(Op);
4123 
4124   SDLoc DL(Op);
4125   SDValue Val = Store->getValue();
4126   SDValue Mask = Store->getMask();
4127   MVT VT = Val.getSimpleValueType();
4128   MVT XLenVT = Subtarget.getXLenVT();
4129   SDValue VL;
4130 
4131   MVT ContainerVT = VT;
4132   if (VT.isFixedLengthVector()) {
4133     ContainerVT = getContainerForFixedLengthVector(VT);
4134     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4135 
4136     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4137     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4138     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4139   } else
4140     VL = DAG.getRegister(RISCV::X0, XLenVT);
4141 
4142   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
4143   return DAG.getMemIntrinsicNode(
4144       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
4145       {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL},
4146       Store->getMemoryVT(), Store->getMemOperand());
4147 }
4148 
4149 SDValue
4150 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
4151                                                       SelectionDAG &DAG) const {
4152   MVT InVT = Op.getOperand(0).getSimpleValueType();
4153   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
4154 
4155   MVT VT = Op.getSimpleValueType();
4156 
4157   SDValue Op1 =
4158       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4159   SDValue Op2 =
4160       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4161 
4162   SDLoc DL(Op);
4163   SDValue VL =
4164       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4165 
4166   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4167   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4168 
4169   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
4170                             Op.getOperand(2), Mask, VL);
4171 
4172   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
4173 }
4174 
4175 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
4176     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
4177   MVT VT = Op.getSimpleValueType();
4178 
4179   if (VT.getVectorElementType() == MVT::i1)
4180     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
4181 
4182   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
4183 }
4184 
4185 // Lower vector ABS to smax(X, sub(0, X)).
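// For example, abs(-5) = smax(-5, 0 - (-5)) = smax(-5, 5) = 5.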
4186 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
4187   SDLoc DL(Op);
4188   MVT VT = Op.getSimpleValueType();
4189   SDValue X = Op.getOperand(0);
4190 
4191   assert(VT.isFixedLengthVector() && "Unexpected type");
4192 
4193   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4194   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
4195 
4196   SDValue Mask, VL;
4197   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4198 
4199   SDValue SplatZero =
4200       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4201                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4202   SDValue NegX =
4203       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
4204   SDValue Max =
4205       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
4206 
4207   return convertFromScalableVector(VT, Max, DAG, Subtarget);
4208 }
4209 
4210 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
4211     SDValue Op, SelectionDAG &DAG) const {
4212   SDLoc DL(Op);
4213   MVT VT = Op.getSimpleValueType();
4214   SDValue Mag = Op.getOperand(0);
4215   SDValue Sign = Op.getOperand(1);
4216   assert(Mag.getValueType() == Sign.getValueType() &&
4217          "Can only handle COPYSIGN with matching types.");
4218 
4219   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4220   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
4221   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
4222 
4223   SDValue Mask, VL;
4224   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4225 
4226   SDValue CopySign =
4227       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
4228 
4229   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
4230 }
4231 
4232 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
4233     SDValue Op, SelectionDAG &DAG) const {
4234   MVT VT = Op.getSimpleValueType();
4235   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4236 
4237   MVT I1ContainerVT =
4238       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4239 
4240   SDValue CC =
4241       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
4242   SDValue Op1 =
4243       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4244   SDValue Op2 =
4245       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
4246 
4247   SDLoc DL(Op);
4248   SDValue Mask, VL;
4249   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4250 
4251   SDValue Select =
4252       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
4253 
4254   return convertFromScalableVector(VT, Select, DAG, Subtarget);
4255 }
4256 
4257 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
4258                                                unsigned NewOpc,
4259                                                bool HasMask) const {
4260   MVT VT = Op.getSimpleValueType();
4261   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4262 
4263   // Create list of operands by converting existing ones to scalable types.
4264   SmallVector<SDValue, 6> Ops;
4265   for (const SDValue &V : Op->op_values()) {
4266     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4267 
4268     // Pass through non-vector operands.
4269     if (!V.getValueType().isVector()) {
4270       Ops.push_back(V);
4271       continue;
4272     }
4273 
4274     // "cast" fixed length vector to a scalable vector.
4275     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
4276            "Only fixed length vectors are supported!");
4277     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4278   }
4279 
4280   SDLoc DL(Op);
4281   SDValue Mask, VL;
4282   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4283   if (HasMask)
4284     Ops.push_back(Mask);
4285   Ops.push_back(VL);
4286 
4287   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
4288   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
4289 }
4290 
4291 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
4292 // * Operands of each node are assumed to be in the same order.
4293 // * The EVL operand is promoted from i32 to i64 on RV64.
4294 // * Fixed-length vectors are converted to their scalable-vector container
4295 //   types.
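// For example (a sketch): a VP_ADD on v4i32 operands becomes a
// RISCVISD::ADD_VL on the fixed-length type's scalable container, with the
// i32 EVL operand zero-extended to i64 on RV64.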
4296 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
4297                                        unsigned RISCVISDOpc) const {
4298   SDLoc DL(Op);
4299   MVT VT = Op.getSimpleValueType();
4300   Optional<unsigned> EVLIdx = ISD::getVPExplicitVectorLengthIdx(Op.getOpcode());
4301 
4302   SmallVector<SDValue, 4> Ops;
4303   MVT XLenVT = Subtarget.getXLenVT();
4304 
4305   for (const auto &OpIdx : enumerate(Op->ops())) {
4306     SDValue V = OpIdx.value();
4307     if ((unsigned)OpIdx.index() == EVLIdx) {
4308       Ops.push_back(DAG.getZExtOrTrunc(V, DL, XLenVT));
4309       continue;
4310     }
4311     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4312     // Pass through operands which aren't fixed-length vectors.
4313     if (!V.getValueType().isFixedLengthVector()) {
4314       Ops.push_back(V);
4315       continue;
4316     }
4317     // "cast" fixed length vector to a scalable vector.
4318     MVT OpVT = V.getSimpleValueType();
4319     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
4320     assert(useRVVForFixedLengthVectorVT(OpVT) &&
4321            "Only fixed length vectors are supported!");
4322     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4323   }
4324 
4325   if (!VT.isFixedLengthVector())
4326     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
4327 
4328   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4329 
4330   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
4331 
4332   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
4333 }
4334 
4335 // Custom lower MGATHER to a legalized form for RVV. It will then be matched to
4336 // a RVV indexed load. The RVV indexed load instructions only support the
4337 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4338 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4339 // indexing is extended to the XLEN value type and scaled accordingly.
4340 SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
4341   auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
4342   SDLoc DL(Op);
4343 
4344   SDValue Index = MGN->getIndex();
4345   SDValue Mask = MGN->getMask();
4346   SDValue PassThru = MGN->getPassThru();
4347 
4348   MVT VT = Op.getSimpleValueType();
4349   MVT IndexVT = Index.getSimpleValueType();
4350   MVT XLenVT = Subtarget.getXLenVT();
4351 
4352   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4353          "Unexpected VTs!");
4354   assert(MGN->getBasePtr().getSimpleValueType() == XLenVT &&
4355          "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads.
4357   assert(MGN->getExtensionType() == ISD::NON_EXTLOAD &&
4358          "Unexpected extending MGATHER");
4359 
4360   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4361   // the selection of the masked intrinsics doesn't do this for us.
4362   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4363 
4364   SDValue VL;
4365   MVT ContainerVT = VT;
4366   if (VT.isFixedLengthVector()) {
4367     // We need to use the larger of the result and index type to determine the
4368     // scalable type to use so we don't increase LMUL for any operand/result.
4369     if (VT.bitsGE(IndexVT)) {
4370       ContainerVT = getContainerForFixedLengthVector(VT);
4371       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4372                                  ContainerVT.getVectorElementCount());
4373     } else {
4374       IndexVT = getContainerForFixedLengthVector(IndexVT);
4375       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
4376                                      IndexVT.getVectorElementCount());
4377     }
4378 
4379     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4380 
4381     if (!IsUnmasked) {
4382       MVT MaskVT =
4383           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4384       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4385       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4386     }
4387 
4388     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4389   } else
4390     VL = DAG.getRegister(RISCV::X0, XLenVT);
4391 
4392   unsigned IntID =
4393       IsUnmasked ? Intrinsic::riscv_vloxei : Intrinsic::riscv_vloxei_mask;
4394   SmallVector<SDValue, 8> Ops{MGN->getChain(),
4395                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4396   if (!IsUnmasked)
4397     Ops.push_back(PassThru);
4398   Ops.push_back(MGN->getBasePtr());
4399   Ops.push_back(Index);
4400   if (!IsUnmasked)
4401     Ops.push_back(Mask);
4402   Ops.push_back(VL);
4403 
4404   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4405   SDValue Result =
4406       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4407                               MGN->getMemoryVT(), MGN->getMemOperand());
4408   SDValue Chain = Result.getValue(1);
4409 
4410   if (VT.isFixedLengthVector())
4411     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4412 
4413   return DAG.getMergeValues({Result, Chain}, DL);
4414 }
4415 
4416 // Custom lower MSCATTER to a legalized form for RVV. It will then be matched to
4417 // a RVV indexed store. The RVV indexed store instructions only support the
4418 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4419 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4420 // indexing is extended to the XLEN value type and scaled accordingly.
4421 SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
4422                                            SelectionDAG &DAG) const {
4423   auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
4424   SDLoc DL(Op);
4425   SDValue Index = MSN->getIndex();
4426   SDValue Mask = MSN->getMask();
4427   SDValue Val = MSN->getValue();
4428 
4429   MVT VT = Val.getSimpleValueType();
4430   MVT IndexVT = Index.getSimpleValueType();
4431   MVT XLenVT = Subtarget.getXLenVT();
4432 
4433   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4434          "Unexpected VTs!");
4435   assert(MSN->getBasePtr().getSimpleValueType() == XLenVT &&
4436          "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads and
  // truncating vector stores.
4439   assert(!MSN->isTruncatingStore() && "Unexpected extending MSCATTER");
4440 
4441   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4442   // the selection of the masked intrinsics doesn't do this for us.
4443   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4444 
4445   SDValue VL;
4446   if (VT.isFixedLengthVector()) {
4447     // We need to use the larger of the value and index type to determine the
4448     // scalable type to use so we don't increase LMUL for any operand/result.
4449     MVT ContainerVT;
4450     if (VT.bitsGE(IndexVT)) {
4451       ContainerVT = getContainerForFixedLengthVector(VT);
4452       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4453                                  ContainerVT.getVectorElementCount());
4454     } else {
4455       IndexVT = getContainerForFixedLengthVector(IndexVT);
4456       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4457                                      IndexVT.getVectorElementCount());
4458     }
4459 
4460     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4461     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4462 
4463     if (!IsUnmasked) {
4464       MVT MaskVT =
4465           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4466       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4467     }
4468 
4469     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4470   } else
4471     VL = DAG.getRegister(RISCV::X0, XLenVT);
4472 
4473   unsigned IntID =
4474       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
4475   SmallVector<SDValue, 8> Ops{MSN->getChain(),
4476                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4477   Ops.push_back(Val);
4478   Ops.push_back(MSN->getBasePtr());
4479   Ops.push_back(Index);
4480   if (!IsUnmasked)
4481     Ops.push_back(Mask);
4482   Ops.push_back(VL);
4483 
4484   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
4485                                  MSN->getMemoryVT(), MSN->getMemOperand());
4486 }
4487 
4488 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
4489                                                SelectionDAG &DAG) const {
4490   const MVT XLenVT = Subtarget.getXLenVT();
4491   SDLoc DL(Op);
4492   SDValue Chain = Op->getOperand(0);
4493   SDValue SysRegNo = DAG.getConstant(
4494       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4495   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
4496   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
4497 
  // The encoding used for rounding mode in RISCV differs from that used by
  // FLT_ROUNDS. To convert it, the RISCV rounding mode is used as an index
  // into a table, which consists of a sequence of 4-bit fields, each
  // representing the corresponding FLT_ROUNDS mode.
4502   static const int Table =
4503       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
4504       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
4505       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
4506       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
4507       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
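  // For example, with FRM holding RTZ (1), the lookup below computes
  // (Table >> 4) & 7 == int(RoundingMode::TowardZero), the FLT_ROUNDS value
  // for round-toward-zero.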
4508 
4509   SDValue Shift =
4510       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
4511   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4512                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4513   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4514                                DAG.getConstant(7, DL, XLenVT));
4515 
4516   return DAG.getMergeValues({Masked, Chain}, DL);
4517 }
4518 
4519 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
4520                                                SelectionDAG &DAG) const {
4521   const MVT XLenVT = Subtarget.getXLenVT();
4522   SDLoc DL(Op);
4523   SDValue Chain = Op->getOperand(0);
4524   SDValue RMValue = Op->getOperand(1);
4525   SDValue SysRegNo = DAG.getConstant(
4526       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4527 
  // The encoding used for rounding mode in RISCV differs from that used by
  // FLT_ROUNDS. To convert it, the C rounding mode is used as an index into
  // a table, which consists of a sequence of 4-bit fields, each representing
  // the corresponding RISCV mode.
4532   static const unsigned Table =
4533       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
4534       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
4535       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
4536       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
4537       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
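  // For example, setting the FLT_ROUNDS mode NearestTiesToEven (1) computes
  // (Table >> 4) & 7 == RISCVFPRndMode::RNE below, which is then written to
  // FRM.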
4538 
4539   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
4540                               DAG.getConstant(2, DL, XLenVT));
4541   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4542                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4543   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4544                         DAG.getConstant(0x7, DL, XLenVT));
4545   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
4546                      RMValue);
4547 }
4548 
4549 // Returns the opcode of the target-specific SDNode that implements the 32-bit
4550 // form of the given Opcode.
4551 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
4552   switch (Opcode) {
4553   default:
4554     llvm_unreachable("Unexpected opcode");
4555   case ISD::SHL:
4556     return RISCVISD::SLLW;
4557   case ISD::SRA:
4558     return RISCVISD::SRAW;
4559   case ISD::SRL:
4560     return RISCVISD::SRLW;
4561   case ISD::SDIV:
4562     return RISCVISD::DIVW;
4563   case ISD::UDIV:
4564     return RISCVISD::DIVUW;
4565   case ISD::UREM:
4566     return RISCVISD::REMUW;
4567   case ISD::ROTL:
4568     return RISCVISD::ROLW;
4569   case ISD::ROTR:
4570     return RISCVISD::RORW;
4571   case RISCVISD::GREV:
4572     return RISCVISD::GREVW;
4573   case RISCVISD::GORC:
4574     return RISCVISD::GORCW;
4575   }
4576 }
4577 
// Converts the given 32-bit operation to a target-specific SelectionDAG node.
// Because i32 isn't a legal type for RV64, these operations would otherwise
// be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
// instructions later because the fact that the operation was originally of
// type i32 is lost.
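// For example (a sketch): (i32 (shl x, y)) becomes
// (trunc (RISCVISD::SLLW (any_extend x), (any_extend y))).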
4583 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
4584                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
4585   SDLoc DL(N);
4586   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4587   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
4588   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
4589   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4590   // ReplaceNodeResults requires we maintain the same type for the return value.
4591   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
4592 }
4593 
// Converts the given 32-bit operation to an i64 operation with sign-extension
// semantics to reduce the number of sign-extension instructions.
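// For example (a sketch): (i32 (add x, y)) becomes
// (trunc (sign_extend_inreg (add (any_extend x), (any_extend y)), i32)).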
4596 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
4597   SDLoc DL(N);
4598   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4599   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4600   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
4601   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
4602                                DAG.getValueType(MVT::i32));
4603   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
4604 }
4605 
4606 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
4607                                              SmallVectorImpl<SDValue> &Results,
4608                                              SelectionDAG &DAG) const {
4609   SDLoc DL(N);
4610   switch (N->getOpcode()) {
4611   default:
4612     llvm_unreachable("Don't know how to custom type legalize this operation!");
4613   case ISD::STRICT_FP_TO_SINT:
4614   case ISD::STRICT_FP_TO_UINT:
4615   case ISD::FP_TO_SINT:
4616   case ISD::FP_TO_UINT: {
4617     bool IsStrict = N->isStrictFPOpcode();
4618     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4619            "Unexpected custom legalisation");
4620     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
4621     // If the FP type needs to be softened, emit a library call using the 'si'
4622     // version. If we left it to default legalization we'd end up with 'di'. If
4623     // the FP type doesn't need to be softened just let generic type
4624     // legalization promote the result type.
4625     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
4626         TargetLowering::TypeSoftenFloat)
4627       return;
4628     RTLIB::Libcall LC;
4629     if (N->getOpcode() == ISD::FP_TO_SINT ||
4630         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
4631       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
4632     else
4633       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
4634     MakeLibCallOptions CallOptions;
4635     EVT OpVT = Op0.getValueType();
4636     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
4637     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
4638     SDValue Result;
4639     std::tie(Result, Chain) =
4640         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
4641     Results.push_back(Result);
4642     if (IsStrict)
4643       Results.push_back(Chain);
4644     break;
4645   }
4646   case ISD::READCYCLECOUNTER: {
4647     assert(!Subtarget.is64Bit() &&
4648            "READCYCLECOUNTER only has custom type legalization on riscv32");
4649 
4650     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4651     SDValue RCW =
4652         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
4653 
4654     Results.push_back(
4655         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
4656     Results.push_back(RCW.getValue(2));
4657     break;
4658   }
4659   case ISD::MUL: {
4660     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
4661     unsigned XLen = Subtarget.getXLen();
    // This multiply needs to be expanded; try to use MULHSU+MUL if possible.
4663     if (Size > XLen) {
4664       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
4665       SDValue LHS = N->getOperand(0);
4666       SDValue RHS = N->getOperand(1);
4667       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
4668 
4669       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
4670       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
4671       // We need exactly one side to be unsigned.
4672       if (LHSIsU == RHSIsU)
4673         return;
4674 
4675       auto MakeMULPair = [&](SDValue S, SDValue U) {
4676         MVT XLenVT = Subtarget.getXLenVT();
4677         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
4678         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
4679         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
4680         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
4681         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
4682       };
4683 
4684       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
4685       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
4686 
4687       // The other operand should be signed, but still prefer MULH when
4688       // possible.
4689       if (RHSIsU && LHSIsS && !RHSIsS)
4690         Results.push_back(MakeMULPair(LHS, RHS));
4691       else if (LHSIsU && RHSIsS && !LHSIsS)
4692         Results.push_back(MakeMULPair(RHS, LHS));
4693 
4694       return;
4695     }
4696     LLVM_FALLTHROUGH;
4697   }
4698   case ISD::ADD:
4699   case ISD::SUB:
4700     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4701            "Unexpected custom legalisation");
4702     if (N->getOperand(1).getOpcode() == ISD::Constant)
4703       return;
4704     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
4705     break;
4706   case ISD::SHL:
4707   case ISD::SRA:
4708   case ISD::SRL:
4709     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4710            "Unexpected custom legalisation");
4711     if (N->getOperand(1).getOpcode() == ISD::Constant)
4712       return;
4713     Results.push_back(customLegalizeToWOp(N, DAG));
4714     break;
4715   case ISD::ROTL:
4716   case ISD::ROTR:
4717     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4718            "Unexpected custom legalisation");
4719     Results.push_back(customLegalizeToWOp(N, DAG));
4720     break;
4721   case ISD::CTTZ:
4722   case ISD::CTTZ_ZERO_UNDEF:
4723   case ISD::CTLZ:
4724   case ISD::CTLZ_ZERO_UNDEF: {
4725     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4726            "Unexpected custom legalisation");
4727 
4728     SDValue NewOp0 =
4729         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4730     bool IsCTZ =
4731         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
4732     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
4733     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
4734     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4735     return;
4736   }
4737   case ISD::SDIV:
4738   case ISD::UDIV:
4739   case ISD::UREM: {
4740     MVT VT = N->getSimpleValueType(0);
4741     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
4742            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
4743            "Unexpected custom legalisation");
4744     if (N->getOperand(0).getOpcode() == ISD::Constant ||
4745         N->getOperand(1).getOpcode() == ISD::Constant)
4746       return;
4747 
4748     // If the input is i32, use ANY_EXTEND since the W instructions don't read
4749     // the upper 32 bits. For other types we need to sign or zero extend
4750     // based on the opcode.
4751     unsigned ExtOpc = ISD::ANY_EXTEND;
4752     if (VT != MVT::i32)
4753       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
4754                                            : ISD::ZERO_EXTEND;
4755 
4756     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
4757     break;
4758   }
4759   case ISD::UADDO:
4760   case ISD::USUBO: {
4761     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4762            "Unexpected custom legalisation");
4763     bool IsAdd = N->getOpcode() == ISD::UADDO;
4764     // Create an ADDW or SUBW.
4765     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4766     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4767     SDValue Res =
4768         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
4769     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
4770                       DAG.getValueType(MVT::i32));
4771 
4772     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
4773     // Since the inputs are sign extended from i32, this is equivalent to
4774     // comparing the lower 32 bits.
4775     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4776     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
4777                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
4778 
4779     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4780     Results.push_back(Overflow);
4781     return;
4782   }
4783   case ISD::UADDSAT:
4784   case ISD::USUBSAT: {
4785     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4786            "Unexpected custom legalisation");
4787     if (Subtarget.hasStdExtZbb()) {
      // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
      // a sign extend allows overflow of the lower 32 bits to be detected on
      // the promoted size.
4791       SDValue LHS =
4792           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4793       SDValue RHS =
4794           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
4795       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
4796       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4797       return;
4798     }
4799 
4800     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
4801     // promotion for UADDO/USUBO.
4802     Results.push_back(expandAddSubSat(N, DAG));
4803     return;
4804   }
4805   case ISD::BITCAST: {
4806     EVT VT = N->getValueType(0);
4807     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
4808     SDValue Op0 = N->getOperand(0);
4809     EVT Op0VT = Op0.getValueType();
4810     MVT XLenVT = Subtarget.getXLenVT();
4811     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
4812       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
4813       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
4814     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
4815                Subtarget.hasStdExtF()) {
4816       SDValue FPConv =
4817           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
4818       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
4819     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
4820                isTypeLegal(Op0VT)) {
4821       // Custom-legalize bitcasts from fixed-length vector types to illegal
4822       // scalar types in order to improve codegen. Bitcast the vector to a
4823       // one-element vector type whose element type is the same as the result
4824       // type, and extract the first element.
4825       LLVMContext &Context = *DAG.getContext();
4826       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
4827       Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
4828                                     DAG.getConstant(0, DL, XLenVT)));
4829     }
4830     break;
4831   }
4832   case RISCVISD::GREV:
4833   case RISCVISD::GORC: {
4834     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4835            "Unexpected custom legalisation");
4836     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
    // This is similar to customLegalizeToWOp, except that the second operand
    // is a constant control value; it is any-extended to i64 along with the
    // first operand before the W-form node is created.
4840     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4841     SDValue NewOp0 =
4842         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4843     SDValue NewOp1 =
4844         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4845     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4846     // ReplaceNodeResults requires we maintain the same type for the return
4847     // value.
4848     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4849     break;
4850   }
4851   case RISCVISD::SHFL: {
4852     // There is no SHFLIW instruction, but we can just promote the operation.
4853     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4854            "Unexpected custom legalisation");
4855     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
4856     SDValue NewOp0 =
4857         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4858     SDValue NewOp1 =
4859         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4860     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
4861     // ReplaceNodeResults requires we maintain the same type for the return
4862     // value.
4863     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4864     break;
4865   }
4866   case ISD::BSWAP:
4867   case ISD::BITREVERSE: {
4868     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4869            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
4870     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
4871                                  N->getOperand(0));
4872     unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24;
4873     SDValue GREVIW = DAG.getNode(RISCVISD::GREVW, DL, MVT::i64, NewOp0,
4874                                  DAG.getConstant(Imm, DL, MVT::i64));
4875     // ReplaceNodeResults requires we maintain the same type for the return
4876     // value.
4877     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW));
4878     break;
4879   }
4880   case ISD::FSHL:
4881   case ISD::FSHR: {
4882     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4883            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
4884     SDValue NewOp0 =
4885         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4886     SDValue NewOp1 =
4887         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4888     SDValue NewOp2 =
4889         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
    // FSLW/FSRW take a 6-bit shift amount but i32 FSHL/FSHR only use 5 bits.
    // Mask the shift amount to 5 bits.
4892     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
4893                          DAG.getConstant(0x1f, DL, MVT::i64));
4894     unsigned Opc =
4895         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
4896     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
4897     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
4898     break;
4899   }
4900   case ISD::EXTRACT_VECTOR_ELT: {
    // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN < SEW, as the SEW
    // element type is illegal (currently only vXi64 on RV32).
    // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
    // transferred to the destination register. We issue two of these from the
    // upper and lower halves of the SEW-bit vector element, slid down to the
    // first element.
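    // For example (a sketch): to extract element Idx of a vXi64 vector on
    // RV32, we slide the vector down by Idx, read the low 32 bits with
    // VMV_X_S, shift the element right by a splatted 32 via SRL_VL, read the
    // high 32 bits with a second VMV_X_S, and BUILD_PAIR the two halves.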
4907     SDValue Vec = N->getOperand(0);
4908     SDValue Idx = N->getOperand(1);
4909 
4910     // The vector type hasn't been legalized yet so we can't issue target
4911     // specific nodes if it needs legalization.
4912     // FIXME: We would manually legalize if it's important.
4913     if (!isTypeLegal(Vec.getValueType()))
4914       return;
4915 
4916     MVT VecVT = Vec.getSimpleValueType();
4917 
4918     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
4919            VecVT.getVectorElementType() == MVT::i64 &&
4920            "Unexpected EXTRACT_VECTOR_ELT legalization");
4921 
4922     // If this is a fixed vector, we need to convert it to a scalable vector.
4923     MVT ContainerVT = VecVT;
4924     if (VecVT.isFixedLengthVector()) {
4925       ContainerVT = getContainerForFixedLengthVector(VecVT);
4926       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4927     }
4928 
4929     MVT XLenVT = Subtarget.getXLenVT();
4930 
4931     // Use a VL of 1 to avoid processing more elements than we need.
4932     MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
4933     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4934     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4935 
4936     // Unless the index is known to be 0, we must slide the vector down to get
4937     // the desired element into index 0.
4938     if (!isNullConstant(Idx)) {
4939       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4940                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4941     }
4942 
4943     // Extract the lower XLEN bits of the correct vector element.
4944     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4945 
4946     // To extract the upper XLEN bits of the vector element, shift the first
4947     // element right by 32 bits and re-extract the lower XLEN bits.
4948     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4949                                      DAG.getConstant(32, DL, XLenVT), VL);
4950     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
4951                                  ThirtyTwoV, Mask, VL);
4952 
4953     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
4954 
4955     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
4956     break;
4957   }
4958   case ISD::INTRINSIC_WO_CHAIN: {
4959     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
4960     switch (IntNo) {
4961     default:
4962       llvm_unreachable(
4963           "Don't know how to custom type legalize this intrinsic!");
4964     case Intrinsic::riscv_orc_b: {
4965       // Lower to the GORCI encoding for orc.b with the operand extended.
4966       SDValue NewOp =
4967           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4968       // If Zbp is enabled, use GORCIW which will sign extend the result.
4969       unsigned Opc =
4970           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
4971       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
4972                                 DAG.getConstant(7, DL, MVT::i64));
4973       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4974       return;
4975     }
4976     case Intrinsic::riscv_grev:
4977     case Intrinsic::riscv_gorc: {
4978       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4979              "Unexpected custom legalisation");
4980       SDValue NewOp1 =
4981           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4982       SDValue NewOp2 =
4983           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4984       unsigned Opc =
4985           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
4986       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
4987       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4988       break;
4989     }
4990     case Intrinsic::riscv_shfl:
4991     case Intrinsic::riscv_unshfl: {
4992       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4993              "Unexpected custom legalisation");
4994       SDValue NewOp1 =
4995           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4996       SDValue NewOp2 =
4997           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4998       unsigned Opc =
4999           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
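      // There is no SHFLIW/UNSHFLIW, but with a constant control word we can
      // use the i64 SHFL/UNSHFL directly: masking the control to 4 bits
      // keeps bit 4 clear, so the low 32 bits are shuffled only amongst
      // themselves and truncating gives the correct i32 result.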
5000       if (isa<ConstantSDNode>(N->getOperand(2))) {
5001         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5002                              DAG.getConstant(0xf, DL, MVT::i64));
5003         Opc =
5004             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
5005       }
5006       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5007       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5008       break;
5009     }
5010     case Intrinsic::riscv_bcompress:
5011     case Intrinsic::riscv_bdecompress: {
5012       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5013              "Unexpected custom legalisation");
5014       SDValue NewOp1 =
5015           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5016       SDValue NewOp2 =
5017           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5018       unsigned Opc = IntNo == Intrinsic::riscv_bcompress
5019                          ? RISCVISD::BCOMPRESSW
5020                          : RISCVISD::BDECOMPRESSW;
5021       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5022       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5023       break;
5024     }
5025     case Intrinsic::riscv_vmv_x_s: {
5026       EVT VT = N->getValueType(0);
5027       MVT XLenVT = Subtarget.getXLenVT();
5028       if (VT.bitsLT(XLenVT)) {
        // Simple case: just extract using vmv.x.s and truncate.
5030         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
5031                                       Subtarget.getXLenVT(), N->getOperand(1));
5032         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
5033         return;
5034       }
5035 
5036       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
5037              "Unexpected custom legalization");
5038 
5039       // We need to do the move in two steps.
5040       SDValue Vec = N->getOperand(1);
5041       MVT VecVT = Vec.getSimpleValueType();
5042 
5043       // First extract the lower XLEN bits of the element.
5044       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5045 
5046       // To extract the upper XLEN bits of the vector element, shift the first
5047       // element right by 32 bits and re-extract the lower XLEN bits.
5048       SDValue VL = DAG.getConstant(1, DL, XLenVT);
5049       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5050       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5051       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
5052                                        DAG.getConstant(32, DL, XLenVT), VL);
5053       SDValue LShr32 =
5054           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
5055       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5056 
5057       Results.push_back(
5058           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5059       break;
5060     }
5061     }
5062     break;
5063   }
5064   case ISD::VECREDUCE_ADD:
5065   case ISD::VECREDUCE_AND:
5066   case ISD::VECREDUCE_OR:
5067   case ISD::VECREDUCE_XOR:
5068   case ISD::VECREDUCE_SMAX:
5069   case ISD::VECREDUCE_UMAX:
5070   case ISD::VECREDUCE_SMIN:
5071   case ISD::VECREDUCE_UMIN:
5072     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
5073       Results.push_back(V);
5074     break;
5075   case ISD::FLT_ROUNDS_: {
5076     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
5077     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
5078     Results.push_back(Res.getValue(0));
5079     Results.push_back(Res.getValue(1));
5080     break;
5081   }
5082   }
5083 }
5084 
5085 // A structure to hold one of the bit-manipulation patterns below. Together, a
5086 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
5087 //   (or (and (shl x, 1), 0xAAAAAAAA),
5088 //       (and (srl x, 1), 0x55555555))
5089 struct RISCVBitmanipPat {
5090   SDValue Op;
5091   unsigned ShAmt;
5092   bool IsSHL;
5093 
5094   bool formsPairWith(const RISCVBitmanipPat &Other) const {
5095     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
5096   }
5097 };
5098 
5099 // Matches patterns of the form
5100 //   (and (shl x, C2), (C1 << C2))
5101 //   (and (srl x, C2), C1)
5102 //   (shl (and x, C1), C2)
5103 //   (srl (and x, (C1 << C2)), C2)
5104 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
5105 // The expected masks for each shift amount are specified in BitmanipMasks where
5106 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
// The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
// whether BitmanipMasks contains 6 or 5 entries respectively, assuming the
// maximum possible XLen is 64.
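// For example, (and (srl x, 4), 0x0F0F0F0F) matched against the GREVI masks
// below yields {Op: x, ShAmt: 4, IsSHL: false}.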
5110 static Optional<RISCVBitmanipPat>
5111 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
5112   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
5113          "Unexpected number of masks");
5114   Optional<uint64_t> Mask;
5115   // Optionally consume a mask around the shift operation.
5116   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
5117     Mask = Op.getConstantOperandVal(1);
5118     Op = Op.getOperand(0);
5119   }
5120   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
5121     return None;
5122   bool IsSHL = Op.getOpcode() == ISD::SHL;
5123 
5124   if (!isa<ConstantSDNode>(Op.getOperand(1)))
5125     return None;
5126   uint64_t ShAmt = Op.getConstantOperandVal(1);
5127 
5128   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
  if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
5130     return None;
5131   // If we don't have enough masks for 64 bit, then we must be trying to
5132   // match SHFL so we're only allowed to shift 1/4 of the width.
5133   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
5134     return None;
5135 
5136   SDValue Src = Op.getOperand(0);
5137 
5138   // The expected mask is shifted left when the AND is found around SHL
5139   // patterns.
5140   //   ((x >> 1) & 0x55555555)
5141   //   ((x << 1) & 0xAAAAAAAA)
5142   bool SHLExpMask = IsSHL;
5143 
5144   if (!Mask) {
5145     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
5146     // the mask is all ones: consume that now.
5147     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
5148       Mask = Src.getConstantOperandVal(1);
5149       Src = Src.getOperand(0);
5150       // The expected mask is now in fact shifted left for SRL, so reverse the
5151       // decision.
5152       //   ((x & 0xAAAAAAAA) >> 1)
5153       //   ((x & 0x55555555) << 1)
5154       SHLExpMask = !SHLExpMask;
5155     } else {
5156       // Use a default shifted mask of all-ones if there's no AND, truncated
5157       // down to the expected width. This simplifies the logic later on.
5158       Mask = maskTrailingOnes<uint64_t>(Width);
5159       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
5160     }
5161   }
5162 
5163   unsigned MaskIdx = Log2_32(ShAmt);
5164   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5165 
5166   if (SHLExpMask)
5167     ExpMask <<= ShAmt;
5168 
5169   if (Mask != ExpMask)
5170     return None;
5171 
5172   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
5173 }
5174 
5175 // Matches any of the following bit-manipulation patterns:
5176 //   (and (shl x, 1), (0x55555555 << 1))
5177 //   (and (srl x, 1), 0x55555555)
5178 //   (shl (and x, 0x55555555), 1)
5179 //   (srl (and x, (0x55555555 << 1)), 1)
5180 // where the shift amount and mask may vary thus:
5181 //   [1]  = 0x55555555 / 0xAAAAAAAA
5182 //   [2]  = 0x33333333 / 0xCCCCCCCC
5183 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
5184 //   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
5186 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
5187 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
5188   // These are the unshifted masks which we use to match bit-manipulation
5189   // patterns. They may be shifted left in certain circumstances.
5190   static const uint64_t BitmanipMasks[] = {
5191       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
5192       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
5193 
5194   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5195 }
5196 
5197 // Match the following pattern as a GREVI(W) operation
5198 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
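// For example, on RV32
//   (or (and (shl x, 4), 0xF0F0F0F0), (and (srl x, 4), 0x0F0F0F0F))
// matches as the pair {x, 4, SHL} / {x, 4, SRL} and becomes (GREV x, 4).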
5199 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
5200                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5202   EVT VT = Op.getValueType();
5203 
5204   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5205     auto LHS = matchGREVIPat(Op.getOperand(0));
5206     auto RHS = matchGREVIPat(Op.getOperand(1));
5207     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
5208       SDLoc DL(Op);
5209       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
5210                          DAG.getConstant(LHS->ShAmt, DL, VT));
5211     }
5212   }
5213   return SDValue();
5214 }
5215 
// Matches any of the following patterns as a GORCI(W) operation
5217 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
5218 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
5219 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
5220 // Note that with the variant of 3.,
5221 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
5222 // the inner pattern will first be matched as GREVI and then the outer
5223 // pattern will be matched to GORC via the first rule above.
5224 // 4.  (or (rotl/rotr x, bitwidth/2), x)
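// For example, on RV32 (or (rotl x, 16), x) becomes (GORC x, 16) via rule 4.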
5225 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
5226                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5228   EVT VT = Op.getValueType();
5229 
5230   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5231     SDLoc DL(Op);
5232     SDValue Op0 = Op.getOperand(0);
5233     SDValue Op1 = Op.getOperand(1);
5234 
5235     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
5236       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
5237           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
5238           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
5239         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
5240       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
5241       if ((Reverse.getOpcode() == ISD::ROTL ||
5242            Reverse.getOpcode() == ISD::ROTR) &&
5243           Reverse.getOperand(0) == X &&
5244           isa<ConstantSDNode>(Reverse.getOperand(1))) {
5245         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
5246         if (RotAmt == (VT.getSizeInBits() / 2))
5247           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
5248                              DAG.getConstant(RotAmt, DL, VT));
5249       }
5250       return SDValue();
5251     };
5252 
5253     // Check for either commutable permutation of (or (GREVI x, shamt), x)
5254     if (SDValue V = MatchOROfReverse(Op0, Op1))
5255       return V;
5256     if (SDValue V = MatchOROfReverse(Op1, Op0))
5257       return V;
5258 
5259     // OR is commutable so canonicalize its OR operand to the left
5260     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
5261       std::swap(Op0, Op1);
5262     if (Op0.getOpcode() != ISD::OR)
5263       return SDValue();
5264     SDValue OrOp0 = Op0.getOperand(0);
5265     SDValue OrOp1 = Op0.getOperand(1);
5266     auto LHS = matchGREVIPat(OrOp0);
5267     // OR is commutable so swap the operands and try again: x might have been
5268     // on the left
5269     if (!LHS) {
5270       std::swap(OrOp0, OrOp1);
5271       LHS = matchGREVIPat(OrOp0);
5272     }
5273     auto RHS = matchGREVIPat(Op1);
5274     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
5275       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
5276                          DAG.getConstant(LHS->ShAmt, DL, VT));
5277     }
5278   }
5279   return SDValue();
5280 }
5281 
5282 // Matches any of the following bit-manipulation patterns:
5283 //   (and (shl x, 1), (0x22222222 << 1))
5284 //   (and (srl x, 1), 0x22222222)
5285 //   (shl (and x, 0x22222222), 1)
5286 //   (srl (and x, (0x22222222 << 1)), 1)
5287 // where the shift amount and mask may vary thus:
5288 //   [1]  = 0x22222222 / 0x44444444
//   [2]  = 0x0C0C0C0C / 0x30303030
5290 //   [4]  = 0x00F000F0 / 0x0F000F00
5291 //   [8]  = 0x0000FF00 / 0x00FF0000
5292 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
5293 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
5294   // These are the unshifted masks which we use to match bit-manipulation
5295   // patterns. They may be shifted left in certain circumstances.
5296   static const uint64_t BitmanipMasks[] = {
5297       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
5298       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
5299 
5300   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5301 }
5302 
// Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
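// For example, on RV32
//   (or (or (and (shl x, 8), 0x00FF0000), (and (srl x, 8), 0x0000FF00)),
//       (and x, 0xFF0000FF))
// becomes (SHFL x, 8).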
5304 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
5305                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5307   EVT VT = Op.getValueType();
5308 
5309   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
5310     return SDValue();
5311 
5312   SDValue Op0 = Op.getOperand(0);
5313   SDValue Op1 = Op.getOperand(1);
5314 
  // OR is commutable, so canonicalize the inner OR (if any) to Op0.
5316   if (Op0.getOpcode() != ISD::OR)
5317     std::swap(Op0, Op1);
5318   if (Op0.getOpcode() != ISD::OR)
5319     return SDValue();
5320 
5321   // We found an inner OR, so our operands are the operands of the inner OR
5322   // and the other operand of the outer OR.
5323   SDValue A = Op0.getOperand(0);
5324   SDValue B = Op0.getOperand(1);
5325   SDValue C = Op1;
5326 
5327   auto Match1 = matchSHFLPat(A);
5328   auto Match2 = matchSHFLPat(B);
5329 
5330   // If neither matched, we failed.
5331   if (!Match1 && !Match2)
5332     return SDValue();
5333 
  // We had at least one match. If one failed, try the remaining C operand.
5335   if (!Match1) {
5336     std::swap(A, C);
5337     Match1 = matchSHFLPat(A);
5338     if (!Match1)
5339       return SDValue();
5340   } else if (!Match2) {
5341     std::swap(B, C);
5342     Match2 = matchSHFLPat(B);
5343     if (!Match2)
5344       return SDValue();
5345   }
5346   assert(Match1 && Match2);
5347 
5348   // Make sure our matches pair up.
5349   if (!Match1->formsPairWith(*Match2))
5350     return SDValue();
5351 
  // All that remains is to make sure C is an AND with the same input that
  // masks out the bits being shuffled.
5354   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
5355       C.getOperand(0) != Match1->Op)
5356     return SDValue();
5357 
5358   uint64_t Mask = C.getConstantOperandVal(1);
5359 
5360   static const uint64_t BitmanipMasks[] = {
5361       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
5362       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
5363   };
5364 
5365   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5366   unsigned MaskIdx = Log2_32(Match1->ShAmt);
5367   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5368 
5369   if (Mask != ExpMask)
5370     return SDValue();
5371 
5372   SDLoc DL(Op);
5373   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
5374                      DAG.getConstant(Match1->ShAmt, DL, VT));
5375 }
5376 
5377 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
5378 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
5379 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
5380 // not undo itself, but they are redundant.
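// For example, (GREVI (GREVI x, 1), 2) becomes (GREVI x, 3), while
// (GREVI (GREVI x, 1), 1) folds away to x.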
5381 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
5382   SDValue Src = N->getOperand(0);
5383 
5384   if (Src.getOpcode() != N->getOpcode())
5385     return SDValue();
5386 
5387   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
5388       !isa<ConstantSDNode>(Src.getOperand(1)))
5389     return SDValue();
5390 
5391   unsigned ShAmt1 = N->getConstantOperandVal(1);
5392   unsigned ShAmt2 = Src.getConstantOperandVal(1);
5393   Src = Src.getOperand(0);
5394 
5395   unsigned CombinedShAmt;
5396   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
5397     CombinedShAmt = ShAmt1 | ShAmt2;
5398   else
5399     CombinedShAmt = ShAmt1 ^ ShAmt2;
5400 
5401   if (CombinedShAmt == 0)
5402     return Src;
5403 
5404   SDLoc DL(N);
5405   return DAG.getNode(
5406       N->getOpcode(), DL, N->getValueType(0), Src,
5407       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
5408 }
5409 
5410 // Combine a constant select operand into its use:
5411 //
5412 // (and (select_cc lhs, rhs, cc, -1, c), x)
5413 //   -> (select_cc lhs, rhs, cc, x, (and, x, c))  [AllOnes=1]
5414 // (or  (select_cc lhs, rhs, cc, 0, c), x)
5415 //   -> (select_cc lhs, rhs, cc, x, (or, x, c))  [AllOnes=0]
5416 // (xor (select_cc lhs, rhs, cc, 0, c), x)
5417 //   -> (select_cc lhs, rhs, cc, x, (xor, x, c))  [AllOnes=0]
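// This saves materialising the identity constant in a register: when the
// condition selects it, the result is simply the untouched operand x.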
5418 static SDValue combineSelectCCAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
5419                                      SelectionDAG &DAG, bool AllOnes) {
5420   EVT VT = N->getValueType(0);
5421 
5422   if (Slct.getOpcode() != RISCVISD::SELECT_CC || !Slct.hasOneUse())
5423     return SDValue();
5424 
5425   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
5426     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
5427   };
5428 
5429   bool SwapSelectOps;
5430   SDValue TrueVal = Slct.getOperand(3);
5431   SDValue FalseVal = Slct.getOperand(4);
5432   SDValue NonConstantVal;
5433   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
5434     SwapSelectOps = false;
5435     NonConstantVal = FalseVal;
5436   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
5437     SwapSelectOps = true;
5438     NonConstantVal = TrueVal;
5439   } else
5440     return SDValue();
5441 
  // Slct is now known to be the desired identity constant when CC is true.
5443   TrueVal = OtherOp;
5444   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
5445   // Unless SwapSelectOps says CC should be false.
5446   if (SwapSelectOps)
5447     std::swap(TrueVal, FalseVal);
5448 
5449   return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
5450                      {Slct.getOperand(0), Slct.getOperand(1),
5451                       Slct.getOperand(2), TrueVal, FalseVal});
5452 }
5453 
// Attempt combineSelectCCAndUse on each operand of a commutative operator N.
5455 static SDValue combineSelectCCAndUseCommutative(SDNode *N, SelectionDAG &DAG,
5456                                                 bool AllOnes) {
5457   SDValue N0 = N->getOperand(0);
5458   SDValue N1 = N->getOperand(1);
5459   if (SDValue Result = combineSelectCCAndUse(N, N0, N1, DAG, AllOnes))
5460     return Result;
5461   if (SDValue Result = combineSelectCCAndUse(N, N1, N0, DAG, AllOnes))
5462     return Result;
5463   return SDValue();
5464 }
5465 
5466 static SDValue performANDCombine(SDNode *N,
5467                                  TargetLowering::DAGCombinerInfo &DCI,
5468                                  const RISCVSubtarget &Subtarget) {
5469   SelectionDAG &DAG = DCI.DAG;
5470 
  // fold (and (select_cc lhs, rhs, cc, -1, y), x) ->
  //      (select_cc lhs, rhs, cc, x, (and x, y))
5473   return combineSelectCCAndUseCommutative(N, DAG, true);
5474 }
5475 
5476 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5477                                 const RISCVSubtarget &Subtarget) {
5478   SelectionDAG &DAG = DCI.DAG;
5479   if (Subtarget.hasStdExtZbp()) {
5480     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
5481       return GREV;
5482     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
5483       return GORC;
5484     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
5485       return SHFL;
5486   }
5487 
  // fold (or (select_cc lhs, rhs, cc, 0, y), x) ->
  //      (select_cc lhs, rhs, cc, x, (or x, y))
5490   return combineSelectCCAndUseCommutative(N, DAG, false);
5491 }
5492 
5493 static SDValue performXORCombine(SDNode *N,
5494                                  TargetLowering::DAGCombinerInfo &DCI,
5495                                  const RISCVSubtarget &Subtarget) {
5496   SelectionDAG &DAG = DCI.DAG;
5497 
  // fold (xor (select_cc lhs, rhs, cc, 0, y), x) ->
  //      (select_cc lhs, rhs, cc, x, (xor x, y))
5500   return combineSelectCCAndUseCommutative(N, DAG, false);
5501 }
5502 
5503 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
5504                                                DAGCombinerInfo &DCI) const {
5505   SelectionDAG &DAG = DCI.DAG;
5506 
5507   switch (N->getOpcode()) {
5508   default:
5509     break;
5510   case RISCVISD::SplitF64: {
5511     SDValue Op0 = N->getOperand(0);
5512     // If the input to SplitF64 is just BuildPairF64 then the operation is
5513     // redundant. Instead, use BuildPairF64's operands directly.
5514     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
5515       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
5516 
5517     SDLoc DL(N);
5518 
5519     // It's cheaper to materialise two 32-bit integers than to load a double
5520     // from the constant pool and transfer it to integer registers through the
5521     // stack.
5522     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
5523       APInt V = C->getValueAPF().bitcastToAPInt();
5524       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
5525       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
5526       return DCI.CombineTo(N, Lo, Hi);
5527     }
5528 
5529     // This is a target-specific version of a DAGCombine performed in
5530     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5531     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5532     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5533     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5534         !Op0.getNode()->hasOneUse())
5535       break;
5536     SDValue NewSplitF64 =
5537         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
5538                     Op0.getOperand(0));
5539     SDValue Lo = NewSplitF64.getValue(0);
5540     SDValue Hi = NewSplitF64.getValue(1);
5541     APInt SignBit = APInt::getSignMask(32);
5542     if (Op0.getOpcode() == ISD::FNEG) {
5543       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
5544                                   DAG.getConstant(SignBit, DL, MVT::i32));
5545       return DCI.CombineTo(N, Lo, NewHi);
5546     }
5547     assert(Op0.getOpcode() == ISD::FABS);
5548     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
5549                                 DAG.getConstant(~SignBit, DL, MVT::i32));
5550     return DCI.CombineTo(N, Lo, NewHi);
5551   }
5552   case RISCVISD::SLLW:
5553   case RISCVISD::SRAW:
5554   case RISCVISD::SRLW:
5555   case RISCVISD::ROLW:
5556   case RISCVISD::RORW: {
5557     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5558     SDValue LHS = N->getOperand(0);
5559     SDValue RHS = N->getOperand(1);
5560     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5561     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5562     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
5563         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
5564       if (N->getOpcode() != ISD::DELETED_NODE)
5565         DCI.AddToWorklist(N);
5566       return SDValue(N, 0);
5567     }
5568     break;
5569   }
5570   case RISCVISD::CLZW:
5571   case RISCVISD::CTZW: {
    // Only the lower 32 bits of the first operand are read.
5573     SDValue Op0 = N->getOperand(0);
5574     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5575     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
5576       if (N->getOpcode() != ISD::DELETED_NODE)
5577         DCI.AddToWorklist(N);
5578       return SDValue(N, 0);
5579     }
5580     break;
5581   }
5582   case RISCVISD::FSL:
5583   case RISCVISD::FSR: {
    // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
5585     SDValue ShAmt = N->getOperand(2);
5586     unsigned BitWidth = ShAmt.getValueSizeInBits();
5587     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5588     APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
5589     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5590       if (N->getOpcode() != ISD::DELETED_NODE)
5591         DCI.AddToWorklist(N);
5592       return SDValue(N, 0);
5593     }
5594     break;
5595   }
5596   case RISCVISD::FSLW:
5597   case RISCVISD::FSRW: {
    // Only the lower 32 bits of the values and the lower 6 bits of the shift
    // amount are read.
5600     SDValue Op0 = N->getOperand(0);
5601     SDValue Op1 = N->getOperand(1);
5602     SDValue ShAmt = N->getOperand(2);
5603     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5604     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
5605     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
5606         SimplifyDemandedBits(Op1, OpMask, DCI) ||
5607         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5608       if (N->getOpcode() != ISD::DELETED_NODE)
5609         DCI.AddToWorklist(N);
5610       return SDValue(N, 0);
5611     }
5612     break;
5613   }
5614   case RISCVISD::GREV:
5615   case RISCVISD::GORC: {
    // Only the lower log2(Bitwidth) bits of the shift amount are read.
5617     SDValue ShAmt = N->getOperand(1);
5618     unsigned BitWidth = ShAmt.getValueSizeInBits();
5619     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5620     APInt ShAmtMask(BitWidth, BitWidth - 1);
5621     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5622       if (N->getOpcode() != ISD::DELETED_NODE)
5623         DCI.AddToWorklist(N);
5624       return SDValue(N, 0);
5625     }
5626 
5627     return combineGREVI_GORCI(N, DCI.DAG);
5628   }
5629   case RISCVISD::GREVW:
5630   case RISCVISD::GORCW: {
5631     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5632     SDValue LHS = N->getOperand(0);
5633     SDValue RHS = N->getOperand(1);
5634     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5635     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5636     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5637         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5638       if (N->getOpcode() != ISD::DELETED_NODE)
5639         DCI.AddToWorklist(N);
5640       return SDValue(N, 0);
5641     }
5642 
5643     return combineGREVI_GORCI(N, DCI.DAG);
5644   }
5645   case RISCVISD::SHFL:
5646   case RISCVISD::UNSHFL: {
    // Only the lower log2(Bitwidth)-1 bits of the shift amount are read.
5648     SDValue ShAmt = N->getOperand(1);
5649     unsigned BitWidth = ShAmt.getValueSizeInBits();
5650     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5651     APInt ShAmtMask(BitWidth, (BitWidth / 2) - 1);
5652     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5653       if (N->getOpcode() != ISD::DELETED_NODE)
5654         DCI.AddToWorklist(N);
5655       return SDValue(N, 0);
5656     }
5657 
5658     break;
5659   }
5660   case RISCVISD::SHFLW:
5661   case RISCVISD::UNSHFLW: {
    // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
5663     SDValue LHS = N->getOperand(0);
5664     SDValue RHS = N->getOperand(1);
5665     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5666     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
5667     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5668         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5669       if (N->getOpcode() != ISD::DELETED_NODE)
5670         DCI.AddToWorklist(N);
5671       return SDValue(N, 0);
5672     }
5673 
5674     break;
5675   }
5676   case RISCVISD::BCOMPRESSW:
5677   case RISCVISD::BDECOMPRESSW: {
5678     // Only the lower 32 bits of LHS and RHS are read.
5679     SDValue LHS = N->getOperand(0);
5680     SDValue RHS = N->getOperand(1);
5681     APInt Mask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5682     if (SimplifyDemandedBits(LHS, Mask, DCI) ||
5683         SimplifyDemandedBits(RHS, Mask, DCI)) {
5684       if (N->getOpcode() != ISD::DELETED_NODE)
5685         DCI.AddToWorklist(N);
5686       return SDValue(N, 0);
5687     }
5688 
5689     break;
5690   }
5691   case RISCVISD::FMV_X_ANYEXTW_RV64: {
5692     SDLoc DL(N);
5693     SDValue Op0 = N->getOperand(0);
5694     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
5695     // conversion is unnecessary and can be replaced with an ANY_EXTEND
5696     // of the FMV_W_X_RV64 operand.
5697     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
5698       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
5699              "Unexpected value type!");
5700       return Op0.getOperand(0);
5701     }
5702 
5703     // This is a target-specific version of a DAGCombine performed in
5704     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5705     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5706     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5707     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5708         !Op0.getNode()->hasOneUse())
5709       break;
5710     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
5711                                  Op0.getOperand(0));
5712     APInt SignBit = APInt::getSignMask(32).sext(64);
5713     if (Op0.getOpcode() == ISD::FNEG)
5714       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
5715                          DAG.getConstant(SignBit, DL, MVT::i64));
5716 
5717     assert(Op0.getOpcode() == ISD::FABS);
5718     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
5719                        DAG.getConstant(~SignBit, DL, MVT::i64));
5720   }
5721   case ISD::AND:
5722     return performANDCombine(N, DCI, Subtarget);
5723   case ISD::OR:
5724     return performORCombine(N, DCI, Subtarget);
5725   case ISD::XOR:
5726     return performXORCombine(N, DCI, Subtarget);
5727   case RISCVISD::SELECT_CC: {
    // Try to simplify the comparison used by this SELECT_CC.
5729     SDValue LHS = N->getOperand(0);
5730     SDValue RHS = N->getOperand(1);
5731     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
5732     if (!ISD::isIntEqualitySetCC(CCVal))
5733       break;
5734 
5735     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
5736     //      (select_cc X, Y, lt, trueV, falseV)
5737     // Sometimes the setcc is introduced after select_cc has been formed.
5738     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5739         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5740       // If we're looking for eq 0 instead of ne 0, we need to invert the
5741       // condition.
5742       bool Invert = CCVal == ISD::SETEQ;
5743       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5744       if (Invert)
5745         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5746 
5747       SDLoc DL(N);
5748       RHS = LHS.getOperand(1);
5749       LHS = LHS.getOperand(0);
5750       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5751 
5752       SDValue TargetCC =
5753           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5754       return DAG.getNode(
5755           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5756           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5757     }
5758 
5759     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
5760     //      (select_cc X, Y, eq/ne, trueV, falseV)
5761     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5762       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
5763                          {LHS.getOperand(0), LHS.getOperand(1),
5764                           N->getOperand(2), N->getOperand(3),
5765                           N->getOperand(4)});
5766     // (select_cc X, 1, setne, trueV, falseV) ->
5767     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
5768     // This can occur when legalizing some floating point comparisons.
5769     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5770     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5771       SDLoc DL(N);
5772       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5773       SDValue TargetCC =
5774           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5775       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5776       return DAG.getNode(
5777           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5778           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5779     }
5780 
5781     break;
5782   }
5783   case RISCVISD::BR_CC: {
5784     SDValue LHS = N->getOperand(1);
5785     SDValue RHS = N->getOperand(2);
5786     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
5787     if (!ISD::isIntEqualitySetCC(CCVal))
5788       break;
5789 
5790     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
5791     //      (br_cc X, Y, lt, dest)
5792     // Sometimes the setcc is introduced after br_cc has been formed.
5793     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5794         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5795       // If we're looking for eq 0 instead of ne 0, we need to invert the
5796       // condition.
5797       bool Invert = CCVal == ISD::SETEQ;
5798       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5799       if (Invert)
5800         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5801 
5802       SDLoc DL(N);
5803       RHS = LHS.getOperand(1);
5804       LHS = LHS.getOperand(0);
5805       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5806 
5807       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5808                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
5809                          N->getOperand(4));
5810     }
5811 
5812     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
    //      (br_cc X, Y, eq/ne, dest)
5814     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5815       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
5816                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
5817                          N->getOperand(3), N->getOperand(4));
5818 
    // (br_cc X, 1, setne, dest) ->
    // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
5821     // This can occur when legalizing some floating point comparisons.
5822     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5823     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5824       SDLoc DL(N);
5825       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5826       SDValue TargetCC = DAG.getCondCode(CCVal);
5827       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5828       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5829                          N->getOperand(0), LHS, RHS, TargetCC,
5830                          N->getOperand(4));
5831     }
5832     break;
5833   }
5834   case ISD::FCOPYSIGN: {
5835     EVT VT = N->getValueType(0);
5836     if (!VT.isVector())
5837       break;
5838     // There is a form of VFSGNJ which injects the negated sign of its second
5839     // operand. Try and bubble any FNEG up after the extend/round to produce
    // this optimized pattern. Avoid modifying cases where the FP_ROUND has
    // TRUNC=1.
5842     SDValue In2 = N->getOperand(1);
5843     // Avoid cases where the extend/round has multiple uses, as duplicating
5844     // those is typically more expensive than removing a fneg.
5845     if (!In2.hasOneUse())
5846       break;
5847     if (In2.getOpcode() != ISD::FP_EXTEND &&
5848         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
5849       break;
5850     In2 = In2.getOperand(0);
5851     if (In2.getOpcode() != ISD::FNEG)
5852       break;
5853     SDLoc DL(N);
5854     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
5855     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
5856                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
5857   }
5858   case ISD::MGATHER:
5859   case ISD::MSCATTER: {
5860     if (!DCI.isBeforeLegalize())
5861       break;
5862     MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
5863     SDValue Index = MGSN->getIndex();
5864     EVT IndexVT = Index.getValueType();
5865     MVT XLenVT = Subtarget.getXLenVT();
    // RISCV indexed loads and stores only support the "unsigned unscaled"
    // addressing mode, so anything else must be manually legalized.
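    // For example, a gather with sign-extended i16 indices scaled by 4 is
    // rewritten below: the indices are promoted to XLenVT and shifted left
    // by log2(4) = 2, after which the unsigned unscaled form is safe.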
5868     bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
5869                                 (MGSN->isIndexSigned() &&
5870                                  IndexVT.getVectorElementType().bitsLT(XLenVT));
5871     if (!NeedsIdxLegalization)
5872       break;
5873 
5874     SDLoc DL(N);
5875 
5876     // Any index legalization should first promote to XLenVT, so we don't lose
5877     // bits when scaling. This may create an illegal index type so we let
5878     // LLVM's legalization take care of the splitting.
5879     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
5880       IndexVT = IndexVT.changeVectorElementType(XLenVT);
5881       Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
5882                                                 : ISD::ZERO_EXTEND,
5883                           DL, IndexVT, Index);
5884     }
5885 
5886     unsigned Scale = N->getConstantOperandVal(5);
5887     if (MGSN->isIndexScaled() && Scale != 1) {
5888       // Manually scale the indices by the element size.
5889       // TODO: Sanitize the scale operand here?
      assert(isPowerOf2_32(Scale) && "Expecting power-of-two scale");
5891       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
5892       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
5893     }
5894 
5895     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
5896     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
5897       return DAG.getMaskedGather(
5898           N->getVTList(), MGSN->getMemoryVT(), DL,
5899           {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
5900            MGSN->getBasePtr(), Index, MGN->getScale()},
5901           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
5902     }
5903     const auto *MSN = cast<MaskedScatterSDNode>(N);
5904     return DAG.getMaskedScatter(
5905         N->getVTList(), MGSN->getMemoryVT(), DL,
5906         {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
5907          Index, MGSN->getScale()},
5908         MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
5909   }
5910   }
5911 
5912   return SDValue();
5913 }
5914 
5915 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
5916     const SDNode *N, CombineLevel Level) const {
5917   // The following folds are only desirable if `(OP _, c1 << c2)` can be
5918   // materialised in fewer instructions than `(OP _, c1)`:
5919   //
5920   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
5921   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
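  // For example, (shl (add x, 2047), 4) is left alone: 2047 fits in an ADDI
  // immediate, but 2047 << 4 = 32752 does not, so commuting would make the
  // constant more expensive to materialise.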
5922   SDValue N0 = N->getOperand(0);
5923   EVT Ty = N0.getValueType();
5924   if (Ty.isScalarInteger() &&
5925       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
5926     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
5927     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
5928     if (C1 && C2) {
5929       const APInt &C1Int = C1->getAPIntValue();
5930       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
5931 
5932       // We can materialise `c1 << c2` into an add immediate, so it's "free",
5933       // and the combine should happen, to potentially allow further combines
5934       // later.
5935       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
5936           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
5937         return true;
5938 
5939       // We can materialise `c1` in an add immediate, so it's "free", and the
5940       // combine should be prevented.
5941       if (C1Int.getMinSignedBits() <= 64 &&
5942           isLegalAddImmediate(C1Int.getSExtValue()))
5943         return false;
5944 
5945       // Neither constant will fit into an immediate, so find materialisation
5946       // costs.
5947       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
5948                                               Subtarget.is64Bit());
5949       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
5950           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
5951 
5952       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
5953       // combine should be prevented.
5954       if (C1Cost < ShiftedC1Cost)
5955         return false;
5956     }
5957   }
5958   return true;
5959 }
5960 
5961 bool RISCVTargetLowering::targetShrinkDemandedConstant(
5962     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
5963     TargetLoweringOpt &TLO) const {
5964   // Delay this optimization as late as possible.
5965   if (!TLO.LegalOps)
5966     return false;
5967 
5968   EVT VT = Op.getValueType();
5969   if (VT.isVector())
5970     return false;
5971 
5972   // Only handle AND for now.
5973   if (Op.getOpcode() != ISD::AND)
5974     return false;
5975 
5976   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
5977   if (!C)
5978     return false;
5979 
5980   const APInt &Mask = C->getAPIntValue();
5981 
5982   // Clear all non-demanded bits initially.
5983   APInt ShrunkMask = Mask & DemandedBits;
5984 
5985   // Try to make a smaller immediate by setting undemanded bits.
5986 
5987   APInt ExpandedMask = Mask | ~DemandedBits;
5988 
5989   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
5990     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
5991   };
5992   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
5993     if (NewMask == Mask)
5994       return true;
5995     SDLoc DL(Op);
5996     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
5997     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
5998     return TLO.CombineTo(Op, NewOp);
5999   };
6000 
6001   // If the shrunk mask fits in sign extended 12 bits, let the target
6002   // independent code apply it.
6003   if (ShrunkMask.isSignedIntN(12))
6004     return false;
6005 
6006   // Preserve (and X, 0xffff) when zext.h is supported.
6007   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
6008     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
6009     if (IsLegalMask(NewMask))
6010       return UseMask(NewMask);
6011   }
6012 
6013   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
6014   if (VT == MVT::i64) {
6015     APInt NewMask = APInt(64, 0xffffffff);
6016     if (IsLegalMask(NewMask))
6017       return UseMask(NewMask);
6018   }
6019 
6020   // For the remaining optimizations, we need to be able to make a negative
6021   // number through a combination of mask and undemanded bits.
6022   if (!ExpandedMask.isNegative())
6023     return false;
6024 
  // Compute the fewest bits needed to represent the negative number.
6026   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
6027 
  // Try to make a 12 bit negative immediate. If that fails, try to make a 32
  // bit negative immediate unless the shrunk immediate already fits in 32
  // bits.
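  // For example, for (and X, 0x7fffffff) on RV32 where bit 31 is not
  // demanded, ExpandedMask is all ones, so NewMask widens to -1 here and the
  // resulting AND can later fold away entirely.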
6030   APInt NewMask = ShrunkMask;
6031   if (MinSignedBits <= 12)
6032     NewMask.setBitsFrom(11);
6033   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
6034     NewMask.setBitsFrom(31);
6035   else
6036     return false;
6037 
6038   // Sanity check that our new mask is a subset of the demanded mask.
6039   assert(IsLegalMask(NewMask));
6040   return UseMask(NewMask);
6041 }
6042 
6043 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
6044                                                         KnownBits &Known,
6045                                                         const APInt &DemandedElts,
6046                                                         const SelectionDAG &DAG,
6047                                                         unsigned Depth) const {
6048   unsigned BitWidth = Known.getBitWidth();
6049   unsigned Opc = Op.getOpcode();
6050   assert((Opc >= ISD::BUILTIN_OP_END ||
6051           Opc == ISD::INTRINSIC_WO_CHAIN ||
6052           Opc == ISD::INTRINSIC_W_CHAIN ||
6053           Opc == ISD::INTRINSIC_VOID) &&
6054          "Should use MaskedValueIsZero if you don't know whether Op"
6055          " is a target node!");
6056 
6057   Known.resetAll();
6058   switch (Opc) {
6059   default: break;
6060   case RISCVISD::SELECT_CC: {
6061     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
6062     // If we don't know any bits, early out.
6063     if (Known.isUnknown())
6064       break;
6065     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
6066 
6067     // Only known if known in both the LHS and RHS.
6068     Known = KnownBits::commonBits(Known, Known2);
6069     break;
6070   }
6071   case RISCVISD::REMUW: {
6072     KnownBits Known2;
6073     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6074     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6075     // We only care about the lower 32 bits.
6076     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
6077     // Restore the original width by sign extending.
6078     Known = Known.sext(BitWidth);
6079     break;
6080   }
6081   case RISCVISD::DIVUW: {
6082     KnownBits Known2;
6083     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6084     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6085     // We only care about the lower 32 bits.
6086     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
6087     // Restore the original width by sign extending.
6088     Known = Known.sext(BitWidth);
6089     break;
6090   }
6091   case RISCVISD::CTZW: {
6092     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6093     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
6094     unsigned LowBits = Log2_32(PossibleTZ) + 1;
6095     Known.Zero.setBitsFrom(LowBits);
6096     break;
6097   }
6098   case RISCVISD::CLZW: {
6099     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6100     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
6101     unsigned LowBits = Log2_32(PossibleLZ) + 1;
6102     Known.Zero.setBitsFrom(LowBits);
6103     break;
6104   }
6105   case RISCVISD::READ_VLENB:
    // We assume VLENB is a multiple of 16 bytes, so its low 4 bits are zero.
6107     Known.Zero.setLowBits(4);
6108     break;
6109   case ISD::INTRINSIC_W_CHAIN: {
6110     unsigned IntNo = Op.getConstantOperandVal(1);
6111     switch (IntNo) {
6112     default:
6113       // We can't do anything for most intrinsics.
6114       break;
6115     case Intrinsic::riscv_vsetvli:
6116     case Intrinsic::riscv_vsetvlimax:
6117       // Assume that VL output is positive and would fit in an int32_t.
6118       // TODO: VLEN might be capped at 16 bits in a future V spec update.
6119       if (BitWidth >= 32)
6120         Known.Zero.setBitsFrom(31);
6121       break;
6122     }
6123     break;
6124   }
6125   }
6126 }
6127 
6128 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
6129     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
6130     unsigned Depth) const {
6131   switch (Op.getOpcode()) {
6132   default:
6133     break;
6134   case RISCVISD::SLLW:
6135   case RISCVISD::SRAW:
6136   case RISCVISD::SRLW:
6137   case RISCVISD::DIVW:
6138   case RISCVISD::DIVUW:
6139   case RISCVISD::REMUW:
6140   case RISCVISD::ROLW:
6141   case RISCVISD::RORW:
6142   case RISCVISD::GREVW:
6143   case RISCVISD::GORCW:
6144   case RISCVISD::FSLW:
6145   case RISCVISD::FSRW:
6146   case RISCVISD::SHFLW:
6147   case RISCVISD::UNSHFLW:
6148   case RISCVISD::BCOMPRESSW:
6149   case RISCVISD::BDECOMPRESSW:
6150     // TODO: As the result is sign-extended, this is conservatively correct. A
6151     // more precise answer could be calculated for SRAW depending on known
6152     // bits in the shift amount.
6153     return 33;
6154   case RISCVISD::SHFL:
6155   case RISCVISD::UNSHFL: {
    // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word
6157     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
6158     // will stay within the upper 32 bits. If there were more than 32 sign bits
6159     // before there will be at least 33 sign bits after.
6160     if (Op.getValueType() == MVT::i64 &&
6161         isa<ConstantSDNode>(Op.getOperand(1)) &&
6162         (Op.getConstantOperandVal(1) & 0x10) == 0) {
6163       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
6164       if (Tmp > 32)
6165         return 33;
6166     }
6167     break;
6168   }
6169   case RISCVISD::VMV_X_S:
6170     // The number of sign bits of the scalar result is computed by obtaining the
6171     // element type of the input vector operand, subtracting its width from the
6172     // XLEN, and then adding one (sign bit within the element type). If the
6173     // element type is wider than XLen, the least-significant XLEN bits are
6174     // taken.
6175     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
6176       return 1;
6177     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
6178   }
6179 
6180   return 1;
6181 }
6182 
6183 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
6184                                                   MachineBasicBlock *BB) {
6185   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
6186 
6187   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
6188   // Should the count have wrapped while it was being read, we need to try
6189   // again.
6190   // ...
6191   // read:
6192   // rdcycleh x3 # load high word of cycle
6193   // rdcycle  x2 # load low word of cycle
6194   // rdcycleh x4 # load high word of cycle
6195   // bne x3, x4, read # check if high word reads match, otherwise try again
6196   // ...
6197 
6198   MachineFunction &MF = *BB->getParent();
6199   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6200   MachineFunction::iterator It = ++BB->getIterator();
6201 
6202   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6203   MF.insert(It, LoopMBB);
6204 
6205   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6206   MF.insert(It, DoneMBB);
6207 
6208   // Transfer the remainder of BB and its successor edges to DoneMBB.
6209   DoneMBB->splice(DoneMBB->begin(), BB,
6210                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
6211   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
6212 
6213   BB->addSuccessor(LoopMBB);
6214 
6215   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6216   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6217   Register LoReg = MI.getOperand(0).getReg();
6218   Register HiReg = MI.getOperand(1).getReg();
6219   DebugLoc DL = MI.getDebugLoc();
6220 
6221   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
6222   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
6223       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6224       .addReg(RISCV::X0);
6225   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
6226       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
6227       .addReg(RISCV::X0);
6228   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
6229       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6230       .addReg(RISCV::X0);
6231 
6232   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
6233       .addReg(HiReg)
6234       .addReg(ReadAgainReg)
6235       .addMBB(LoopMBB);
6236 
6237   LoopMBB->addSuccessor(LoopMBB);
6238   LoopMBB->addSuccessor(DoneMBB);
6239 
6240   MI.eraseFromParent();
6241 
6242   return DoneMBB;
6243 }
6244 
6245 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
6246                                              MachineBasicBlock *BB) {
6247   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
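  // SplitF64 is lowered by spilling the FPR64 source to a stack slot and
  // reloading its two 32-bit halves into GPRs with a pair of LW loads.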
6248 
6249   MachineFunction &MF = *BB->getParent();
6250   DebugLoc DL = MI.getDebugLoc();
6251   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6252   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6253   Register LoReg = MI.getOperand(0).getReg();
6254   Register HiReg = MI.getOperand(1).getReg();
6255   Register SrcReg = MI.getOperand(2).getReg();
6256   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
6257   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6258 
6259   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
6260                           RI);
6261   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6262   MachineMemOperand *MMOLo =
6263       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
6264   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6265       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
6266   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
6267       .addFrameIndex(FI)
6268       .addImm(0)
6269       .addMemOperand(MMOLo);
6270   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
6271       .addFrameIndex(FI)
6272       .addImm(4)
6273       .addMemOperand(MMOHi);
6274   MI.eraseFromParent(); // The pseudo instruction is gone now.
6275   return BB;
6276 }
6277 
6278 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
6279                                                  MachineBasicBlock *BB) {
6280   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
6281          "Unexpected instruction");
6282 
6283   MachineFunction &MF = *BB->getParent();
6284   DebugLoc DL = MI.getDebugLoc();
6285   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6286   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6287   Register DstReg = MI.getOperand(0).getReg();
6288   Register LoReg = MI.getOperand(1).getReg();
6289   Register HiReg = MI.getOperand(2).getReg();
6290   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
6291   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6292 
6293   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6294   MachineMemOperand *MMOLo =
6295       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
6296   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6297       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
6298   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6299       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
6300       .addFrameIndex(FI)
6301       .addImm(0)
6302       .addMemOperand(MMOLo);
6303   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6304       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
6305       .addFrameIndex(FI)
6306       .addImm(4)
6307       .addMemOperand(MMOHi);
6308   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
6309   MI.eraseFromParent(); // The pseudo instruction is gone now.
6310   return BB;
6311 }
6312 
6313 static bool isSelectPseudo(MachineInstr &MI) {
6314   switch (MI.getOpcode()) {
6315   default:
6316     return false;
6317   case RISCV::Select_GPR_Using_CC_GPR:
6318   case RISCV::Select_FPR16_Using_CC_GPR:
6319   case RISCV::Select_FPR32_Using_CC_GPR:
6320   case RISCV::Select_FPR64_Using_CC_GPR:
6321     return true;
6322   }
6323 }
6324 
6325 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
6326                                            MachineBasicBlock *BB) {
6327   // To "insert" Select_* instructions, we actually have to insert the triangle
6328   // control-flow pattern.  The incoming instructions know the destination vreg
6329   // to set, the condition code register to branch on, the true/false values to
6330   // select between, and the condcode to use to select the appropriate branch.
6331   //
6332   // We produce the following control flow:
6333   //     HeadMBB
6334   //     |  \
6335   //     |  IfFalseMBB
6336   //     | /
6337   //    TailMBB
6338   //
6339   // When we find a sequence of selects we attempt to optimize their emission
6340   // by sharing the control flow. Currently we only handle cases where we have
6341   // multiple selects with the exact same condition (same LHS, RHS and CC).
6342   // The selects may be interleaved with other instructions if the other
6343   // instructions meet some requirements we deem safe:
6344   // - They are debug instructions. Otherwise,
6345   // - They do not have side-effects, do not access memory and their inputs do
6346   //   not depend on the results of the select pseudo-instructions.
6347   // The TrueV/FalseV operands of the selects cannot depend on the result of
6348   // previous selects in the sequence.
6349   // These conditions could be further relaxed. See the X86 target for a
6350   // related approach and more information.
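  // As an illustrative sketch, a block containing
  //   %x = Select_GPR_Using_CC_GPR %lhs, %rhs, cc, %t1, %f1
  //   %y = Select_GPR_Using_CC_GPR %lhs, %rhs, cc, %t2, %f2
  // becomes a single conditional branch plus two PHIs in TailMBB, one per
  // select result.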
6351   Register LHS = MI.getOperand(1).getReg();
6352   Register RHS = MI.getOperand(2).getReg();
6353   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
6354 
6355   SmallVector<MachineInstr *, 4> SelectDebugValues;
6356   SmallSet<Register, 4> SelectDests;
6357   SelectDests.insert(MI.getOperand(0).getReg());
6358 
6359   MachineInstr *LastSelectPseudo = &MI;
6360 
6361   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
6362        SequenceMBBI != E; ++SequenceMBBI) {
6363     if (SequenceMBBI->isDebugInstr())
6364       continue;
6365     else if (isSelectPseudo(*SequenceMBBI)) {
6366       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
6367           SequenceMBBI->getOperand(2).getReg() != RHS ||
6368           SequenceMBBI->getOperand(3).getImm() != CC ||
6369           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
6370           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
6371         break;
6372       LastSelectPseudo = &*SequenceMBBI;
6373       SequenceMBBI->collectDebugValues(SelectDebugValues);
6374       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
6375     } else {
6376       if (SequenceMBBI->hasUnmodeledSideEffects() ||
6377           SequenceMBBI->mayLoadOrStore())
6378         break;
6379       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
6380             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
6381           }))
6382         break;
6383     }
6384   }
6385 
6386   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
6387   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6388   DebugLoc DL = MI.getDebugLoc();
6389   MachineFunction::iterator I = ++BB->getIterator();
6390 
6391   MachineBasicBlock *HeadMBB = BB;
6392   MachineFunction *F = BB->getParent();
6393   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
6394   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
6395 
6396   F->insert(I, IfFalseMBB);
6397   F->insert(I, TailMBB);
6398 
6399   // Transfer debug instructions associated with the selects to TailMBB.
6400   for (MachineInstr *DebugInstr : SelectDebugValues) {
6401     TailMBB->push_back(DebugInstr->removeFromParent());
6402   }
6403 
6404   // Move all instructions after the sequence to TailMBB.
6405   TailMBB->splice(TailMBB->end(), HeadMBB,
6406                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
6407   // Update machine-CFG edges by transferring all successors of the current
6408   // block to the new block which will contain the Phi nodes for the selects.
6409   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
6410   // Set the successors for HeadMBB.
6411   HeadMBB->addSuccessor(IfFalseMBB);
6412   HeadMBB->addSuccessor(TailMBB);
6413 
6414   // Insert appropriate branch.
6415   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
6416 
6417   BuildMI(HeadMBB, DL, TII.get(Opcode))
6418     .addReg(LHS)
6419     .addReg(RHS)
6420     .addMBB(TailMBB);
6421 
6422   // IfFalseMBB just falls through to TailMBB.
6423   IfFalseMBB->addSuccessor(TailMBB);
6424 
6425   // Create PHIs for all of the select pseudo-instructions.
6426   auto SelectMBBI = MI.getIterator();
6427   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
6428   auto InsertionPoint = TailMBB->begin();
6429   while (SelectMBBI != SelectEnd) {
6430     auto Next = std::next(SelectMBBI);
6431     if (isSelectPseudo(*SelectMBBI)) {
6432       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
6433       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
6434               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
6435           .addReg(SelectMBBI->getOperand(4).getReg())
6436           .addMBB(HeadMBB)
6437           .addReg(SelectMBBI->getOperand(5).getReg())
6438           .addMBB(IfFalseMBB);
6439       SelectMBBI->eraseFromParent();
6440     }
6441     SelectMBBI = Next;
6442   }
6443 
6444   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
6445   return TailMBB;
6446 }
6447 
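// Look through full-register COPY instructions to find the true defining
// instruction, returning nullptr if the chain reaches a physical register or
// an unknown definition.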
6448 static MachineInstr *elideCopies(MachineInstr *MI,
6449                                  const MachineRegisterInfo &MRI) {
6450   while (true) {
6451     if (!MI->isFullCopy())
6452       return MI;
6453     if (!Register::isVirtualRegister(MI->getOperand(1).getReg()))
6454       return nullptr;
6455     MI = MRI.getVRegDef(MI->getOperand(1).getReg());
6456     if (!MI)
6457       return nullptr;
6458   }
6459 }
6460 
6461 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
6462                                     int VLIndex, unsigned SEWIndex,
6463                                     RISCVII::VLMUL VLMul,
6464                                     bool ForceTailAgnostic) {
6465   MachineFunction &MF = *BB->getParent();
6466   DebugLoc DL = MI.getDebugLoc();
6467   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6468 
6469   unsigned Log2SEW = MI.getOperand(SEWIndex).getImm();
6470   unsigned SEW = 1 << Log2SEW;
6471   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
6472 
6473   MachineRegisterInfo &MRI = MF.getRegInfo();
6474 
6475   auto BuildVSETVLI = [&]() {
6476     if (VLIndex >= 0) {
6477       Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
6478       const MachineOperand &VLOp = MI.getOperand(VLIndex);
6479 
6480       // VL can be a register or an immediate.
6481       if (VLOp.isImm())
6482         return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETIVLI))
6483             .addReg(DestReg, RegState::Define | RegState::Dead)
6484             .addImm(VLOp.getImm());
6485 
6486       Register VLReg = MI.getOperand(VLIndex).getReg();
6487       return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
6488           .addReg(DestReg, RegState::Define | RegState::Dead)
6489           .addReg(VLReg);
6490     }
6491 
    // With no VL operand in the pseudo, do not modify VL (rd = X0, rs1 = X0).
6493     return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
6494         .addReg(RISCV::X0, RegState::Define | RegState::Dead)
6495         .addReg(RISCV::X0, RegState::Kill);
6496   };
6497 
6498   MachineInstrBuilder MIB = BuildVSETVLI();
6499 
6500   // Default to tail agnostic unless the destination is tied to a source. In
6501   // that case the user would have some control over the tail values. The tail
  // policy is also ignored on instructions that only update element 0, like
  // vmv.s.x or reductions, so use agnostic there to match the common case.
6504   // FIXME: This is conservatively correct, but we might want to detect that
6505   // the input is undefined.
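  // For example, a pseudo whose result is tied to a merge operand is given a
  // tail-undisturbed policy unless the merge value traces back (through full
  // copies) to an IMPLICIT_DEF, in which case the tail is undefined anyway
  // and tail agnostic remains safe.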
6506   bool TailAgnostic = true;
6507   unsigned UseOpIdx;
6508   if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
6509     TailAgnostic = false;
6510     // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic.
6511     const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
6512     MachineInstr *UseMI = MRI.getVRegDef(UseMO.getReg());
6513     if (UseMI) {
6514       UseMI = elideCopies(UseMI, MRI);
6515       if (UseMI && UseMI->isImplicitDef())
6516         TailAgnostic = true;
6517     }
6518   }
6519 
6520   // For simplicity we reuse the vtype representation here.
6521   MIB.addImm(RISCVVType::encodeVTYPE(VLMul, SEW,
6522                                      /*TailAgnostic*/ TailAgnostic,
6523                                      /*MaskAgnostic*/ false));
6524 
6525   // Remove (now) redundant operands from pseudo
6526   if (VLIndex >= 0 && MI.getOperand(VLIndex).isReg()) {
6527     MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
6528     MI.getOperand(VLIndex).setIsKill(false);
6529   }
6530 
6531   return BB;
6532 }
6533 
6534 MachineBasicBlock *
6535 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
6536                                                  MachineBasicBlock *BB) const {
6537   uint64_t TSFlags = MI.getDesc().TSFlags;
6538 
6539   if (RISCVII::hasSEWOp(TSFlags)) {
6540     unsigned NumOperands = MI.getNumExplicitOperands();
6541     int VLIndex = RISCVII::hasVLOp(TSFlags) ? NumOperands - 2 : -1;
6542     unsigned SEWIndex = NumOperands - 1;
6543     bool ForceTailAgnostic = RISCVII::doesForceTailAgnostic(TSFlags);
6544 
6545     RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);
6546     return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, ForceTailAgnostic);
6547   }
6548 
6549   switch (MI.getOpcode()) {
6550   default:
6551     llvm_unreachable("Unexpected instr type to insert");
6552   case RISCV::ReadCycleWide:
6553     assert(!Subtarget.is64Bit() &&
6554            "ReadCycleWrite is only to be used on riscv32");
6555     return emitReadCycleWidePseudo(MI, BB);
6556   case RISCV::Select_GPR_Using_CC_GPR:
6557   case RISCV::Select_FPR16_Using_CC_GPR:
6558   case RISCV::Select_FPR32_Using_CC_GPR:
6559   case RISCV::Select_FPR64_Using_CC_GPR:
6560     return emitSelectPseudo(MI, BB);
6561   case RISCV::BuildPairF64Pseudo:
6562     return emitBuildPairF64Pseudo(MI, BB);
6563   case RISCV::SplitF64Pseudo:
6564     return emitSplitF64Pseudo(MI, BB);
6565   }
6566 }
6567 
6568 // Calling Convention Implementation.
6569 // The expectations for frontend ABI lowering vary from target to target.
6570 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
6571 // details, but this is a longer term goal. For now, we simply try to keep the
6572 // role of the frontend as simple and well-defined as possible. The rules can
6573 // be summarised as:
6574 // * Never split up large scalar arguments. We handle them here.
6575 // * If a hardfloat calling convention is being used, and the struct may be
6576 // passed in a pair of registers (fp+fp, int+fp), and both registers are
6577 // available, then pass as two separate arguments. If either the GPRs or FPRs
6578 // are exhausted, then pass according to the rule below.
6579 // * If a struct could never be passed in registers or directly in a stack
6580 // slot (as it is larger than 2*XLEN and the floating point rules don't
6581 // apply), then pass it using a pointer with the byval attribute.
6582 // * If a struct is less than 2*XLEN, then coerce to either a two-element
6583 // word-sized array or a 2*XLEN scalar (depending on alignment).
6584 // * The frontend can determine whether a struct is returned by reference or
6585 // not based on its size and fields. If it will be returned by reference, the
6586 // frontend must modify the prototype so a pointer with the sret annotation is
6587 // passed as the first argument. This is not necessary for large scalar
6588 // returns.
6589 // * Struct return values and varargs should be coerced to structs containing
6590 // register-size fields in the same situations they would be for fixed
6591 // arguments.
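// As an illustrative example of the first rule: an i128 argument on RV32 (or
// i256 on RV64) is a large scalar that the frontend passes through unchanged;
// CC_RISCV below lowers it indirectly, passing a pointer to a stack copy.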
6592 
6593 static const MCPhysReg ArgGPRs[] = {
6594   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
6595   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
6596 };
6597 static const MCPhysReg ArgFPR16s[] = {
6598   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
6599   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
6600 };
6601 static const MCPhysReg ArgFPR32s[] = {
6602   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
6603   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
6604 };
6605 static const MCPhysReg ArgFPR64s[] = {
6606   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
6607   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
6608 };
6609 // This is an interim calling convention and it may be changed in the future.
6610 static const MCPhysReg ArgVRs[] = {
6611     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
6612     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
6613     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
6614 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
6615                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
6616                                      RISCV::V20M2, RISCV::V22M2};
6617 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
6618                                      RISCV::V20M4};
6619 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
6620 
6621 // Pass a 2*XLEN argument that has been split into two XLEN values through
6622 // registers or the stack as necessary.
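// For example, an i64 argument on RV32 arrives here as two i32 halves: with
// two argument GPRs free, both halves go in registers; with exactly one free,
// the low half takes the register and the high half goes on the stack; with
// none free, both halves go on the stack (the first with the original
// alignment).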
6623 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
6624                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
6625                                 MVT ValVT2, MVT LocVT2,
6626                                 ISD::ArgFlagsTy ArgFlags2) {
6627   unsigned XLenInBytes = XLen / 8;
6628   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6629     // At least one half can be passed via register.
6630     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
6631                                      VA1.getLocVT(), CCValAssign::Full));
6632   } else {
6633     // Both halves must be passed on the stack, with proper alignment.
6634     Align StackAlign =
6635         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
6636     State.addLoc(
6637         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
6638                             State.AllocateStack(XLenInBytes, StackAlign),
6639                             VA1.getLocVT(), CCValAssign::Full));
6640     State.addLoc(CCValAssign::getMem(
6641         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6642         LocVT2, CCValAssign::Full));
6643     return false;
6644   }
6645 
6646   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6647     // The second half can also be passed via register.
6648     State.addLoc(
6649         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
6650   } else {
6651     // The second half is passed via the stack, without additional alignment.
6652     State.addLoc(CCValAssign::getMem(
6653         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6654         LocVT2, CCValAssign::Full));
6655   }
6656 
6657   return false;
6658 }
6659 
6660 // Implements the RISC-V calling convention. Returns true upon failure.
6661 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
6662                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
6663                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
6664                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
6665                      Optional<unsigned> FirstMaskArgument) {
6666   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
6667   assert(XLen == 32 || XLen == 64);
6668   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
6669 
  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
6672   if (!LocVT.isVector() && IsRet && ValNo > 1)
6673     return true;
6674 
  // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
  // passing a variadic argument, or if no F16/F32 argument registers are
  // available.
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 is true if targeting a soft-float ABI or an FLEN=32 ABI, if
  // passing a variadic argument, or if no F64 argument registers are
  // available.
6680   bool UseGPRForF64 = true;
6681 
6682   switch (ABI) {
6683   default:
6684     llvm_unreachable("Unexpected ABI");
6685   case RISCVABI::ABI_ILP32:
6686   case RISCVABI::ABI_LP64:
6687     break;
6688   case RISCVABI::ABI_ILP32F:
6689   case RISCVABI::ABI_LP64F:
6690     UseGPRForF16_F32 = !IsFixed;
6691     break;
6692   case RISCVABI::ABI_ILP32D:
6693   case RISCVABI::ABI_LP64D:
6694     UseGPRForF16_F32 = !IsFixed;
6695     UseGPRForF64 = !IsFixed;
6696     break;
6697   }
6698 
6699   // FPR16, FPR32, and FPR64 alias each other.
6700   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
6701     UseGPRForF16_F32 = true;
6702     UseGPRForF64 = true;
6703   }
6704 
6705   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
6706   // similar local variables rather than directly checking against the target
6707   // ABI.
6708 
6709   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
6710     LocVT = XLenVT;
6711     LocInfo = CCValAssign::BCvt;
6712   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
6713     LocVT = MVT::i64;
6714     LocInfo = CCValAssign::BCvt;
6715   }
6716 
6717   // If this is a variadic argument, the RISC-V calling convention requires
6718   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
6719   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
6720   // be used regardless of whether the original argument was split during
6721   // legalisation or not. The argument will not be passed by registers if the
6722   // original type is larger than 2*XLEN, so the register alignment rule does
6723   // not apply.
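  // For example, a variadic double on RV32 following one named argument would
  // next be allocated a1; a1 is an 'odd' register, so it is skipped and the
  // double occupies the aligned pair a2/a3.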
6724   unsigned TwoXLenInBytes = (2 * XLen) / 8;
6725   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
6726       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
6727     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
6728     // Skip 'odd' register if necessary.
6729     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
6730       State.AllocateReg(ArgGPRs);
6731   }
6732 
6733   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
6734   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
6735       State.getPendingArgFlags();
6736 
6737   assert(PendingLocs.size() == PendingArgFlags.size() &&
6738          "PendingLocs and PendingArgFlags out of sync");
6739 
6740   // Handle passing f64 on RV32D with a soft float ABI or when floating point
6741   // registers are exhausted.
6742   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
6743     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
6744            "Can't lower f64 if it is split");
6745     // Depending on available argument GPRS, f64 may be passed in a pair of
6746     // GPRs, split between a GPR and the stack, or passed completely on the
6747     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
6748     // cases.
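    // For example: with a0-a5 already allocated, the f64 is passed in the
    // a6/a7 pair; with a0-a6 allocated, the low half goes in a7 and the high
    // half on the stack; with all argument GPRs allocated, the whole f64 is
    // passed on the stack.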
6749     Register Reg = State.AllocateReg(ArgGPRs);
6750     LocVT = MVT::i32;
6751     if (!Reg) {
6752       unsigned StackOffset = State.AllocateStack(8, Align(8));
6753       State.addLoc(
6754           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
6755       return false;
6756     }
6757     if (!State.AllocateReg(ArgGPRs))
6758       State.AllocateStack(4, Align(4));
6759     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6760     return false;
6761   }
6762 
6763   // Fixed-length vectors are located in the corresponding scalable-vector
6764   // container types.
6765   if (ValVT.isFixedLengthVector())
6766     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
6767 
6768   // Split arguments might be passed indirectly, so keep track of the pending
6769   // values. Split vectors are passed via a mix of registers and indirectly, so
6770   // treat them as we would any other argument.
6771   if (!LocVT.isVector() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
6772     LocVT = XLenVT;
6773     LocInfo = CCValAssign::Indirect;
6774     PendingLocs.push_back(
6775         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
6776     PendingArgFlags.push_back(ArgFlags);
6777     if (!ArgFlags.isSplitEnd()) {
6778       return false;
6779     }
6780   }
6781 
6782   // If the split argument only had two elements, it should be passed directly
6783   // in registers or on the stack.
6784   if (!LocVT.isVector() && ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
6785     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
6786     // Apply the normal calling convention rules to the first half of the
6787     // split argument.
6788     CCValAssign VA = PendingLocs[0];
6789     ISD::ArgFlagsTy AF = PendingArgFlags[0];
6790     PendingLocs.clear();
6791     PendingArgFlags.clear();
6792     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
6793                                ArgFlags);
6794   }
6795 
6796   // Allocate to a register if possible, or else a stack slot.
6797   Register Reg;
6798   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
6799     Reg = State.AllocateReg(ArgFPR16s);
6800   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
6801     Reg = State.AllocateReg(ArgFPR32s);
6802   else if (ValVT == MVT::f64 && !UseGPRForF64)
6803     Reg = State.AllocateReg(ArgFPR64s);
6804   else if (ValVT.isVector()) {
6805     const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
6806     if (RC == &RISCV::VRRegClass) {
6807       // Assign the first mask argument to V0.
6808       // This is an interim calling convention and it may be changed in the
6809       // future.
6810       if (FirstMaskArgument.hasValue() &&
6811           ValNo == FirstMaskArgument.getValue()) {
6812         Reg = State.AllocateReg(RISCV::V0);
6813       } else {
6814         Reg = State.AllocateReg(ArgVRs);
6815       }
6816     } else if (RC == &RISCV::VRM2RegClass) {
6817       Reg = State.AllocateReg(ArgVRM2s);
6818     } else if (RC == &RISCV::VRM4RegClass) {
6819       Reg = State.AllocateReg(ArgVRM4s);
6820     } else if (RC == &RISCV::VRM8RegClass) {
6821       Reg = State.AllocateReg(ArgVRM8s);
6822     } else {
6823       llvm_unreachable("Unhandled class register for ValueType");
6824     }
6825     if (!Reg) {
6826       // For return values, the vector must be passed fully via registers or
6827       // via the stack.
6828       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
6829       // but we're using all of them.
6830       if (IsRet)
6831         return true;
6832       LocInfo = CCValAssign::Indirect;
6833       // Try using a GPR to pass the address
6834       Reg = State.AllocateReg(ArgGPRs);
6835       LocVT = XLenVT;
6836     }
6837   } else
6838     Reg = State.AllocateReg(ArgGPRs);
6839   unsigned StackOffset =
6840       Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
6841 
6842   // If we reach this point and PendingLocs is non-empty, we must be at the
6843   // end of a split argument that must be passed indirectly.
6844   if (!PendingLocs.empty()) {
6845     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
6846     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
6847 
6848     for (auto &It : PendingLocs) {
6849       if (Reg)
6850         It.convertToReg(Reg);
6851       else
6852         It.convertToMem(StackOffset);
6853       State.addLoc(It);
6854     }
6855     PendingLocs.clear();
6856     PendingArgFlags.clear();
6857     return false;
6858   }
6859 
6860   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
6861           (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
6862          "Expected an XLenVT or vector types at this stage");
6863 
6864   if (Reg) {
6865     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6866     return false;
6867   }
6868 
6869   // When a floating-point value is passed on the stack, no bit-conversion is
6870   // needed.
6871   if (ValVT.isFloatingPoint()) {
6872     LocVT = ValVT;
6873     LocInfo = CCValAssign::Full;
6874   }
6875   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
6876   return false;
6877 }
6878 
6879 template <typename ArgTy>
6880 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
6881   for (const auto &ArgIdx : enumerate(Args)) {
6882     MVT ArgVT = ArgIdx.value().VT;
6883     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
6884       return ArgIdx.index();
6885   }
6886   return None;
6887 }
6888 
6889 void RISCVTargetLowering::analyzeInputArgs(
6890     MachineFunction &MF, CCState &CCInfo,
6891     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
6892   unsigned NumArgs = Ins.size();
6893   FunctionType *FType = MF.getFunction().getFunctionType();
6894 
6895   Optional<unsigned> FirstMaskArgument;
6896   if (Subtarget.hasStdExtV())
6897     FirstMaskArgument = preAssignMask(Ins);
6898 
6899   for (unsigned i = 0; i != NumArgs; ++i) {
6900     MVT ArgVT = Ins[i].VT;
6901     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
6902 
6903     Type *ArgTy = nullptr;
6904     if (IsRet)
6905       ArgTy = FType->getReturnType();
6906     else if (Ins[i].isOrigArg())
6907       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
6908 
6909     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
6910     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
6911                  ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
6912                  FirstMaskArgument)) {
6913       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
6914                         << EVT(ArgVT).getEVTString() << '\n');
6915       llvm_unreachable(nullptr);
6916     }
6917   }
6918 }
6919 
6920 void RISCVTargetLowering::analyzeOutputArgs(
6921     MachineFunction &MF, CCState &CCInfo,
6922     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
6923     CallLoweringInfo *CLI) const {
6924   unsigned NumArgs = Outs.size();
6925 
6926   Optional<unsigned> FirstMaskArgument;
6927   if (Subtarget.hasStdExtV())
6928     FirstMaskArgument = preAssignMask(Outs);
6929 
6930   for (unsigned i = 0; i != NumArgs; i++) {
6931     MVT ArgVT = Outs[i].VT;
6932     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
6933     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
6934 
6935     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
6936     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
6937                  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
6938                  FirstMaskArgument)) {
6939       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
6940                         << EVT(ArgVT).getEVTString() << "\n");
6941       llvm_unreachable(nullptr);
6942     }
6943   }
6944 }
6945 
6946 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
6947 // values.
6948 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
6949                                    const CCValAssign &VA, const SDLoc &DL,
6950                                    const RISCVSubtarget &Subtarget) {
6951   switch (VA.getLocInfo()) {
6952   default:
6953     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6954   case CCValAssign::Full:
6955     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
6956       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
6957     break;
6958   case CCValAssign::BCvt:
6959     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
6960       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
6961     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
6962       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
6963     else
6964       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
6965     break;
6966   }
6967   return Val;
6968 }
6969 
6970 // The caller is responsible for loading the full value if the argument is
6971 // passed with CCValAssign::Indirect.
6972 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
6973                                 const CCValAssign &VA, const SDLoc &DL,
6974                                 const RISCVTargetLowering &TLI) {
6975   MachineFunction &MF = DAG.getMachineFunction();
6976   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6977   EVT LocVT = VA.getLocVT();
6978   SDValue Val;
6979   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
6980   Register VReg = RegInfo.createVirtualRegister(RC);
6981   RegInfo.addLiveIn(VA.getLocReg(), VReg);
6982   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
6983 
6984   if (VA.getLocInfo() == CCValAssign::Indirect)
6985     return Val;
6986 
6987   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
6988 }
6989 
6990 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
6991                                    const CCValAssign &VA, const SDLoc &DL,
6992                                    const RISCVSubtarget &Subtarget) {
6993   EVT LocVT = VA.getLocVT();
6994 
6995   switch (VA.getLocInfo()) {
6996   default:
6997     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6998   case CCValAssign::Full:
6999     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
7000       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
7001     break;
7002   case CCValAssign::BCvt:
7003     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
7004       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
7005     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
7006       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
7007     else
7008       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
7009     break;
7010   }
7011   return Val;
7012 }
7013 
7014 // The caller is responsible for loading the full value if the argument is
7015 // passed with CCValAssign::Indirect.
7016 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
7017                                 const CCValAssign &VA, const SDLoc &DL) {
7018   MachineFunction &MF = DAG.getMachineFunction();
7019   MachineFrameInfo &MFI = MF.getFrameInfo();
7020   EVT LocVT = VA.getLocVT();
7021   EVT ValVT = VA.getValVT();
7022   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
7023   int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
7024                                  VA.getLocMemOffset(), /*Immutable=*/true);
7025   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7026   SDValue Val;
7027 
7028   ISD::LoadExtType ExtType;
7029   switch (VA.getLocInfo()) {
7030   default:
7031     llvm_unreachable("Unexpected CCValAssign::LocInfo");
7032   case CCValAssign::Full:
7033   case CCValAssign::Indirect:
7034   case CCValAssign::BCvt:
7035     ExtType = ISD::NON_EXTLOAD;
7036     break;
7037   }
7038   Val = DAG.getExtLoad(
7039       ExtType, DL, LocVT, Chain, FIN,
7040       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
7041   return Val;
7042 }
7043 
7044 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
7045                                        const CCValAssign &VA, const SDLoc &DL) {
7046   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
7047          "Unexpected VA");
7048   MachineFunction &MF = DAG.getMachineFunction();
7049   MachineFrameInfo &MFI = MF.getFrameInfo();
7050   MachineRegisterInfo &RegInfo = MF.getRegInfo();
7051 
7052   if (VA.isMemLoc()) {
7053     // f64 is passed on the stack.
7054     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
7055     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7056     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
7057                        MachinePointerInfo::getFixedStack(MF, FI));
7058   }
7059 
7060   assert(VA.isRegLoc() && "Expected register VA assignment");
7061 
7062   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7063   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
7064   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
7065   SDValue Hi;
7066   if (VA.getLocReg() == RISCV::X17) {
7067     // Second half of f64 is passed on the stack.
7068     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
7069     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7070     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
7071                      MachinePointerInfo::getFixedStack(MF, FI));
7072   } else {
7073     // Second half of f64 is passed in another GPR.
7074     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7075     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
7076     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
7077   }
7078   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
7079 }
7080 
// FastCC yields less than a 1% performance improvement on particular
// benchmarks, but it may theoretically benefit some cases.
7083 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
7084                             CCValAssign::LocInfo LocInfo,
7085                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
7086 
7087   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7088     // X5 and X6 might be used for save-restore libcall.
7089     static const MCPhysReg GPRList[] = {
7090         RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
7091         RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
7092         RISCV::X29, RISCV::X30, RISCV::X31};
7093     if (unsigned Reg = State.AllocateReg(GPRList)) {
7094       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7095       return false;
7096     }
7097   }
7098 
7099   if (LocVT == MVT::f16) {
7100     static const MCPhysReg FPR16List[] = {
7101         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
7102         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
7103         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
7104         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
7105     if (unsigned Reg = State.AllocateReg(FPR16List)) {
7106       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7107       return false;
7108     }
7109   }
7110 
7111   if (LocVT == MVT::f32) {
7112     static const MCPhysReg FPR32List[] = {
7113         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
7114         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
7115         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
7116         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
7117     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7118       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7119       return false;
7120     }
7121   }
7122 
7123   if (LocVT == MVT::f64) {
7124     static const MCPhysReg FPR64List[] = {
7125         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
7126         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
7127         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
7128         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
7129     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7130       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7131       return false;
7132     }
7133   }
7134 
7135   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
7136     unsigned Offset4 = State.AllocateStack(4, Align(4));
7137     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
7138     return false;
7139   }
7140 
7141   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
7142     unsigned Offset5 = State.AllocateStack(8, Align(8));
7143     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
7144     return false;
7145   }
7146 
7147   return true; // CC didn't match.
7148 }
7149 
7150 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
7151                          CCValAssign::LocInfo LocInfo,
7152                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
7153 
7154   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7155     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
7156     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
7157     static const MCPhysReg GPRList[] = {
7158         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
7159         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
7160     if (unsigned Reg = State.AllocateReg(GPRList)) {
7161       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7162       return false;
7163     }
7164   }
7165 
7166   if (LocVT == MVT::f32) {
7167     // Pass in STG registers: F1, ..., F6
7168     //                        fs0 ... fs5
7169     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
7170                                           RISCV::F18_F, RISCV::F19_F,
7171                                           RISCV::F20_F, RISCV::F21_F};
7172     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7173       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7174       return false;
7175     }
7176   }
7177 
7178   if (LocVT == MVT::f64) {
7179     // Pass in STG registers: D1, ..., D6
7180     //                        fs6 ... fs11
7181     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
7182                                           RISCV::F24_D, RISCV::F25_D,
7183                                           RISCV::F26_D, RISCV::F27_D};
7184     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7185       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7186       return false;
7187     }
7188   }
7189 
7190   report_fatal_error("No registers left in GHC calling convention");
7191   return true;
7192 }
7193 
7194 // Transform physical registers into virtual registers.
7195 SDValue RISCVTargetLowering::LowerFormalArguments(
7196     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
7197     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
7198     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7199 
7200   MachineFunction &MF = DAG.getMachineFunction();
7201 
7202   switch (CallConv) {
7203   default:
7204     report_fatal_error("Unsupported calling convention");
7205   case CallingConv::C:
7206   case CallingConv::Fast:
7207     break;
7208   case CallingConv::GHC:
7209     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
7210         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
7211       report_fatal_error(
7212         "GHC calling convention requires the F and D instruction set extensions");
7213   }
7214 
7215   const Function &Func = MF.getFunction();
7216   if (Func.hasFnAttribute("interrupt")) {
7217     if (!Func.arg_empty())
7218       report_fatal_error(
7219         "Functions with the interrupt attribute cannot have arguments!");
7220 
7221     StringRef Kind =
7222       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7223 
7224     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
7225       report_fatal_error(
7226         "Function interrupt attribute argument not supported!");
7227   }
7228 
7229   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7230   MVT XLenVT = Subtarget.getXLenVT();
7231   unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
7233   std::vector<SDValue> OutChains;
7234 
7235   // Assign locations to all of the incoming arguments.
7236   SmallVector<CCValAssign, 16> ArgLocs;
7237   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7238 
7239   if (CallConv == CallingConv::Fast)
7240     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
7241   else if (CallConv == CallingConv::GHC)
7242     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
7243   else
7244     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
7245 
7246   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7247     CCValAssign &VA = ArgLocs[i];
7248     SDValue ArgValue;
7249     // Passing f64 on RV32D with a soft float ABI must be handled as a special
7250     // case.
7251     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
7252       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
7253     else if (VA.isRegLoc())
7254       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
7255     else
7256       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
7257 
7258     if (VA.getLocInfo() == CCValAssign::Indirect) {
7259       // If the original argument was split and passed by reference (e.g. i128
7260       // on RV32), we need to load all parts of it here (using the same
7261       // address). Vectors may be partly split to registers and partly to the
7262       // stack, in which case the base address is partly offset and subsequent
7263       // stores are relative to that.
7264       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
7265                                    MachinePointerInfo()));
7266       unsigned ArgIndex = Ins[i].OrigArgIndex;
7267       unsigned ArgPartOffset = Ins[i].PartOffset;
7268       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7269       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
7270         CCValAssign &PartVA = ArgLocs[i + 1];
7271         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
7272         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
7273                                       DAG.getIntPtrConstant(PartOffset, DL));
7274         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
7275                                      MachinePointerInfo()));
7276         ++i;
7277       }
7278       continue;
7279     }
7280     InVals.push_back(ArgValue);
7281   }
7282 
7283   if (IsVarArg) {
7284     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
7285     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
7286     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
7287     MachineFrameInfo &MFI = MF.getFrameInfo();
7288     MachineRegisterInfo &RegInfo = MF.getRegInfo();
7289     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
7290 
7291     // Offset of the first variable argument from stack pointer, and size of
7292     // the vararg save area. For now, the varargs save area is either zero or
7293     // large enough to hold a0-a7.
7294     int VaArgOffset, VarArgsSaveSize;
7295 
7296     // If all registers are allocated, then all varargs must be passed on the
7297     // stack and we don't need to save any argregs.
7298     if (ArgRegs.size() == Idx) {
7299       VaArgOffset = CCInfo.getNextStackOffset();
7300       VarArgsSaveSize = 0;
7301     } else {
7302       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
7303       VaArgOffset = -VarArgsSaveSize;
7304     }
7305 
    // Record the frame index of the first variable argument,
    // which is a value needed by VASTART.
7308     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7309     RVFI->setVarArgsFrameIndex(FI);
7310 
    // If saving an odd number of registers, create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
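    // For example, with three fixed GPR arguments (Idx == 3) on RV32, a3-a7
    // are saved (20 bytes) plus one 4-byte pad slot, so VarArgsSaveSize is 24
    // and the save area stays 8-byte aligned.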
7314     if (Idx % 2) {
7315       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
7316       VarArgsSaveSize += XLenInBytes;
7317     }
7318 
7319     // Copy the integer registers that may have been used for passing varargs
7320     // to the vararg save area.
7321     for (unsigned I = Idx; I < ArgRegs.size();
7322          ++I, VaArgOffset += XLenInBytes) {
7323       const Register Reg = RegInfo.createVirtualRegister(RC);
7324       RegInfo.addLiveIn(ArgRegs[I], Reg);
7325       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
7326       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7327       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7328       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
7329                                    MachinePointerInfo::getFixedStack(MF, FI));
7330       cast<StoreSDNode>(Store.getNode())
7331           ->getMemOperand()
7332           ->setValue((Value *)nullptr);
7333       OutChains.push_back(Store);
7334     }
7335     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
7336   }
7337 
  // All stores are grouped into one node so that the sizes of Ins and InVals
  // match. This only happens for vararg functions.
7340   if (!OutChains.empty()) {
7341     OutChains.push_back(Chain);
7342     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
7343   }
7344 
7345   return Chain;
7346 }
7347 
7348 /// isEligibleForTailCallOptimization - Check whether the call is eligible
7349 /// for tail call optimization.
7350 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
7351 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
7352     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
7353     const SmallVector<CCValAssign, 16> &ArgLocs) const {
7354 
7355   auto &Callee = CLI.Callee;
7356   auto CalleeCC = CLI.CallConv;
7357   auto &Outs = CLI.Outs;
7358   auto &Caller = MF.getFunction();
7359   auto CallerCC = Caller.getCallingConv();
7360 
7361   // Exception-handling functions need a special set of instructions to
7362   // indicate a return to the hardware. Tail-calling another function would
7363   // probably break this.
7364   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
7365   // should be expanded as new function attributes are introduced.
7366   if (Caller.hasFnAttribute("interrupt"))
7367     return false;
7368 
7369   // Do not tail call opt if the stack is used to pass parameters.
7370   if (CCInfo.getNextStackOffset() != 0)
7371     return false;
7372 
  // Do not tail call opt if any parameters need to be passed indirectly.
  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
  // passed indirectly. The address of the value is then passed in a register,
  // or if no register is available, on the stack. Passing indirectly often
  // also requires allocating stack space to store the value, so the
  // CCInfo.getNextStackOffset() != 0 check above is not sufficient; we must
  // also check whether any ArgLocs entries use CCValAssign::Indirect.
7381   for (auto &VA : ArgLocs)
7382     if (VA.getLocInfo() == CCValAssign::Indirect)
7383       return false;
7384 
7385   // Do not tail call opt if either caller or callee uses struct return
7386   // semantics.
7387   auto IsCallerStructRet = Caller.hasStructRetAttr();
7388   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
7389   if (IsCallerStructRet || IsCalleeStructRet)
7390     return false;
7391 
7392   // Externally-defined functions with weak linkage should not be
7393   // tail-called. The behaviour of branch instructions in this situation (as
7394   // used for tail calls) is implementation-defined, so we cannot rely on the
7395   // linker replacing the tail call with a return.
7396   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
7397     const GlobalValue *GV = G->getGlobal();
7398     if (GV->hasExternalWeakLinkage())
7399       return false;
7400   }
7401 
7402   // The callee has to preserve all registers the caller needs to preserve.
7403   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
7404   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
7405   if (CalleeCC != CallerCC) {
7406     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
7407     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
7408       return false;
7409   }
7410 
7411   // Byval parameters hand the function a pointer directly into the stack area
7412   // we want to reuse during a tail call. Working around this *is* possible
7413   // but less efficient and uglier in LowerCall.
7414   for (auto &Arg : Outs)
7415     if (Arg.Flags.isByVal())
7416       return false;
7417 
7418   return true;
7419 }
7420 
7421 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
7422   return DAG.getDataLayout().getPrefTypeAlign(
7423       VT.getTypeForEVT(*DAG.getContext()));
7424 }
7425 
7426 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
7427 // and output parameter nodes.
7428 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
7429                                        SmallVectorImpl<SDValue> &InVals) const {
7430   SelectionDAG &DAG = CLI.DAG;
7431   SDLoc &DL = CLI.DL;
7432   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
7433   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
7434   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
7435   SDValue Chain = CLI.Chain;
7436   SDValue Callee = CLI.Callee;
7437   bool &IsTailCall = CLI.IsTailCall;
7438   CallingConv::ID CallConv = CLI.CallConv;
7439   bool IsVarArg = CLI.IsVarArg;
7440   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7441   MVT XLenVT = Subtarget.getXLenVT();
7442 
7443   MachineFunction &MF = DAG.getMachineFunction();
7444 
7445   // Analyze the operands of the call, assigning locations to each operand.
7446   SmallVector<CCValAssign, 16> ArgLocs;
7447   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7448 
7449   if (CallConv == CallingConv::Fast)
7450     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
7451   else if (CallConv == CallingConv::GHC)
7452     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
7453   else
7454     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
7455 
7456   // Check if it's really possible to do a tail call.
7457   if (IsTailCall)
7458     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
7459 
7460   if (IsTailCall)
7461     ++NumTailCalls;
7462   else if (CLI.CB && CLI.CB->isMustTailCall())
7463     report_fatal_error("failed to perform tail call elimination on a call "
7464                        "site marked musttail");
7465 
7466   // Get a count of how many bytes are to be pushed on the stack.
7467   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
7468 
7469   // Create local copies for byval args
7470   SmallVector<SDValue, 8> ByValArgs;
7471   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7472     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7473     if (!Flags.isByVal())
7474       continue;
7475 
7476     SDValue Arg = OutVals[i];
7477     unsigned Size = Flags.getByValSize();
7478     Align Alignment = Flags.getNonZeroByValAlign();
7479 
7480     int FI =
7481         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
7482     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7483     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
7484 
7485     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
7486                           /*IsVolatile=*/false,
7487                           /*AlwaysInline=*/false, IsTailCall,
7488                           MachinePointerInfo(), MachinePointerInfo());
7489     ByValArgs.push_back(FIPtr);
7490   }
7491 
7492   if (!IsTailCall)
7493     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
7494 
7495   // Copy argument values to their designated locations.
7496   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
7497   SmallVector<SDValue, 8> MemOpChains;
7498   SDValue StackPtr;
7499   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
7500     CCValAssign &VA = ArgLocs[i];
7501     SDValue ArgValue = OutVals[i];
7502     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7503 
7504     // Handle passing f64 on RV32D with a soft float ABI as a special case.
    bool IsF64OnRV32DSoftABI =
        VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
    if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
      SDValue SplitF64 = DAG.getNode(
          RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
      SDValue Lo = SplitF64.getValue(0);
      SDValue Hi = SplitF64.getValue(1);

      Register RegLo = VA.getLocReg();
      RegsToPass.push_back(std::make_pair(RegLo, Lo));

      if (RegLo == RISCV::X17) {
        // Second half of f64 is passed on the stack.
        // Work out the address of the stack slot.
        if (!StackPtr.getNode())
          StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
        // Emit the store.
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
      } else {
        // Second half of f64 is passed in another GPR.
        assert(RegLo < RISCV::X31 && "Invalid register pair");
        Register RegHigh = RegLo + 1;
        RegsToPass.push_back(std::make_pair(RegHigh, Hi));
      }
      continue;
    }

    // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
    // as any other MemLoc.

    // Promote the value if needed.
    // For now, only handle fully promoted and indirect arguments.
    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // Store the argument in a stack slot and pass its address.
      Align StackAlign =
          std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
                   getPrefTypeAlign(ArgValue.getValueType(), DAG));
      TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
      // If the original argument was split (e.g. i128), we need
      // to store the required parts of it here (and pass just one address).
      // Vectors may be partly split to registers and partly to the stack, in
      // which case the base address is partly offset and subsequent stores are
      // relative to that.
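      // For example, an i128 argument on RV64 is split into two i64 parts
      // with part offsets 0 and 8; both parts are stored into a single
      // 16-byte stack slot and only the slot's address is passed.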
      unsigned ArgIndex = Outs[i].OrigArgIndex;
      unsigned ArgPartOffset = Outs[i].PartOffset;
      assert(VA.getValVT().isVector() || ArgPartOffset == 0);
      // Calculate the total size to store. We don't have direct access to the
      // parts being stored, so we have to walk them here and collect their
      // values and offsets.
      SmallVector<std::pair<SDValue, unsigned>> Parts;
      while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[i + 1];
        unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
        EVT PartVT = PartValue.getValueType();
        StoredSize += PartVT.getStoreSize();
        StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
        Parts.push_back(std::make_pair(PartValue, PartOffset));
        ++i;
      }
      SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                       MachinePointerInfo::getFixedStack(MF, FI)));
      for (const auto &Part : Parts) {
        SDValue PartValue = Part.first;
        unsigned PartOffset = Part.second;
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
                                      DAG.getIntPtrConstant(PartOffset, DL));
        MemOpChains.push_back(
            DAG.getStore(Chain, DL, PartValue, Address,
                         MachinePointerInfo::getFixedStack(MF, FI)));
      }
      ArgValue = SpillSlot;
    } else {
      ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
    }

    // Use local copy if it is a byval arg.
    if (Flags.isByVal())
      ArgValue = ByValArgs[j++];

    if (VA.isRegLoc()) {
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");
      assert(!IsTailCall && "Tail call not allowed if stack is used "
                            "for passing parameters");

      // Work out the address of the stack slot.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
      SDValue Address =
          DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                      DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  SDValue Glue;

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
    Glue = Chain.getValue(1);
  }

  // Validate that none of the argument registers have been marked as
  // reserved; if any have, report an error. Do the same for the return
  // address register if this is not a tail call.
  validateCCReservedRegs(RegsToPass, MF);
  if (!IsTailCall &&
      MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
    MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
        MF.getFunction(),
        "Return address register required, but has been reserved."});

  // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
  // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
  // split it, and so that the direct call can be matched by PseudoCALL.
  if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = S->getGlobal();

    unsigned OpFlags = RISCVII::MO_CALL;
    if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
      OpFlags = RISCVII::MO_PLT;

    Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    unsigned OpFlags = RISCVII::MO_CALL;

    if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
                                                 nullptr))
      OpFlags = RISCVII::MO_PLT;

    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  if (!IsTailCall) {
    // Add a register mask operand representing the call-preserved registers.
    const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
    const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  if (IsTailCall) {
    MF.getFrameInfo().setHasTailCall();
    return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
  }

  Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true),
                             Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);

  // Copy all of the result registers out of their specified physregs.
  for (auto &VA : RVLocs) {
    // Copy the value out.
    SDValue RetValue =
        DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
    // Glue the RetValue to the end of the call sequence.
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
      assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
      SDValue RetValue2 =
          DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
      Chain = RetValue2.getValue(1);
      Glue = RetValue2.getValue(2);
      RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
                             RetValue2);
    }

    RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);

    InVals.push_back(RetValue);
  }

  return Chain;
}

bool RISCVTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);

  Optional<unsigned> FirstMaskArgument;
  if (Subtarget.hasStdExtV())
    FirstMaskArgument = preAssignMask(Outs);

  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
    if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
                 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
                 *this, FirstMaskArgument))
      return false;
  }
  return true;
}

SDValue
RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();

  // Stores the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
                    nullptr);

  if (CallConv == CallingConv::GHC && !RVLocs.empty())
    report_fatal_error("GHC functions return void only");

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
    SDValue Val = OutVals[i];
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
      // Handle returning f64 on RV32D with a soft float ABI.
      assert(VA.isRegLoc() && "Expected return via registers");
      SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
                                     DAG.getVTList(MVT::i32, MVT::i32), Val);
      SDValue Lo = SplitF64.getValue(0);
      SDValue Hi = SplitF64.getValue(1);
      Register RegLo = VA.getLocReg();
      assert(RegLo < RISCV::X31 && "Invalid register pair");
      Register RegHi = RegLo + 1;

      if (STI.isRegisterReservedByUser(RegLo) ||
          STI.isRegisterReservedByUser(RegHi))
        MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
            MF.getFunction(),
            "Return value register required, but has been reserved."});

      Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
      Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
    } else {
      // Handle a 'normal' return.
      Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);

      if (STI.isRegisterReservedByUser(VA.getLocReg()))
        MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
            MF.getFunction(),
            "Return value register required, but has been reserved."});

      // Guarantee that all emitted copies are stuck together.
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
    }
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue node if we have it.
  if (Glue.getNode()) {
    RetOps.push_back(Glue);
  }

  // Interrupt service routines use different return instructions.
  const Function &Func = DAG.getMachineFunction().getFunction();
  if (Func.hasFnAttribute("interrupt")) {
    if (!Func.getReturnType()->isVoidTy())
      report_fatal_error(
          "Functions with the interrupt attribute must have void return type!");

    MachineFunction &MF = DAG.getMachineFunction();
    StringRef Kind =
      MF.getFunction().getFnAttribute("interrupt").getValueAsString();

    unsigned RetOpc;
    if (Kind == "user")
      RetOpc = RISCVISD::URET_FLAG;
    else if (Kind == "supervisor")
      RetOpc = RISCVISD::SRET_FLAG;
    else
      RetOpc = RISCVISD::MRET_FLAG;

    return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
  }

  return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
}

void RISCVTargetLowering::validateCCReservedRegs(
    const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
    MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();

  if (llvm::any_of(Regs, [&STI](auto Reg) {
        return STI.isRegisterReservedByUser(Reg.first);
      }))
    F.getContext().diagnose(DiagnosticInfoUnsupported{
        F, "Argument register required, but has been reserved."});
}

bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define NODE_NAME_CASE(NODE)                                                   \
  case RISCVISD::NODE:                                                         \
    return "RISCVISD::" #NODE;
  // clang-format off
  switch ((RISCVISD::NodeType)Opcode) {
  case RISCVISD::FIRST_NUMBER:
    break;
  NODE_NAME_CASE(RET_FLAG)
  NODE_NAME_CASE(URET_FLAG)
  NODE_NAME_CASE(SRET_FLAG)
  NODE_NAME_CASE(MRET_FLAG)
  NODE_NAME_CASE(CALL)
  NODE_NAME_CASE(SELECT_CC)
  NODE_NAME_CASE(BR_CC)
  NODE_NAME_CASE(BuildPairF64)
  NODE_NAME_CASE(SplitF64)
  NODE_NAME_CASE(TAIL)
  NODE_NAME_CASE(MULHSU)
  NODE_NAME_CASE(SLLW)
  NODE_NAME_CASE(SRAW)
  NODE_NAME_CASE(SRLW)
  NODE_NAME_CASE(DIVW)
  NODE_NAME_CASE(DIVUW)
  NODE_NAME_CASE(REMUW)
  NODE_NAME_CASE(ROLW)
  NODE_NAME_CASE(RORW)
  NODE_NAME_CASE(CLZW)
  NODE_NAME_CASE(CTZW)
  NODE_NAME_CASE(FSLW)
  NODE_NAME_CASE(FSRW)
  NODE_NAME_CASE(FSL)
  NODE_NAME_CASE(FSR)
  NODE_NAME_CASE(FMV_H_X)
  NODE_NAME_CASE(FMV_X_ANYEXTH)
  NODE_NAME_CASE(FMV_W_X_RV64)
  NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
  NODE_NAME_CASE(READ_CYCLE_WIDE)
  NODE_NAME_CASE(GREV)
  NODE_NAME_CASE(GREVW)
  NODE_NAME_CASE(GORC)
  NODE_NAME_CASE(GORCW)
  NODE_NAME_CASE(SHFL)
  NODE_NAME_CASE(SHFLW)
  NODE_NAME_CASE(UNSHFL)
  NODE_NAME_CASE(UNSHFLW)
  NODE_NAME_CASE(BCOMPRESS)
  NODE_NAME_CASE(BCOMPRESSW)
  NODE_NAME_CASE(BDECOMPRESS)
  NODE_NAME_CASE(BDECOMPRESSW)
  NODE_NAME_CASE(VMV_V_X_VL)
  NODE_NAME_CASE(VFMV_V_F_VL)
  NODE_NAME_CASE(VMV_X_S)
  NODE_NAME_CASE(VMV_S_X_VL)
  NODE_NAME_CASE(VFMV_S_F_VL)
  NODE_NAME_CASE(SPLAT_VECTOR_I64)
  NODE_NAME_CASE(READ_VLENB)
  NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
  NODE_NAME_CASE(VSLIDEUP_VL)
  NODE_NAME_CASE(VSLIDE1UP_VL)
  NODE_NAME_CASE(VSLIDEDOWN_VL)
  NODE_NAME_CASE(VSLIDE1DOWN_VL)
  NODE_NAME_CASE(VID_VL)
  NODE_NAME_CASE(VFNCVT_ROD_VL)
  NODE_NAME_CASE(VECREDUCE_ADD_VL)
  NODE_NAME_CASE(VECREDUCE_UMAX_VL)
  NODE_NAME_CASE(VECREDUCE_SMAX_VL)
  NODE_NAME_CASE(VECREDUCE_UMIN_VL)
  NODE_NAME_CASE(VECREDUCE_SMIN_VL)
  NODE_NAME_CASE(VECREDUCE_AND_VL)
  NODE_NAME_CASE(VECREDUCE_OR_VL)
  NODE_NAME_CASE(VECREDUCE_XOR_VL)
  NODE_NAME_CASE(VECREDUCE_FADD_VL)
  NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
  NODE_NAME_CASE(VECREDUCE_FMIN_VL)
  NODE_NAME_CASE(VECREDUCE_FMAX_VL)
  NODE_NAME_CASE(ADD_VL)
  NODE_NAME_CASE(AND_VL)
  NODE_NAME_CASE(MUL_VL)
  NODE_NAME_CASE(OR_VL)
  NODE_NAME_CASE(SDIV_VL)
  NODE_NAME_CASE(SHL_VL)
  NODE_NAME_CASE(SREM_VL)
  NODE_NAME_CASE(SRA_VL)
  NODE_NAME_CASE(SRL_VL)
  NODE_NAME_CASE(SUB_VL)
  NODE_NAME_CASE(UDIV_VL)
  NODE_NAME_CASE(UREM_VL)
  NODE_NAME_CASE(XOR_VL)
  NODE_NAME_CASE(FADD_VL)
  NODE_NAME_CASE(FSUB_VL)
  NODE_NAME_CASE(FMUL_VL)
  NODE_NAME_CASE(FDIV_VL)
  NODE_NAME_CASE(FNEG_VL)
  NODE_NAME_CASE(FABS_VL)
  NODE_NAME_CASE(FSQRT_VL)
  NODE_NAME_CASE(FMA_VL)
  NODE_NAME_CASE(FCOPYSIGN_VL)
  NODE_NAME_CASE(SMIN_VL)
  NODE_NAME_CASE(SMAX_VL)
  NODE_NAME_CASE(UMIN_VL)
  NODE_NAME_CASE(UMAX_VL)
  NODE_NAME_CASE(FMINNUM_VL)
  NODE_NAME_CASE(FMAXNUM_VL)
  NODE_NAME_CASE(MULHS_VL)
  NODE_NAME_CASE(MULHU_VL)
  NODE_NAME_CASE(FP_TO_SINT_VL)
  NODE_NAME_CASE(FP_TO_UINT_VL)
  NODE_NAME_CASE(SINT_TO_FP_VL)
  NODE_NAME_CASE(UINT_TO_FP_VL)
  NODE_NAME_CASE(FP_EXTEND_VL)
  NODE_NAME_CASE(FP_ROUND_VL)
  NODE_NAME_CASE(SETCC_VL)
  NODE_NAME_CASE(VSELECT_VL)
  NODE_NAME_CASE(VMAND_VL)
  NODE_NAME_CASE(VMOR_VL)
  NODE_NAME_CASE(VMXOR_VL)
  NODE_NAME_CASE(VMCLR_VL)
  NODE_NAME_CASE(VMSET_VL)
  NODE_NAME_CASE(VRGATHER_VX_VL)
  NODE_NAME_CASE(VRGATHER_VV_VL)
  NODE_NAME_CASE(VRGATHEREI16_VV_VL)
  NODE_NAME_CASE(VSEXT_VL)
  NODE_NAME_CASE(VZEXT_VL)
  NODE_NAME_CASE(VPOPC_VL)
  NODE_NAME_CASE(VLE_VL)
  NODE_NAME_CASE(VSE_VL)
  NODE_NAME_CASE(READ_CSR)
  NODE_NAME_CASE(WRITE_CSR)
  NODE_NAME_CASE(SWAP_CSR)
  }
  // clang-format on
  return nullptr;
#undef NODE_NAME_CASE
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
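///
/// For this target: 'f' (floating-point register) and 'v' (vector register)
/// are register-class constraints; 'I' (12-bit signed immediate), 'J'
/// (integer zero) and 'K' (5-bit unsigned immediate) are immediate
/// constraints; 'A' is a memory constraint (an address held in a
/// general-purpose register).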
RISCVTargetLowering::ConstraintType
RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'f':
    case 'v':
      return C_RegisterClass;
    case 'I':
    case 'J':
    case 'K':
      return C_Immediate;
    case 'A':
      return C_Memory;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass *>
RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // RISCV register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      return std::make_pair(0U, &RISCV::GPRRegClass);
    case 'f':
      if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
        return std::make_pair(0U, &RISCV::FPR16RegClass);
      if (Subtarget.hasStdExtF() && VT == MVT::f32)
        return std::make_pair(0U, &RISCV::FPR32RegClass);
      if (Subtarget.hasStdExtD() && VT == MVT::f64)
        return std::make_pair(0U, &RISCV::FPR64RegClass);
      break;
    case 'v':
      for (const auto *RC :
           {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
        if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
          return std::make_pair(0U, RC);
      }
      break;
    default:
      break;
    }
  }

  // Clang will correctly decode the usage of register name aliases into their
  // official names. However, other frontends like `rustc` do not. This allows
  // users of these frontends to use the ABI names for registers in LLVM-style
  // register constraints.
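  // For example, the constraint "{a0}" is mapped to X10 here, just as if the
  // official name "{x10}" had been written.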
  unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
                               .Case("{zero}", RISCV::X0)
                               .Case("{ra}", RISCV::X1)
                               .Case("{sp}", RISCV::X2)
                               .Case("{gp}", RISCV::X3)
                               .Case("{tp}", RISCV::X4)
                               .Case("{t0}", RISCV::X5)
                               .Case("{t1}", RISCV::X6)
                               .Case("{t2}", RISCV::X7)
                               .Cases("{s0}", "{fp}", RISCV::X8)
                               .Case("{s1}", RISCV::X9)
                               .Case("{a0}", RISCV::X10)
                               .Case("{a1}", RISCV::X11)
                               .Case("{a2}", RISCV::X12)
                               .Case("{a3}", RISCV::X13)
                               .Case("{a4}", RISCV::X14)
                               .Case("{a5}", RISCV::X15)
                               .Case("{a6}", RISCV::X16)
                               .Case("{a7}", RISCV::X17)
                               .Case("{s2}", RISCV::X18)
                               .Case("{s3}", RISCV::X19)
                               .Case("{s4}", RISCV::X20)
                               .Case("{s5}", RISCV::X21)
                               .Case("{s6}", RISCV::X22)
                               .Case("{s7}", RISCV::X23)
                               .Case("{s8}", RISCV::X24)
                               .Case("{s9}", RISCV::X25)
                               .Case("{s10}", RISCV::X26)
                               .Case("{s11}", RISCV::X27)
                               .Case("{t3}", RISCV::X28)
                               .Case("{t4}", RISCV::X29)
                               .Case("{t5}", RISCV::X30)
                               .Case("{t6}", RISCV::X31)
                               .Default(RISCV::NoRegister);
  if (XRegFromAlias != RISCV::NoRegister)
    return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);

  // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
  // TableGen record rather than the AsmName to choose registers for InlineAsm
  // constraints, and because we want to match those names to the widest
  // floating-point register type available, manually select floating-point
  // registers here.
  //
  // The second name in each pair of cases below is the ABI name of the
  // register, so that frontends can also use the ABI names in register
  // constraint lists.
  if (Subtarget.hasStdExtF()) {
    unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
                        .Cases("{f0}", "{ft0}", RISCV::F0_F)
                        .Cases("{f1}", "{ft1}", RISCV::F1_F)
                        .Cases("{f2}", "{ft2}", RISCV::F2_F)
                        .Cases("{f3}", "{ft3}", RISCV::F3_F)
                        .Cases("{f4}", "{ft4}", RISCV::F4_F)
                        .Cases("{f5}", "{ft5}", RISCV::F5_F)
                        .Cases("{f6}", "{ft6}", RISCV::F6_F)
                        .Cases("{f7}", "{ft7}", RISCV::F7_F)
                        .Cases("{f8}", "{fs0}", RISCV::F8_F)
                        .Cases("{f9}", "{fs1}", RISCV::F9_F)
                        .Cases("{f10}", "{fa0}", RISCV::F10_F)
                        .Cases("{f11}", "{fa1}", RISCV::F11_F)
                        .Cases("{f12}", "{fa2}", RISCV::F12_F)
                        .Cases("{f13}", "{fa3}", RISCV::F13_F)
                        .Cases("{f14}", "{fa4}", RISCV::F14_F)
                        .Cases("{f15}", "{fa5}", RISCV::F15_F)
                        .Cases("{f16}", "{fa6}", RISCV::F16_F)
                        .Cases("{f17}", "{fa7}", RISCV::F17_F)
                        .Cases("{f18}", "{fs2}", RISCV::F18_F)
                        .Cases("{f19}", "{fs3}", RISCV::F19_F)
                        .Cases("{f20}", "{fs4}", RISCV::F20_F)
                        .Cases("{f21}", "{fs5}", RISCV::F21_F)
                        .Cases("{f22}", "{fs6}", RISCV::F22_F)
                        .Cases("{f23}", "{fs7}", RISCV::F23_F)
                        .Cases("{f24}", "{fs8}", RISCV::F24_F)
                        .Cases("{f25}", "{fs9}", RISCV::F25_F)
                        .Cases("{f26}", "{fs10}", RISCV::F26_F)
                        .Cases("{f27}", "{fs11}", RISCV::F27_F)
                        .Cases("{f28}", "{ft8}", RISCV::F28_F)
                        .Cases("{f29}", "{ft9}", RISCV::F29_F)
                        .Cases("{f30}", "{ft10}", RISCV::F30_F)
                        .Cases("{f31}", "{ft11}", RISCV::F31_F)
                        .Default(RISCV::NoRegister);
    if (FReg != RISCV::NoRegister) {
      assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
      if (Subtarget.hasStdExtD()) {
        unsigned RegNo = FReg - RISCV::F0_F;
        unsigned DReg = RISCV::F0_D + RegNo;
        return std::make_pair(DReg, &RISCV::FPR64RegClass);
      }
      return std::make_pair(FReg, &RISCV::FPR32RegClass);
    }
  }

  if (Subtarget.hasStdExtV()) {
    Register VReg = StringSwitch<Register>(Constraint.lower())
                        .Case("{v0}", RISCV::V0)
                        .Case("{v1}", RISCV::V1)
                        .Case("{v2}", RISCV::V2)
                        .Case("{v3}", RISCV::V3)
                        .Case("{v4}", RISCV::V4)
                        .Case("{v5}", RISCV::V5)
                        .Case("{v6}", RISCV::V6)
                        .Case("{v7}", RISCV::V7)
                        .Case("{v8}", RISCV::V8)
                        .Case("{v9}", RISCV::V9)
                        .Case("{v10}", RISCV::V10)
                        .Case("{v11}", RISCV::V11)
                        .Case("{v12}", RISCV::V12)
                        .Case("{v13}", RISCV::V13)
                        .Case("{v14}", RISCV::V14)
                        .Case("{v15}", RISCV::V15)
                        .Case("{v16}", RISCV::V16)
                        .Case("{v17}", RISCV::V17)
                        .Case("{v18}", RISCV::V18)
                        .Case("{v19}", RISCV::V19)
                        .Case("{v20}", RISCV::V20)
                        .Case("{v21}", RISCV::V21)
                        .Case("{v22}", RISCV::V22)
                        .Case("{v23}", RISCV::V23)
                        .Case("{v24}", RISCV::V24)
                        .Case("{v25}", RISCV::V25)
                        .Case("{v26}", RISCV::V26)
                        .Case("{v27}", RISCV::V27)
                        .Case("{v28}", RISCV::V28)
                        .Case("{v29}", RISCV::V29)
                        .Case("{v30}", RISCV::V30)
                        .Case("{v31}", RISCV::V31)
                        .Default(RISCV::NoRegister);
    if (VReg != RISCV::NoRegister) {
      if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
        return std::make_pair(VReg, &RISCV::VMRegClass);
      if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
        return std::make_pair(VReg, &RISCV::VRRegClass);
      for (const auto *RC :
           {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
        if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
          VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
          return std::make_pair(VReg, RC);
        }
      }
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

unsigned
RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Currently only support length 1 constraints.
  if (ConstraintCode.size() == 1) {
    switch (ConstraintCode[0]) {
    case 'A':
      return InlineAsm::Constraint_A;
    default:
      break;
    }
  }

  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}

void RISCVTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  // Currently only support length 1 constraints.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I':
      // Validate & create a 12-bit signed immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getSExtValue();
        if (isInt<12>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'J':
      // Validate & create an integer zero operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0)
          Ops.push_back(
              DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
      return;
    case 'K':
      // Validate & create a 5-bit unsigned immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getZExtValue();
        if (isUInt<5>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    default:
      break;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

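// The two fence hooks below bracket atomic loads and stores with the fences
// their memory ordering requires: a leading fence before a seq_cst load or a
// release-or-stronger store, and a trailing fence after an acquire-or-stronger
// load.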
Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
    return Builder.CreateFence(Ord);
  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Release);
  return nullptr;
}

Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                    Instruction *Inst,
                                                    AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Acquire);
  return nullptr;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
  // point operations can't be used in an lr/sc sequence without breaking the
  // forward-progress guarantee.
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

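  // i8 and i16 atomicrmw operations have no native AMO instructions, so expand
  // them to a masked LR/SC loop on the containing aligned word via the
  // riscv_masked_atomicrmw_* intrinsics (see emitMaskedAtomicRMWIntrinsic).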
  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
  if (XLen == 32) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i32;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i32;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i32;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
    }
  }

  if (XLen == 64) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i64;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i64;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i64;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
    }
  }

  llvm_unreachable("Unexpected XLen");
}

Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering =
      Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
  Type *Tys[] = {AlignedAddr->getType()};
  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
      AI->getModule(),
      getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);

  if (XLen == 64) {
    Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
  }

  Value *Result;

  // Must pass the shift amount needed to sign extend the loaded value prior
  // to performing a signed comparison for min/max. ShiftAmt is the number of
  // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
  // is the number of bits to left+right shift the value in order to
  // sign-extend.
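  // For example, an i8 field at bit offset 16 on RV32 has ShiftAmt == 16 and
  // ValWidth == 8, giving SextShamt == 32 - 8 - 16 == 8: shifting the loaded
  // value left by 8 puts the field's sign bit in the MSB, and an arithmetic
  // shift right by 8 then sign-extends the field in place.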
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    unsigned ValWidth =
        DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LrwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }

  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
  if (XLen == 64) {
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
  }
  Type *Tys[] = {AlignedAddr->getType()};
  Function *MaskedCmpXchg =
      Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
  return false;
}

bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                     EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasStdExtZfh();
  case MVT::f32:
    return Subtarget.hasStdExtF();
  case MVT::f64:
    return Subtarget.hasStdExtD();
  default:
    break;
  }

  return false;
}

Register RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X10;
}

Register RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X11;
}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions when a libcall argument
  // or return value is an f32 for the LP64 ABI.
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
    return false;

  return true;
}

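// On RV64, i32 libcall arguments and return values are sign-extended to i64,
// matching the convention that 32-bit values are held in registers in
// sign-extended form.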
bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
  if (Subtarget.is64Bit() && Type == MVT::i32)
    return true;

  return IsSigned;
}

bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                 SDValue C) const {
  // Check integral scalar types.
  if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
    if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
      return false;
    if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
      // Break the MUL to a SLLI and an ADD/SUB.
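      // For example, x * 9 can be lowered as (x << 3) + x, and x * 7 as
      // (x << 3) - x.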
      const APInt &Imm = ConstNode->getAPIntValue();
      if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
          (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
        return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size >= XLen.
      if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
        return false;
      // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
      // a pair of LUI/ADDI.
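      // For example, x * 6144 (i.e. 3 << 11) can be lowered as
      // ((x << 1) + x) << 11.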
      if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
        APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
        if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
            (1 - ImmS).isPowerOf2())
          return true;
      }
    }
  }

  return false;
}

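// Scalable vector memory accesses only require alignment to the element type,
// so an element-aligned access is reported as both allowed and fast; anything
// else (including all fixed-length vector types) is rejected here.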
bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  if (!VT.isScalableVector())
    return false;

  EVT ElemVT = VT.getVectorElementType();
  if (Alignment >= ElemVT.getStoreSize()) {
    if (Fast)
      *Fast = true;
    return true;
  }

  return false;
}

bool RISCVTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  EVT ValueVT = Val.getValueType();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Cast the f16 to i16, extend to i32, pad the upper 16 bits with ones to
    // make a float NaN, and cast to f32.
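    // For example, f16 1.0 (0x3C00) becomes the i32 0xFFFF3C00, which is a
    // NaN when viewed as an f32. This matches the NaN-boxing convention used
    // by the F/D extensions for narrower values held in wider FP registers.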
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
                      DAG.getConstant(0xFFFF0000, DL, MVT::i32));
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
    Parts[0] = Val;
    return true;
  }

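  // A scalable vector value narrower than its register part (for example, an
  // nxv1i8 value passed in an nxv8i8 part) is widened by inserting it at
  // element 0 of an undef part-sized vector; joinRegisterPartsIntoValue
  // performs the matching extraction.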
  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      // If the element types are different, bitcast to the same element type of
      // PartVT first.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
        EVT SameEltTypeVT =
            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
        Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
      }
      Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                        Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
      Parts[0] = Val;
      return true;
    }
  }
  return false;
}

SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    SDValue Val = Parts[0];

    // Cast the f32 to i32, truncate to i16, and cast back to f16.
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
    return Val;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    SDValue Val = Parts[0];
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      EVT SameEltTypeVT = ValueVT;
      // If the element types are different, convert it to the same element type
      // of PartVT.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
        SameEltTypeVT =
            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
      }
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
                        DAG.getConstant(0, DL, Subtarget.getXLenVT()));
      if (ValueEltVT != PartEltVT)
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      return Val;
    }
  }
  return SDValue();
}

#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

Register
RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                       const MachineFunction &MF) const {
  Register Reg = MatchRegisterAltName(RegName);
  if (Reg == RISCV::NoRegister)
    Reg = MatchRegisterName(RegName);
  if (Reg == RISCV::NoRegister)
    report_fatal_error(
        Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
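  // Registers accessed by name (e.g. via llvm.read_register) must be reserved;
  // otherwise ordinary codegen would be free to allocate and clobber them.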
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
    report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
                             StringRef(RegName) + "\"."));
  return Reg;
}

namespace llvm {
namespace RISCVVIntrinsicsTable {

#define GET_RISCVVIntrinsicsTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVVIntrinsicsTable

} // namespace llvm