1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
28 #include "llvm/CodeGen/ValueTypes.h"
29 #include "llvm/IR/DiagnosticInfo.h"
30 #include "llvm/IR/DiagnosticPrinter.h"
31 #include "llvm/IR/IntrinsicsRISCV.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/KnownBits.h"
35 #include "llvm/Support/MathExtras.h"
36 #include "llvm/Support/raw_ostream.h"
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "riscv-lower"
41 
42 STATISTIC(NumTailCalls, "Number of tail calls");
43 
44 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
45                                          const RISCVSubtarget &STI)
46     : TargetLowering(TM), Subtarget(STI) {
47 
48   if (Subtarget.isRV32E())
49     report_fatal_error("Codegen not yet implemented for RV32E");
50 
51   RISCVABI::ABI ABI = Subtarget.getTargetABI();
52   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
53 
54   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
55       !Subtarget.hasStdExtF()) {
56     errs() << "Hard-float 'f' ABI can't be used for a target that "
57                 "doesn't support the F instruction set extension (ignoring "
58                           "target-abi)\n";
59     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
60   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
61              !Subtarget.hasStdExtD()) {
62     errs() << "Hard-float 'd' ABI can't be used for a target that "
63               "doesn't support the D instruction set extension (ignoring "
64               "target-abi)\n";
65     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
66   }
67 
68   switch (ABI) {
69   default:
70     report_fatal_error("Don't know how to lower this ABI");
71   case RISCVABI::ABI_ILP32:
72   case RISCVABI::ABI_ILP32F:
73   case RISCVABI::ABI_ILP32D:
74   case RISCVABI::ABI_LP64:
75   case RISCVABI::ABI_LP64F:
76   case RISCVABI::ABI_LP64D:
77     break;
78   }
79 
80   MVT XLenVT = Subtarget.getXLenVT();
81 
82   // Set up the register classes.
83   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
84 
85   if (Subtarget.hasStdExtZfh())
86     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
87   if (Subtarget.hasStdExtF())
88     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
89   if (Subtarget.hasStdExtD())
90     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
91 
92   static const MVT::SimpleValueType BoolVecVTs[] = {
93       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
94       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
95   static const MVT::SimpleValueType IntVecVTs[] = {
96       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
97       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
98       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
99       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
100       MVT::nxv4i64, MVT::nxv8i64};
101   static const MVT::SimpleValueType F16VecVTs[] = {
102       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
103       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
104   static const MVT::SimpleValueType F32VecVTs[] = {
105       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
106   static const MVT::SimpleValueType F64VecVTs[] = {
107       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
108 
109   if (Subtarget.hasStdExtV()) {
110     auto addRegClassForRVV = [this](MVT VT) {
111       unsigned Size = VT.getSizeInBits().getKnownMinValue();
112       assert(Size <= 512 && isPowerOf2_32(Size));
113       const TargetRegisterClass *RC;
114       if (Size <= 64)
115         RC = &RISCV::VRRegClass;
116       else if (Size == 128)
117         RC = &RISCV::VRM2RegClass;
118       else if (Size == 256)
119         RC = &RISCV::VRM4RegClass;
120       else
121         RC = &RISCV::VRM8RegClass;
122 
123       addRegisterClass(VT, RC);
124     };
125 
126     for (MVT VT : BoolVecVTs)
127       addRegClassForRVV(VT);
128     for (MVT VT : IntVecVTs)
129       addRegClassForRVV(VT);
130 
131     if (Subtarget.hasStdExtZfh())
132       for (MVT VT : F16VecVTs)
133         addRegClassForRVV(VT);
134 
135     if (Subtarget.hasStdExtF())
136       for (MVT VT : F32VecVTs)
137         addRegClassForRVV(VT);
138 
139     if (Subtarget.hasStdExtD())
140       for (MVT VT : F64VecVTs)
141         addRegClassForRVV(VT);
142 
143     if (Subtarget.useRVVForFixedLengthVectors()) {
144       auto addRegClassForFixedVectors = [this](MVT VT) {
145         MVT ContainerVT = getContainerForFixedLengthVector(VT);
146         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
147         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
148         addRegisterClass(VT, TRI.getRegClass(RCID));
149       };
150       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
151         if (useRVVForFixedLengthVectorVT(VT))
152           addRegClassForFixedVectors(VT);
153 
154       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
155         if (useRVVForFixedLengthVectorVT(VT))
156           addRegClassForFixedVectors(VT);
157     }
158   }
159 
160   // Compute derived properties from the register classes.
161   computeRegisterProperties(STI.getRegisterInfo());
162 
163   setStackPointerRegisterToSaveRestore(RISCV::X2);
164 
165   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
166     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
167 
168   // TODO: add all necessary setOperationAction calls.
169   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
170 
171   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
172   setOperationAction(ISD::BR_CC, XLenVT, Expand);
173   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
174   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
175 
176   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
177   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
178 
179   setOperationAction(ISD::VASTART, MVT::Other, Custom);
180   setOperationAction(ISD::VAARG, MVT::Other, Expand);
181   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
182   setOperationAction(ISD::VAEND, MVT::Other, Expand);
183 
184   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
185   if (!Subtarget.hasStdExtZbb()) {
186     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
187     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
188   }
189 
190   if (Subtarget.is64Bit()) {
191     setOperationAction(ISD::ADD, MVT::i32, Custom);
192     setOperationAction(ISD::SUB, MVT::i32, Custom);
193     setOperationAction(ISD::SHL, MVT::i32, Custom);
194     setOperationAction(ISD::SRA, MVT::i32, Custom);
195     setOperationAction(ISD::SRL, MVT::i32, Custom);
196 
197     setOperationAction(ISD::UADDO, MVT::i32, Custom);
198     setOperationAction(ISD::USUBO, MVT::i32, Custom);
199     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
200     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
201   }
202 
203   if (!Subtarget.hasStdExtM()) {
204     setOperationAction(ISD::MUL, XLenVT, Expand);
205     setOperationAction(ISD::MULHS, XLenVT, Expand);
206     setOperationAction(ISD::MULHU, XLenVT, Expand);
207     setOperationAction(ISD::SDIV, XLenVT, Expand);
208     setOperationAction(ISD::UDIV, XLenVT, Expand);
209     setOperationAction(ISD::SREM, XLenVT, Expand);
210     setOperationAction(ISD::UREM, XLenVT, Expand);
211   } else {
212     if (Subtarget.is64Bit()) {
213       setOperationAction(ISD::MUL, MVT::i32, Custom);
214       setOperationAction(ISD::MUL, MVT::i128, Custom);
215 
216       setOperationAction(ISD::SDIV, MVT::i8, Custom);
217       setOperationAction(ISD::UDIV, MVT::i8, Custom);
218       setOperationAction(ISD::UREM, MVT::i8, Custom);
219       setOperationAction(ISD::SDIV, MVT::i16, Custom);
220       setOperationAction(ISD::UDIV, MVT::i16, Custom);
221       setOperationAction(ISD::UREM, MVT::i16, Custom);
222       setOperationAction(ISD::SDIV, MVT::i32, Custom);
223       setOperationAction(ISD::UDIV, MVT::i32, Custom);
224       setOperationAction(ISD::UREM, MVT::i32, Custom);
225     } else {
226       setOperationAction(ISD::MUL, MVT::i64, Custom);
227     }
228   }
229 
230   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
231   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
232   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
233   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
234 
235   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
236   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
237   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
238 
239   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
240     if (Subtarget.is64Bit()) {
241       setOperationAction(ISD::ROTL, MVT::i32, Custom);
242       setOperationAction(ISD::ROTR, MVT::i32, Custom);
243     }
244   } else {
245     setOperationAction(ISD::ROTL, XLenVT, Expand);
246     setOperationAction(ISD::ROTR, XLenVT, Expand);
247   }
248 
249   if (Subtarget.hasStdExtZbp()) {
250     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
251     // more combining.
252     setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
253     setOperationAction(ISD::BSWAP, XLenVT, Custom);
254 
255     if (Subtarget.is64Bit()) {
256       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
257       setOperationAction(ISD::BSWAP, MVT::i32, Custom);
258     }
259   } else {
260     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
261     // pattern match it directly in isel.
262     setOperationAction(ISD::BSWAP, XLenVT,
263                        Subtarget.hasStdExtZbb() ? Legal : Expand);
264   }
265 
266   if (Subtarget.hasStdExtZbb()) {
267     setOperationAction(ISD::SMIN, XLenVT, Legal);
268     setOperationAction(ISD::SMAX, XLenVT, Legal);
269     setOperationAction(ISD::UMIN, XLenVT, Legal);
270     setOperationAction(ISD::UMAX, XLenVT, Legal);
271 
272     if (Subtarget.is64Bit()) {
273       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
274       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
275       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
276       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
277     }
278   } else {
279     setOperationAction(ISD::CTTZ, XLenVT, Expand);
280     setOperationAction(ISD::CTLZ, XLenVT, Expand);
281     setOperationAction(ISD::CTPOP, XLenVT, Expand);
282   }
283 
284   if (Subtarget.hasStdExtZbt()) {
285     setOperationAction(ISD::FSHL, XLenVT, Custom);
286     setOperationAction(ISD::FSHR, XLenVT, Custom);
287     setOperationAction(ISD::SELECT, XLenVT, Legal);
288 
289     if (Subtarget.is64Bit()) {
290       setOperationAction(ISD::FSHL, MVT::i32, Custom);
291       setOperationAction(ISD::FSHR, MVT::i32, Custom);
292     }
293   } else {
294     setOperationAction(ISD::SELECT, XLenVT, Custom);
295   }
296 
297   ISD::CondCode FPCCToExpand[] = {
298       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
299       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
300       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
301 
302   ISD::NodeType FPOpToExpand[] = {
303       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
304       ISD::FP_TO_FP16};
305 
306   if (Subtarget.hasStdExtZfh())
307     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
308 
309   if (Subtarget.hasStdExtZfh()) {
310     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
311     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
312     for (auto CC : FPCCToExpand)
313       setCondCodeAction(CC, MVT::f16, Expand);
314     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
315     setOperationAction(ISD::SELECT, MVT::f16, Custom);
316     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
317     for (auto Op : FPOpToExpand)
318       setOperationAction(Op, MVT::f16, Expand);
319   }
320 
321   if (Subtarget.hasStdExtF()) {
322     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
323     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
324     for (auto CC : FPCCToExpand)
325       setCondCodeAction(CC, MVT::f32, Expand);
326     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
327     setOperationAction(ISD::SELECT, MVT::f32, Custom);
328     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
329     for (auto Op : FPOpToExpand)
330       setOperationAction(Op, MVT::f32, Expand);
331     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
332     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
333   }
334 
335   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
336     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
337 
338   if (Subtarget.hasStdExtD()) {
339     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
340     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
341     for (auto CC : FPCCToExpand)
342       setCondCodeAction(CC, MVT::f64, Expand);
343     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
344     setOperationAction(ISD::SELECT, MVT::f64, Custom);
345     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
346     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
347     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
348     for (auto Op : FPOpToExpand)
349       setOperationAction(Op, MVT::f64, Expand);
350     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
351     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
352   }
353 
354   if (Subtarget.is64Bit()) {
355     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
356     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
357     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
358     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
359   }
360 
361   if (Subtarget.hasStdExtF()) {
362     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
363     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
364   }
365 
366   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
367   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
368   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
369   setOperationAction(ISD::JumpTable, XLenVT, Custom);
370 
371   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
372 
373   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
374   // Unfortunately this can't be determined just from the ISA naming string.
375   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
376                      Subtarget.is64Bit() ? Legal : Custom);
377 
378   setOperationAction(ISD::TRAP, MVT::Other, Legal);
379   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
380   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
381   if (Subtarget.is64Bit())
382     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
383 
384   if (Subtarget.hasStdExtA()) {
385     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
386     setMinCmpXchgSizeInBits(32);
387   } else {
388     setMaxAtomicSizeInBitsSupported(0);
389   }
390 
391   setBooleanContents(ZeroOrOneBooleanContent);
392 
393   if (Subtarget.hasStdExtV()) {
394     setBooleanVectorContents(ZeroOrOneBooleanContent);
395 
396     setOperationAction(ISD::VSCALE, XLenVT, Custom);
397 
398     // RVV intrinsics may have illegal operands.
399     // We also need to custom legalize vmv.x.s.
400     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
401     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
402     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
403     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
404     if (Subtarget.is64Bit()) {
405       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
406     } else {
407       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
408       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
409     }
410 
411     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
412 
413     static unsigned IntegerVPOps[] = {
414         ISD::VP_ADD,  ISD::VP_SUB,  ISD::VP_MUL, ISD::VP_SDIV, ISD::VP_UDIV,
415         ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,   ISD::VP_XOR,
416         ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL};
417 
418     if (!Subtarget.is64Bit()) {
419       // We must custom-lower certain vXi64 operations on RV32 due to the vector
420       // element type being illegal.
421       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
422       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
423 
424       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
425       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
426       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
427       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
428       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
429       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
430       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
431       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
432     }
433 
434     for (MVT VT : BoolVecVTs) {
435       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
436 
437       // Mask VTs are custom-expanded into a series of standard nodes
438       setOperationAction(ISD::TRUNCATE, VT, Custom);
439       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
440       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
441 
442       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
443       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
444 
445       setOperationAction(ISD::SELECT, VT, Expand);
446       setOperationAction(ISD::SELECT_CC, VT, Expand);
447       setOperationAction(ISD::VSELECT, VT, Expand);
448 
449       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
450       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
451       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
452 
453       // RVV has native int->float & float->int conversions where the
454       // element type sizes are within one power-of-two of each other. Any
455       // wider distances between type sizes have to be lowered as sequences
456       // which progressively narrow the gap in stages.
457       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
458       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
459       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
460       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
461 
462       // Expand all extending loads to types larger than this, and truncating
463       // stores from types larger than this.
464       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
465         setTruncStoreAction(OtherVT, VT, Expand);
466         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
467         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
468         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
469       }
470     }
471 
472     for (MVT VT : IntVecVTs) {
473       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
474       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
475 
476       setOperationAction(ISD::SMIN, VT, Legal);
477       setOperationAction(ISD::SMAX, VT, Legal);
478       setOperationAction(ISD::UMIN, VT, Legal);
479       setOperationAction(ISD::UMAX, VT, Legal);
480 
481       setOperationAction(ISD::ROTL, VT, Expand);
482       setOperationAction(ISD::ROTR, VT, Expand);
483 
484       // Custom-lower extensions and truncations from/to mask types.
485       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
486       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
487       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
488 
489       // RVV has native int->float & float->int conversions where the
490       // element type sizes are within one power-of-two of each other. Any
491       // wider distances between type sizes have to be lowered as sequences
492       // which progressively narrow the gap in stages.
493       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
494       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
495       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
496       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
497 
498       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
499       // nodes which truncate by one power of two at a time.
500       setOperationAction(ISD::TRUNCATE, VT, Custom);
501 
502       // Custom-lower insert/extract operations to simplify patterns.
503       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
504       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
505 
506       // Custom-lower reduction operations to set up the corresponding custom
507       // nodes' operands.
508       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
509       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
510       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
511       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
512       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
513       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
514       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
515       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
516 
517       for (unsigned VPOpc : IntegerVPOps)
518         setOperationAction(VPOpc, VT, Custom);
519 
520       setOperationAction(ISD::MLOAD, VT, Custom);
521       setOperationAction(ISD::MSTORE, VT, Custom);
522       setOperationAction(ISD::MGATHER, VT, Custom);
523       setOperationAction(ISD::MSCATTER, VT, Custom);
524 
525       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
526       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
527       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
528 
529       setOperationAction(ISD::SELECT, VT, Expand);
530       setOperationAction(ISD::SELECT_CC, VT, Expand);
531 
532       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
533       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
534 
535       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
536         setTruncStoreAction(VT, OtherVT, Expand);
537         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
538         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
539         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
540       }
541     }
542 
543     // Expand various CCs to best match the RVV ISA, which natively supports UNE
544     // but no other unordered comparisons, and supports all ordered comparisons
545     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
546     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
547     // and we pattern-match those back to the "original", swapping operands once
548     // more. This way we catch both operations and both "vf" and "fv" forms with
549     // fewer patterns.
550     ISD::CondCode VFPCCToExpand[] = {
551         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
552         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
553         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
554     };
555 
556     // Sets common operation actions on RVV floating-point vector types.
557     const auto SetCommonVFPActions = [&](MVT VT) {
558       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
559       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
560       // sizes are within one power-of-two of each other. Therefore conversions
561       // between vXf16 and vXf64 must be lowered as sequences which convert via
562       // vXf32.
563       setOperationAction(ISD::FP_ROUND, VT, Custom);
564       setOperationAction(ISD::FP_EXTEND, VT, Custom);
565       // Custom-lower insert/extract operations to simplify patterns.
566       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
567       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
568       // Expand various condition codes (explained above).
569       for (auto CC : VFPCCToExpand)
570         setCondCodeAction(CC, VT, Expand);
571 
572       setOperationAction(ISD::FMINNUM, VT, Legal);
573       setOperationAction(ISD::FMAXNUM, VT, Legal);
574 
575       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
576       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
577       setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
578       setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
579       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
580 
581       setOperationAction(ISD::MLOAD, VT, Custom);
582       setOperationAction(ISD::MSTORE, VT, Custom);
583       setOperationAction(ISD::MGATHER, VT, Custom);
584       setOperationAction(ISD::MSCATTER, VT, Custom);
585 
586       setOperationAction(ISD::SELECT, VT, Expand);
587       setOperationAction(ISD::SELECT_CC, VT, Expand);
588 
589       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
590       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
591       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
592 
593       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
594     };
595 
596     // Sets common extload/truncstore actions on RVV floating-point vector
597     // types.
598     const auto SetCommonVFPExtLoadTruncStoreActions =
599         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
600           for (auto SmallVT : SmallerVTs) {
601             setTruncStoreAction(VT, SmallVT, Expand);
602             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
603           }
604         };
605 
606     if (Subtarget.hasStdExtZfh())
607       for (MVT VT : F16VecVTs)
608         SetCommonVFPActions(VT);
609 
610     for (MVT VT : F32VecVTs) {
611       if (Subtarget.hasStdExtF())
612         SetCommonVFPActions(VT);
613       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
614     }
615 
616     for (MVT VT : F64VecVTs) {
617       if (Subtarget.hasStdExtD())
618         SetCommonVFPActions(VT);
619       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
620       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
621     }
622 
623     if (Subtarget.useRVVForFixedLengthVectors()) {
624       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
625         if (!useRVVForFixedLengthVectorVT(VT))
626           continue;
627 
628         // By default everything must be expanded.
629         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
630           setOperationAction(Op, VT, Expand);
631         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
632           setTruncStoreAction(VT, OtherVT, Expand);
633           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
634           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
635           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
636         }
637 
638         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
639         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
640         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
641 
642         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
643         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
644 
645         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
646         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
647 
648         setOperationAction(ISD::LOAD, VT, Custom);
649         setOperationAction(ISD::STORE, VT, Custom);
650 
651         setOperationAction(ISD::SETCC, VT, Custom);
652 
653         setOperationAction(ISD::TRUNCATE, VT, Custom);
654 
655         setOperationAction(ISD::BITCAST, VT, Custom);
656 
657         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
658         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
659         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
660 
661         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
662         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
663         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
664         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
665 
666         // Operations below are different for between masks and other vectors.
667         if (VT.getVectorElementType() == MVT::i1) {
668           setOperationAction(ISD::AND, VT, Custom);
669           setOperationAction(ISD::OR, VT, Custom);
670           setOperationAction(ISD::XOR, VT, Custom);
671           continue;
672         }
673 
674         // Use SPLAT_VECTOR to prevent type legalization from destroying the
675         // splats when type legalizing i64 scalar on RV32.
676         // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
677         // improvements first.
678         if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
679           setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
680           setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
681         }
682 
683         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
684         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
685 
686         setOperationAction(ISD::MLOAD, VT, Custom);
687         setOperationAction(ISD::MSTORE, VT, Custom);
688         setOperationAction(ISD::MGATHER, VT, Custom);
689         setOperationAction(ISD::MSCATTER, VT, Custom);
690         setOperationAction(ISD::ADD, VT, Custom);
691         setOperationAction(ISD::MUL, VT, Custom);
692         setOperationAction(ISD::SUB, VT, Custom);
693         setOperationAction(ISD::AND, VT, Custom);
694         setOperationAction(ISD::OR, VT, Custom);
695         setOperationAction(ISD::XOR, VT, Custom);
696         setOperationAction(ISD::SDIV, VT, Custom);
697         setOperationAction(ISD::SREM, VT, Custom);
698         setOperationAction(ISD::UDIV, VT, Custom);
699         setOperationAction(ISD::UREM, VT, Custom);
700         setOperationAction(ISD::SHL, VT, Custom);
701         setOperationAction(ISD::SRA, VT, Custom);
702         setOperationAction(ISD::SRL, VT, Custom);
703 
704         setOperationAction(ISD::SMIN, VT, Custom);
705         setOperationAction(ISD::SMAX, VT, Custom);
706         setOperationAction(ISD::UMIN, VT, Custom);
707         setOperationAction(ISD::UMAX, VT, Custom);
708         setOperationAction(ISD::ABS,  VT, Custom);
709 
710         setOperationAction(ISD::MULHS, VT, Custom);
711         setOperationAction(ISD::MULHU, VT, Custom);
712 
713         setOperationAction(ISD::VSELECT, VT, Custom);
714         setOperationAction(ISD::SELECT, VT, Expand);
715         setOperationAction(ISD::SELECT_CC, VT, Expand);
716 
717         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
718         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
719         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
720 
721         // Custom-lower reduction operations to set up the corresponding custom
722         // nodes' operands.
723         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
724         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
725         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
726         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
727         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
728 
729         for (unsigned VPOpc : IntegerVPOps)
730           setOperationAction(VPOpc, VT, Custom);
731       }
732 
733       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
734         if (!useRVVForFixedLengthVectorVT(VT))
735           continue;
736 
737         // By default everything must be expanded.
738         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
739           setOperationAction(Op, VT, Expand);
740         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
741           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
742           setTruncStoreAction(VT, OtherVT, Expand);
743         }
744 
745         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
746         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
747         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
748 
749         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
750         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
751         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
752         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
753 
754         setOperationAction(ISD::LOAD, VT, Custom);
755         setOperationAction(ISD::STORE, VT, Custom);
756         setOperationAction(ISD::MLOAD, VT, Custom);
757         setOperationAction(ISD::MSTORE, VT, Custom);
758         setOperationAction(ISD::MGATHER, VT, Custom);
759         setOperationAction(ISD::MSCATTER, VT, Custom);
760         setOperationAction(ISD::FADD, VT, Custom);
761         setOperationAction(ISD::FSUB, VT, Custom);
762         setOperationAction(ISD::FMUL, VT, Custom);
763         setOperationAction(ISD::FDIV, VT, Custom);
764         setOperationAction(ISD::FNEG, VT, Custom);
765         setOperationAction(ISD::FABS, VT, Custom);
766         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
767         setOperationAction(ISD::FSQRT, VT, Custom);
768         setOperationAction(ISD::FMA, VT, Custom);
769         setOperationAction(ISD::FMINNUM, VT, Custom);
770         setOperationAction(ISD::FMAXNUM, VT, Custom);
771 
772         setOperationAction(ISD::FP_ROUND, VT, Custom);
773         setOperationAction(ISD::FP_EXTEND, VT, Custom);
774 
775         for (auto CC : VFPCCToExpand)
776           setCondCodeAction(CC, VT, Expand);
777 
778         setOperationAction(ISD::VSELECT, VT, Custom);
779         setOperationAction(ISD::SELECT, VT, Expand);
780         setOperationAction(ISD::SELECT_CC, VT, Expand);
781 
782         setOperationAction(ISD::BITCAST, VT, Custom);
783 
784         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
785         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
786         setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
787         setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
788       }
789 
790       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
791       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
792       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
793       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
794       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
795       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
796       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
797       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
798     }
799   }
800 
801   // Function alignments.
802   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
803   setMinFunctionAlignment(FunctionAlignment);
804   setPrefFunctionAlignment(FunctionAlignment);
805 
806   setMinimumJumpTableEntries(5);
807 
808   // Jumps are expensive, compared to logic
809   setJumpIsExpensive();
810 
811   // We can use any register for comparisons
812   setHasMultipleConditionRegisters();
813 
814   setTargetDAGCombine(ISD::AND);
815   setTargetDAGCombine(ISD::OR);
816   setTargetDAGCombine(ISD::XOR);
817   if (Subtarget.hasStdExtV()) {
818     setTargetDAGCombine(ISD::FCOPYSIGN);
819     setTargetDAGCombine(ISD::MGATHER);
820     setTargetDAGCombine(ISD::MSCATTER);
821     setTargetDAGCombine(ISD::SRA);
822     setTargetDAGCombine(ISD::SRL);
823     setTargetDAGCombine(ISD::SHL);
824   }
825 }
826 
827 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
828                                             LLVMContext &Context,
829                                             EVT VT) const {
830   if (!VT.isVector())
831     return getPointerTy(DL);
832   if (Subtarget.hasStdExtV() &&
833       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
834     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
835   return VT.changeVectorElementTypeToInteger();
836 }
837 
838 MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
839   return Subtarget.getXLenVT();
840 }
841 
842 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
843                                              const CallInst &I,
844                                              MachineFunction &MF,
845                                              unsigned Intrinsic) const {
846   switch (Intrinsic) {
847   default:
848     return false;
849   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
850   case Intrinsic::riscv_masked_atomicrmw_add_i32:
851   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
852   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
853   case Intrinsic::riscv_masked_atomicrmw_max_i32:
854   case Intrinsic::riscv_masked_atomicrmw_min_i32:
855   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
856   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
857   case Intrinsic::riscv_masked_cmpxchg_i32:
858     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
859     Info.opc = ISD::INTRINSIC_W_CHAIN;
860     Info.memVT = MVT::getVT(PtrTy->getElementType());
861     Info.ptrVal = I.getArgOperand(0);
862     Info.offset = 0;
863     Info.align = Align(4);
864     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
865                  MachineMemOperand::MOVolatile;
866     return true;
867   }
868 }
869 
870 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
871                                                 const AddrMode &AM, Type *Ty,
872                                                 unsigned AS,
873                                                 Instruction *I) const {
874   // No global is ever allowed as a base.
875   if (AM.BaseGV)
876     return false;
877 
878   // Require a 12-bit signed offset.
879   if (!isInt<12>(AM.BaseOffs))
880     return false;
881 
882   switch (AM.Scale) {
883   case 0: // "r+i" or just "i", depending on HasBaseReg.
884     break;
885   case 1:
886     if (!AM.HasBaseReg) // allow "r+i".
887       break;
888     return false; // disallow "r+r" or "r+r+i".
889   default:
890     return false;
891   }
892 
893   return true;
894 }
895 
896 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
897   return isInt<12>(Imm);
898 }
899 
900 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
901   return isInt<12>(Imm);
902 }
903 
904 // On RV32, 64-bit integers are split into their high and low parts and held
905 // in two different registers, so the trunc is free since the low register can
906 // just be used.
907 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
908   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
909     return false;
910   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
911   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
912   return (SrcBits == 64 && DestBits == 32);
913 }
914 
915 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
916   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
917       !SrcVT.isInteger() || !DstVT.isInteger())
918     return false;
919   unsigned SrcBits = SrcVT.getSizeInBits();
920   unsigned DestBits = DstVT.getSizeInBits();
921   return (SrcBits == 64 && DestBits == 32);
922 }
923 
924 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
925   // Zexts are free if they can be combined with a load.
926   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
927     EVT MemVT = LD->getMemoryVT();
928     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
929          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
930         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
931          LD->getExtensionType() == ISD::ZEXTLOAD))
932       return true;
933   }
934 
935   return TargetLowering::isZExtFree(Val, VT2);
936 }
937 
938 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
939   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
940 }
941 
942 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
943   return Subtarget.hasStdExtZbb();
944 }
945 
946 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
947   return Subtarget.hasStdExtZbb();
948 }
949 
950 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
951                                        bool ForCodeSize) const {
952   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
953     return false;
954   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
955     return false;
956   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
957     return false;
958   if (Imm.isNegZero())
959     return false;
960   return Imm.isZero();
961 }
962 
963 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
964   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
965          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
966          (VT == MVT::f64 && Subtarget.hasStdExtD());
967 }
968 
969 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
970                                                       CallingConv::ID CC,
971                                                       EVT VT) const {
972   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
973   // end up using a GPR but that will be decided based on ABI.
974   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
975     return MVT::f32;
976 
977   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
978 }
979 
980 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
981                                                            CallingConv::ID CC,
982                                                            EVT VT) const {
983   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
984   // end up using a GPR but that will be decided based on ABI.
985   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
986     return 1;
987 
988   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
989 }
990 
991 // Changes the condition code and swaps operands if necessary, so the SetCC
992 // operation matches one of the comparisons supported directly by branches
993 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
994 // with 1/-1.
995 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
996                                     ISD::CondCode &CC, SelectionDAG &DAG) {
997   // Convert X > -1 to X >= 0.
998   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
999     RHS = DAG.getConstant(0, DL, RHS.getValueType());
1000     CC = ISD::SETGE;
1001     return;
1002   }
1003   // Convert X < 1 to 0 >= X.
1004   if (CC == ISD::SETLT && isOneConstant(RHS)) {
1005     RHS = LHS;
1006     LHS = DAG.getConstant(0, DL, RHS.getValueType());
1007     CC = ISD::SETGE;
1008     return;
1009   }
1010 
1011   switch (CC) {
1012   default:
1013     break;
1014   case ISD::SETGT:
1015   case ISD::SETLE:
1016   case ISD::SETUGT:
1017   case ISD::SETULE:
1018     CC = ISD::getSetCCSwappedOperands(CC);
1019     std::swap(LHS, RHS);
1020     break;
1021   }
1022 }
1023 
1024 // Return the RISC-V branch opcode that matches the given DAG integer
1025 // condition code. The CondCode must be one of those supported by the RISC-V
1026 // ISA (see translateSetCCForBranch).
1027 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
1028   switch (CC) {
1029   default:
1030     llvm_unreachable("Unsupported CondCode");
1031   case ISD::SETEQ:
1032     return RISCV::BEQ;
1033   case ISD::SETNE:
1034     return RISCV::BNE;
1035   case ISD::SETLT:
1036     return RISCV::BLT;
1037   case ISD::SETGE:
1038     return RISCV::BGE;
1039   case ISD::SETULT:
1040     return RISCV::BLTU;
1041   case ISD::SETUGE:
1042     return RISCV::BGEU;
1043   }
1044 }
1045 
1046 RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
1047   assert(VT.isScalableVector() && "Expecting a scalable vector type");
1048   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
1049   if (VT.getVectorElementType() == MVT::i1)
1050     KnownSize *= 8;
1051 
1052   switch (KnownSize) {
1053   default:
1054     llvm_unreachable("Invalid LMUL.");
1055   case 8:
1056     return RISCVII::VLMUL::LMUL_F8;
1057   case 16:
1058     return RISCVII::VLMUL::LMUL_F4;
1059   case 32:
1060     return RISCVII::VLMUL::LMUL_F2;
1061   case 64:
1062     return RISCVII::VLMUL::LMUL_1;
1063   case 128:
1064     return RISCVII::VLMUL::LMUL_2;
1065   case 256:
1066     return RISCVII::VLMUL::LMUL_4;
1067   case 512:
1068     return RISCVII::VLMUL::LMUL_8;
1069   }
1070 }
1071 
1072 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
1073   switch (LMul) {
1074   default:
1075     llvm_unreachable("Invalid LMUL.");
1076   case RISCVII::VLMUL::LMUL_F8:
1077   case RISCVII::VLMUL::LMUL_F4:
1078   case RISCVII::VLMUL::LMUL_F2:
1079   case RISCVII::VLMUL::LMUL_1:
1080     return RISCV::VRRegClassID;
1081   case RISCVII::VLMUL::LMUL_2:
1082     return RISCV::VRM2RegClassID;
1083   case RISCVII::VLMUL::LMUL_4:
1084     return RISCV::VRM4RegClassID;
1085   case RISCVII::VLMUL::LMUL_8:
1086     return RISCV::VRM8RegClassID;
1087   }
1088 }
1089 
1090 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1091   RISCVII::VLMUL LMUL = getLMUL(VT);
1092   if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
1093       LMUL == RISCVII::VLMUL::LMUL_F4 ||
1094       LMUL == RISCVII::VLMUL::LMUL_F2 ||
1095       LMUL == RISCVII::VLMUL::LMUL_1) {
1096     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1097                   "Unexpected subreg numbering");
1098     return RISCV::sub_vrm1_0 + Index;
1099   }
1100   if (LMUL == RISCVII::VLMUL::LMUL_2) {
1101     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1102                   "Unexpected subreg numbering");
1103     return RISCV::sub_vrm2_0 + Index;
1104   }
1105   if (LMUL == RISCVII::VLMUL::LMUL_4) {
1106     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1107                   "Unexpected subreg numbering");
1108     return RISCV::sub_vrm4_0 + Index;
1109   }
1110   llvm_unreachable("Invalid vector type.");
1111 }
1112 
1113 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1114   if (VT.getVectorElementType() == MVT::i1)
1115     return RISCV::VRRegClassID;
1116   return getRegClassIDForLMUL(getLMUL(VT));
1117 }
1118 
1119 // Attempt to decompose a subvector insert/extract between VecVT and
1120 // SubVecVT via subregister indices. Returns the subregister index that
1121 // can perform the subvector insert/extract with the given element index, as
1122 // well as the index corresponding to any leftover subvectors that must be
1123 // further inserted/extracted within the register class for SubVecVT.
1124 std::pair<unsigned, unsigned>
1125 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1126     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1127     const RISCVRegisterInfo *TRI) {
1128   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1129                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1130                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1131                 "Register classes not ordered");
1132   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1133   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1134   // Try to compose a subregister index that takes us from the incoming
1135   // LMUL>1 register class down to the outgoing one. At each step we half
1136   // the LMUL:
1137   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1138   // Note that this is not guaranteed to find a subregister index, such as
1139   // when we are extracting from one VR type to another.
1140   unsigned SubRegIdx = RISCV::NoSubRegister;
1141   for (const unsigned RCID :
1142        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1143     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1144       VecVT = VecVT.getHalfNumVectorElementsVT();
1145       bool IsHi =
1146           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1147       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1148                                             getSubregIndexByMVT(VecVT, IsHi));
1149       if (IsHi)
1150         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1151     }
1152   return {SubRegIdx, InsertExtractIdx};
1153 }
1154 
1155 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1156 // stores for those types.
1157 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1158   return !Subtarget.useRVVForFixedLengthVectors() ||
1159          (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1160 }
1161 
1162 static bool useRVVForFixedLengthVectorVT(MVT VT,
1163                                          const RISCVSubtarget &Subtarget) {
1164   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1165   if (!Subtarget.useRVVForFixedLengthVectors())
1166     return false;
1167 
1168   // We only support a set of vector types with an equivalent number of
1169   // elements to avoid legalization issues. Therefore -- since we don't have
1170   // v512i8/v512i16/etc -- the longest fixed-length vector type we support has
1171   // 256 elements.
1172   if (VT.getVectorNumElements() > 256)
1173     return false;
1174 
1175   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1176 
1177   // Don't use RVV for vectors we cannot scalarize if required.
1178   switch (VT.getVectorElementType().SimpleTy) {
1179   // i1 is supported but has different rules.
1180   default:
1181     return false;
1182   case MVT::i1:
1183     // Masks can only use a single register.
1184     if (VT.getVectorNumElements() > MinVLen)
1185       return false;
1186     MinVLen /= 8;
1187     break;
1188   case MVT::i8:
1189   case MVT::i16:
1190   case MVT::i32:
1191   case MVT::i64:
1192     break;
1193   case MVT::f16:
1194     if (!Subtarget.hasStdExtZfh())
1195       return false;
1196     break;
1197   case MVT::f32:
1198     if (!Subtarget.hasStdExtF())
1199       return false;
1200     break;
1201   case MVT::f64:
1202     if (!Subtarget.hasStdExtD())
1203       return false;
1204     break;
1205   }
1206 
1207   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1208   // Don't use RVV for types that don't fit.
1209   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1210     return false;
1211 
1212   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1213   // the base fixed length RVV support in place.
1214   if (!VT.isPow2VectorType())
1215     return false;
1216 
1217   return true;
1218 }
1219 
1220 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1221   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1222 }
1223 
1224 // Return the largest legal scalable vector type that matches VT's element type.
1225 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1226                                             const RISCVSubtarget &Subtarget) {
1227   // This may be called before legal types are setup.
1228   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1229           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1230          "Expected legal fixed length vector!");
1231 
1232   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1233 
1234   MVT EltVT = VT.getVectorElementType();
1235   switch (EltVT.SimpleTy) {
1236   default:
1237     llvm_unreachable("unexpected element type for RVV container");
1238   case MVT::i1:
1239   case MVT::i8:
1240   case MVT::i16:
1241   case MVT::i32:
1242   case MVT::i64:
1243   case MVT::f16:
1244   case MVT::f32:
1245   case MVT::f64: {
1246     // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
1247     // narrower types, but we can't have a fractional LMUL with demoninator less
1248     // than 64/SEW.
1249     unsigned NumElts =
1250         divideCeil(VT.getVectorNumElements(), MinVLen / RISCV::RVVBitsPerBlock);
1251     return MVT::getScalableVectorVT(EltVT, NumElts);
1252   }
1253   }
1254 }
1255 
1256 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1257                                             const RISCVSubtarget &Subtarget) {
1258   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1259                                           Subtarget);
1260 }
1261 
1262 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1263   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1264 }
1265 
1266 // Grow V to consume an entire RVV register.
1267 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1268                                        const RISCVSubtarget &Subtarget) {
1269   assert(VT.isScalableVector() &&
1270          "Expected to convert into a scalable vector!");
1271   assert(V.getValueType().isFixedLengthVector() &&
1272          "Expected a fixed length vector operand!");
1273   SDLoc DL(V);
1274   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1275   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1276 }
1277 
1278 // Shrink V so it's just big enough to maintain a VT's worth of data.
1279 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1280                                          const RISCVSubtarget &Subtarget) {
1281   assert(VT.isFixedLengthVector() &&
1282          "Expected to convert into a fixed length vector!");
1283   assert(V.getValueType().isScalableVector() &&
1284          "Expected a scalable vector operand!");
1285   SDLoc DL(V);
1286   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1287   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1288 }
1289 
1290 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1291 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1292 // the vector type that it is contained in.
1293 static std::pair<SDValue, SDValue>
1294 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1295                 const RISCVSubtarget &Subtarget) {
1296   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1297   MVT XLenVT = Subtarget.getXLenVT();
1298   SDValue VL = VecVT.isFixedLengthVector()
1299                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1300                    : DAG.getRegister(RISCV::X0, XLenVT);
1301   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1302   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1303   return {Mask, VL};
1304 }
1305 
1306 // As above but assuming the given type is a scalable vector type.
1307 static std::pair<SDValue, SDValue>
1308 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1309                         const RISCVSubtarget &Subtarget) {
1310   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1311   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1312 }
1313 
1314 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1315 // of either is (currently) supported. This can get us into an infinite loop
1316 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1317 // as a ..., etc.
1318 // Until either (or both) of these can reliably lower any node, reporting that
1319 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1320 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1321 // which is not desirable.
1322 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1323     EVT VT, unsigned DefinedValues) const {
1324   return false;
1325 }
1326 
1327 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1328   // Only splats are currently supported.
1329   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1330     return true;
1331 
1332   return false;
1333 }
1334 
1335 static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
1336                                  const RISCVSubtarget &Subtarget) {
1337   MVT VT = Op.getSimpleValueType();
1338   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1339 
1340   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1341 
1342   SDLoc DL(Op);
1343   SDValue Mask, VL;
1344   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1345 
1346   unsigned Opc =
1347       VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1348   SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
1349   return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1350 }
1351 
1352 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1353                                  const RISCVSubtarget &Subtarget) {
1354   MVT VT = Op.getSimpleValueType();
1355   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1356 
1357   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1358 
1359   SDLoc DL(Op);
1360   SDValue Mask, VL;
1361   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1362 
1363   MVT XLenVT = Subtarget.getXLenVT();
1364   unsigned NumElts = Op.getNumOperands();
1365 
1366   if (VT.getVectorElementType() == MVT::i1) {
1367     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1368       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1369       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1370     }
1371 
1372     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1373       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1374       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1375     }
1376 
1377     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1378     // scalar integer chunks whose bit-width depends on the number of mask
1379     // bits and XLEN.
1380     // First, determine the most appropriate scalar integer type to use. This
1381     // is at most XLenVT, but may be shrunk to a smaller vector element type
1382     // according to the size of the final vector - use i8 chunks rather than
1383     // XLenVT if we're producing a v8i1. This results in more consistent
1384     // codegen across RV32 and RV64.
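    // For example (illustrative): the constant mask
    //   v8i1 = build_vector i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 0
    // picks i8 chunks and is materialized as a v1i8 vector holding the
    // constant 0b01001101 (element i maps to bit i of the chunk), which is
    // then bitcast back to v8i1 below.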
1385     unsigned NumViaIntegerBits =
1386         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1387     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
1388       // If we have to use more than one INSERT_VECTOR_ELT then this
1389       // optimization is likely to increase code size; avoid performing it in
1390       // such a case. We can use a load from a constant pool in this case.
1391       if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
1392         return SDValue();
1393       // Now we can create our integer vector type. Note that it may be larger
1394       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1395       MVT IntegerViaVecVT =
1396           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1397                            divideCeil(NumElts, NumViaIntegerBits));
1398 
1399       uint64_t Bits = 0;
1400       unsigned BitPos = 0, IntegerEltIdx = 0;
1401       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1402 
1403       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1404         // Once we accumulate enough bits to fill our scalar type, insert into
1405         // our vector and clear our accumulated data.
1406         if (I != 0 && I % NumViaIntegerBits == 0) {
1407           if (NumViaIntegerBits <= 32)
1408             Bits = SignExtend64(Bits, 32);
1409           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1410           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1411                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1412           Bits = 0;
1413           BitPos = 0;
1414           IntegerEltIdx++;
1415         }
1416         SDValue V = Op.getOperand(I);
1417         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1418         Bits |= ((uint64_t)BitValue << BitPos);
1419       }
1420 
1421       // Insert the (remaining) scalar value into position in our integer
1422       // vector type.
1423       if (NumViaIntegerBits <= 32)
1424         Bits = SignExtend64(Bits, 32);
1425       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1426       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1427                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1428 
1429       if (NumElts < NumViaIntegerBits) {
1430         // If we're producing a smaller vector than our minimum legal integer
1431         // type, bitcast to the equivalent (known-legal) mask type, and extract
1432         // our final mask.
1433         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1434         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1435         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1436                           DAG.getConstant(0, DL, XLenVT));
1437       } else {
1438         // Else we must have produced an integer type with the same size as the
1439         // mask type; bitcast for the final result.
1440         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1441         Vec = DAG.getBitcast(VT, Vec);
1442       }
1443 
1444       return Vec;
1445     }
1446 
1447     // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
1448     // vector type, we have a legal equivalently-sized i8 type, so we can use
1449     // that.
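    // For instance (illustrative), a non-constant
    //   v4i1 = build_vector a, b, c, d
    // is emitted as a v4i8 build_vector of the operands, masked with an AND
    // against 1, then compared not-equal against a v4i8 zero vector.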
1450     MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
1451     SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);
1452 
1453     SDValue WideVec;
1454     if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1455       // For a splat, perform a scalar truncate before creating the wider
1456       // vector.
1457       assert(Splat.getValueType() == XLenVT &&
1458              "Unexpected type for i1 splat value");
1459       Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
1460                           DAG.getConstant(1, DL, XLenVT));
1461       WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
1462     } else {
1463       SmallVector<SDValue, 8> Ops(Op->op_values());
1464       WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
1465       SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
1466       WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
1467     }
1468 
1469     return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
1470   }
1471 
1472   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1473     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1474                                         : RISCVISD::VMV_V_X_VL;
1475     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1476     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1477   }
1478 
1479   // Try and match an index sequence, which we can lower directly to the vid
1480   // instruction. An all-undef vector is matched by getSplatValue, above.
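  // For example, v4i8 = build_vector 0, 1, 2, 3 (undefs are permitted at any
  // position) maps directly onto a single vid.v.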
1481   if (VT.isInteger()) {
1482     bool IsVID = true;
1483     for (unsigned I = 0; I < NumElts && IsVID; I++)
1484       IsVID &= Op.getOperand(I).isUndef() ||
1485                (isa<ConstantSDNode>(Op.getOperand(I)) &&
1486                 Op.getConstantOperandVal(I) == I);
1487 
1488     if (IsVID) {
1489       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1490       return convertFromScalableVector(VT, VID, DAG, Subtarget);
1491     }
1492   }
1493 
1494   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1495   // when re-interpreted as a vector with a larger element type. For example,
1496   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
1497   // could instead be splatted as
1498   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1499   // TODO: This optimization could also work on non-constant splats, but it
1500   // would require bit-manipulation instructions to construct the splat value.
1501   SmallVector<SDValue> Sequence;
1502   unsigned EltBitSize = VT.getScalarSizeInBits();
1503   const auto *BV = cast<BuildVectorSDNode>(Op);
1504   if (VT.isInteger() && EltBitSize < 64 &&
1505       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1506       BV->getRepeatedSequence(Sequence) &&
1507       (Sequence.size() * EltBitSize) <= 64) {
1508     unsigned SeqLen = Sequence.size();
1509     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1510     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1511     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1512             ViaIntVT == MVT::i64) &&
1513            "Unexpected sequence type");
1514 
1515     unsigned EltIdx = 0;
1516     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1517     uint64_t SplatValue = 0;
1518     // Construct the amalgamated value which can be splatted as this larger
1519     // vector type.
1520     for (const auto &SeqV : Sequence) {
1521       if (!SeqV.isUndef())
1522         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1523                        << (EltIdx * EltBitSize));
1524       EltIdx++;
1525     }
1526 
1527     // On RV64, sign-extend from 32 to 64 bits where possible in order to
1528     // achieve better constant materialization.
1529     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1530       SplatValue = SignExtend64(SplatValue, 32);
1531 
1532     // Since we can't introduce illegal i64 types at this stage, we can only
1533     // perform an i64 splat on RV32 if the value is the sign-extension of its
1534     // own low 32 bits. That way we can use RVV instructions to splat.
1535     assert((ViaIntVT.bitsLE(XLenVT) ||
1536             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1537            "Unexpected bitcast sequence");
1538     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1539       SDValue ViaVL =
1540           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1541       MVT ViaContainerVT =
1542           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
1543       SDValue Splat =
1544           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1545                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1546       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1547       return DAG.getBitcast(VT, Splat);
1548     }
1549   }
1550 
1551   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1552   // which constitute a large proportion of the elements. In such cases we can
1553   // splat a vector with the dominant element and make up the shortfall with
1554   // INSERT_VECTOR_ELTs.
1555   // Note that this includes vectors of 2 elements by association. The
1556   // upper-most element is the "dominant" one, allowing us to use a splat to
1557   // "insert" the upper element, and an insert of the lower element at position
1558   // 0, which improves codegen.
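  // For example (illustrative), v4i32 = build_vector x, y, x, x is lowered as
  // a splat of x followed by a single INSERT_VECTOR_ELT of y at index 1.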
1559   SDValue DominantValue;
1560   unsigned MostCommonCount = 0;
1561   DenseMap<SDValue, unsigned> ValueCounts;
1562   unsigned NumUndefElts =
1563       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1564 
1565   for (SDValue V : Op->op_values()) {
1566     if (V.isUndef())
1567       continue;
1568 
1569     ValueCounts.insert(std::make_pair(V, 0));
1570     unsigned &Count = ValueCounts[V];
1571 
1572     // Is this value dominant? In case of a tie, prefer the later (highest)
1573     // element, since the remaining inserts then land near the beginning of the
1574     // vector, which is cheaper than inserting at the end.
1575     if (++Count >= MostCommonCount) {
1576       DominantValue = V;
1577       MostCommonCount = Count;
1578     }
1579   }
1580 
1581   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
1582   unsigned NumDefElts = NumElts - NumUndefElts;
1583   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1584 
1585   // Don't perform this optimization when optimizing for size, since
1586   // materializing elements and inserting them tends to cause code bloat.
1587   if (!DAG.shouldOptForSize() &&
1588       ((MostCommonCount > DominantValueCountThreshold) ||
1589        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
1590     // Start by splatting the most common element.
1591     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1592 
1593     DenseSet<SDValue> Processed{DominantValue};
1594     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
1595     for (const auto &OpIdx : enumerate(Op->ops())) {
1596       const SDValue &V = OpIdx.value();
1597       if (V.isUndef() || !Processed.insert(V).second)
1598         continue;
1599       if (ValueCounts[V] == 1) {
1600         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
1601                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
1602       } else {
1603         // Blend in all instances of this value using a VSELECT, using a
1604         // mask where each bit signals whether that element is the one
1605         // we're after.
1606         SmallVector<SDValue> Ops;
1607         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
1608           return DAG.getConstant(V == V1, DL, XLenVT);
1609         });
1610         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
1611                           DAG.getBuildVector(SelMaskTy, DL, Ops),
1612                           DAG.getSplatBuildVector(VT, DL, V), Vec);
1613       }
1614     }
1615 
1616     return Vec;
1617   }
1618 
1619   return SDValue();
1620 }
1621 
1622 static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
1623                                    SDValue Hi, SDValue VL, SelectionDAG &DAG) {
1624   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
1625     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
1626     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
1627     // If Hi is all copies of Lo's sign bit ({Hi, Lo} sign-extends Lo), lower
1628     // this as a custom node to try and match RVV vector/scalar instructions.
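    // For example, Lo = 0xFFFFFFFE and Hi = 0xFFFFFFFF pass this check, since
    // (LoC >> 31) == -1 == HiC; splatting Lo alone then produces the correct
    // 64-bit element value.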
1629     if ((LoC >> 31) == HiC)
1630       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
1631   }
1632 
1633   // Fall back to a stack store and stride x0 vector load.
1634   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
1635 }
1636 
1637 // Called by type legalization to handle splat of i64 on RV32.
1638 // FIXME: We can optimize this when the type has sign or zero bits in one
1639 // of the halves.
1640 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
1641                                    SDValue VL, SelectionDAG &DAG) {
1642   assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
1643   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1644                            DAG.getConstant(0, DL, MVT::i32));
1645   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1646                            DAG.getConstant(1, DL, MVT::i32));
1647   return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
1648 }
1649 
1650 // This function lowers a splat of the scalar operand Scalar with the vector
1651 // length VL. It ensures the final sequence is type legal, which is useful when
1652 // lowering a splat after type legalization.
1653 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
1654                                 SelectionDAG &DAG,
1655                                 const RISCVSubtarget &Subtarget) {
1656   if (VT.isFloatingPoint())
1657     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
1658 
1659   MVT XLenVT = Subtarget.getXLenVT();
1660 
1661   // Simplest case is that the operand needs to be promoted to XLenVT.
1662   if (Scalar.getValueType().bitsLE(XLenVT)) {
1663     // If the operand is a constant, sign extend to increase our chances
1664     // of being able to use a .vi instruction. ANY_EXTEND would become a
1665     // zero extend and the simm5 check in isel would fail.
1666     // FIXME: Should we ignore the upper bits in isel instead?
1667     unsigned ExtOpc =
1668         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
1669     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
1670     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
1671   }
1672 
1673   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
1674          "Unexpected scalar for splat lowering!");
1675 
1676   // Otherwise use the more complicated splatting algorithm.
1677   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
1678 }
1679 
1680 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1681                                    const RISCVSubtarget &Subtarget) {
1682   SDValue V1 = Op.getOperand(0);
1683   SDValue V2 = Op.getOperand(1);
1684   SDLoc DL(Op);
1685   MVT XLenVT = Subtarget.getXLenVT();
1686   MVT VT = Op.getSimpleValueType();
1687   unsigned NumElts = VT.getVectorNumElements();
1688   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1689 
1690   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1691 
1692   SDValue TrueMask, VL;
1693   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1694 
1695   if (SVN->isSplat()) {
1696     const int Lane = SVN->getSplatIndex();
1697     if (Lane >= 0) {
1698       MVT SVT = VT.getVectorElementType();
1699 
1700       // Turn splatted vector load into a strided load with an X0 stride.
1701       SDValue V = V1;
1702       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
1703       // with undef.
1704       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
1705       int Offset = Lane;
1706       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
1707         int OpElements =
1708             V.getOperand(0).getSimpleValueType().getVectorNumElements();
1709         V = V.getOperand(Offset / OpElements);
1710         Offset %= OpElements;
1711       }
1712 
1713       // We need to ensure the load isn't atomic or volatile.
1714       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
1715         auto *Ld = cast<LoadSDNode>(V);
1716         Offset *= SVT.getStoreSize();
1717         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
1718                                                    TypeSize::Fixed(Offset), DL);
1719 
1720         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
1721         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
1722           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
1723           SDValue IntID =
1724               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1725           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
1726                            DAG.getRegister(RISCV::X0, XLenVT), VL};
1727           SDValue NewLoad = DAG.getMemIntrinsicNode(
1728               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
1729               DAG.getMachineFunction().getMachineMemOperand(
1730                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
1731           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
1732           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
1733         }
1734 
1735         // Otherwise use a scalar load and splat. This will give the best
1736         // opportunity to fold a splat into the operation. ISel can turn it into
1737         // the x0 strided load if we aren't able to fold away the select.
1738         if (SVT.isFloatingPoint())
1739           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
1740                           Ld->getPointerInfo().getWithOffset(Offset),
1741                           Ld->getOriginalAlign(),
1742                           Ld->getMemOperand()->getFlags());
1743         else
1744           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
1745                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
1746                              Ld->getOriginalAlign(),
1747                              Ld->getMemOperand()->getFlags());
1748         DAG.makeEquivalentMemoryOrdering(Ld, V);
1749 
1750         unsigned Opc =
1751             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1752         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
1753         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1754       }
1755 
1756       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1757       assert(Lane < (int)NumElts && "Unexpected lane!");
1758       SDValue Gather =
1759           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1760                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
1761       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1762     }
1763   }
1764 
1765   // Detect shuffles which can be re-expressed as vector selects; these are
1766   // shuffles in which each element in the destination is taken from an element
1767   // at the corresponding index in either source vector.
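  // For example (illustrative), with 4-element vectors the mask <0, 5, 2, 7>
  // takes destination element i from position i of either V1 or V2, so
  // (before any operand swap below) it becomes a vselect with the mask
  // <1, 0, 1, 0> between V1 and V2.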
1768   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
1769     int MaskIndex = MaskIdx.value();
1770     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
1771   });
1772 
1773   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
1774 
1775   SmallVector<SDValue> MaskVals;
1776   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
1777   // merged with a second vrgather.
1778   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
1779 
1780   // By default we preserve the original operand order, and use a mask to
1781   // select LHS as true and RHS as false. However, since RVV vector selects may
1782   // feature splats but only on the LHS, we may choose to invert our mask and
1783   // instead select between RHS and LHS.
1784   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
1785   bool InvertMask = IsSelect == SwapOps;
1786 
1787   // Now construct the mask that will be used by the vselect or blended
1788   // vrgather operation. For vrgathers, construct the appropriate indices into
1789   // each vector.
1790   for (int MaskIndex : SVN->getMask()) {
1791     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
1792     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
1793     if (!IsSelect) {
1794       bool IsLHS = MaskIndex < (int)NumElts;
1795       // For "undef" elements of -1, shuffle in element 0 instead.
1796       GatherIndicesLHS.push_back(
1797           DAG.getConstant(IsLHS ? std::max(MaskIndex, 0) : 0, DL, XLenVT));
1798       // TODO: If we're masking out unused elements anyway, it might produce
1799       // better code if we use the most-common element index instead of 0.
1800       GatherIndicesRHS.push_back(
1801           DAG.getConstant(IsLHS ? 0 : MaskIndex - NumElts, DL, XLenVT));
1802     }
1803   }
1804 
1805   if (SwapOps) {
1806     std::swap(V1, V2);
1807     std::swap(GatherIndicesLHS, GatherIndicesRHS);
1808   }
1809 
1810   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
1811   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
1812   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
1813 
1814   if (IsSelect)
1815     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
1816 
1817   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
1818     // On such a large vector we're unable to use i8 as the index type.
1819     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
1820     // may involve vector splitting if we're already at LMUL=8, or our
1821     // user-supplied maximum fixed-length LMUL.
1822     return SDValue();
1823   }
1824 
1825   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
1826   MVT IndexVT = VT.changeTypeToInteger();
1827   // Since we can't introduce illegal index types at this stage, use i16 and
1828   // vrgatherei16 if the corresponding index type for plain vrgather is greater
1829   // than XLenVT.
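  // For example, shuffling v4i64 on RV32 would need an i64 index type, which
  // we can't introduce here; a v4i16 index vector with vrgatherei16 is used
  // instead.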
1830   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
1831     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
1832     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
1833   }
1834 
1835   MVT IndexContainerVT =
1836       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
1837 
1838   SDValue Gather;
1839   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
1840   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
1841   if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
1842     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
1843   } else {
1844     SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
1845     LHSIndices =
1846         convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
1847 
1848     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1849     Gather =
1850         DAG.getNode(GatherOpc, DL, ContainerVT, V1, LHSIndices, TrueMask, VL);
1851   }
1852 
1853   // If a second vector operand is used by this shuffle, blend it in with an
1854   // additional vrgather.
1855   if (!V2.isUndef()) {
1856     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
1857     SelectMask =
1858         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
1859 
1860     SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
1861     RHSIndices =
1862         convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
1863 
1864     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
1865     V2 = DAG.getNode(GatherOpc, DL, ContainerVT, V2, RHSIndices, TrueMask, VL);
1866     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
1867                          Gather, VL);
1868   }
1869 
1870   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1871 }
1872 
1873 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
1874                                      SDLoc DL, SelectionDAG &DAG,
1875                                      const RISCVSubtarget &Subtarget) {
1876   if (VT.isScalableVector())
1877     return DAG.getFPExtendOrRound(Op, DL, VT);
1878   assert(VT.isFixedLengthVector() &&
1879          "Unexpected value type for RVV FP extend/round lowering");
1880   SDValue Mask, VL;
1881   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1882   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
1883                         ? RISCVISD::FP_EXTEND_VL
1884                         : RISCVISD::FP_ROUND_VL;
1885   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
1886 }
1887 
1888 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
1889                                             SelectionDAG &DAG) const {
1890   switch (Op.getOpcode()) {
1891   default:
1892     report_fatal_error("unimplemented operand");
1893   case ISD::GlobalAddress:
1894     return lowerGlobalAddress(Op, DAG);
1895   case ISD::BlockAddress:
1896     return lowerBlockAddress(Op, DAG);
1897   case ISD::ConstantPool:
1898     return lowerConstantPool(Op, DAG);
1899   case ISD::JumpTable:
1900     return lowerJumpTable(Op, DAG);
1901   case ISD::GlobalTLSAddress:
1902     return lowerGlobalTLSAddress(Op, DAG);
1903   case ISD::SELECT:
1904     return lowerSELECT(Op, DAG);
1905   case ISD::BRCOND:
1906     return lowerBRCOND(Op, DAG);
1907   case ISD::VASTART:
1908     return lowerVASTART(Op, DAG);
1909   case ISD::FRAMEADDR:
1910     return lowerFRAMEADDR(Op, DAG);
1911   case ISD::RETURNADDR:
1912     return lowerRETURNADDR(Op, DAG);
1913   case ISD::SHL_PARTS:
1914     return lowerShiftLeftParts(Op, DAG);
1915   case ISD::SRA_PARTS:
1916     return lowerShiftRightParts(Op, DAG, true);
1917   case ISD::SRL_PARTS:
1918     return lowerShiftRightParts(Op, DAG, false);
1919   case ISD::BITCAST: {
1920     SDLoc DL(Op);
1921     EVT VT = Op.getValueType();
1922     SDValue Op0 = Op.getOperand(0);
1923     EVT Op0VT = Op0.getValueType();
1924     MVT XLenVT = Subtarget.getXLenVT();
1925     if (VT.isFixedLengthVector()) {
1926       // We can handle fixed length vector bitcasts with a simple replacement
1927       // in isel.
1928       if (Op0VT.isFixedLengthVector())
1929         return Op;
1930       // When bitcasting from scalar to fixed-length vector, insert the scalar
1931       // into a one-element vector of the result type, and perform a vector
1932       // bitcast.
1933       if (!Op0VT.isVector()) {
1934         auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
1935         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
1936                                               DAG.getUNDEF(BVT), Op0,
1937                                               DAG.getConstant(0, DL, XLenVT)));
1938       }
1939       return SDValue();
1940     }
1941     // Custom-legalize bitcasts from fixed-length vector types to scalar types
1942     // thus: bitcast the vector to a one-element vector type whose element type
1943     // is the same as the result type, and extract the first element.
1944     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
1945       LLVMContext &Context = *DAG.getContext();
1946       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
1947       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
1948                          DAG.getConstant(0, DL, XLenVT));
1949     }
1950     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
1951       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
1952       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
1953       return FPConv;
1954     }
1955     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
1956         Subtarget.hasStdExtF()) {
1957       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
1958       SDValue FPConv =
1959           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
1960       return FPConv;
1961     }
1962     return SDValue();
1963   }
1964   case ISD::INTRINSIC_WO_CHAIN:
1965     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
1966   case ISD::INTRINSIC_W_CHAIN:
1967     return LowerINTRINSIC_W_CHAIN(Op, DAG);
1968   case ISD::BSWAP:
1969   case ISD::BITREVERSE: {
1970     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
1971     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
1972     MVT VT = Op.getSimpleValueType();
1973     SDLoc DL(Op);
1974     // Start with the maximum immediate value which is the bitwidth - 1.
1975     unsigned Imm = VT.getSizeInBits() - 1;
1976     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
1977     if (Op.getOpcode() == ISD::BSWAP)
1978       Imm &= ~0x7U;
1979     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
1980                        DAG.getConstant(Imm, DL, VT));
1981   }
1982   case ISD::FSHL:
1983   case ISD::FSHR: {
1984     MVT VT = Op.getSimpleValueType();
1985     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
1986     SDLoc DL(Op);
1987     if (Op.getOperand(2).getOpcode() == ISD::Constant)
1988       return Op;
1989     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
1990     // use log2(XLen) bits. Mask the shift amount accordingly.
1991     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
1992     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
1993                                 DAG.getConstant(ShAmtWidth, DL, VT));
1994     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
1995     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
1996   }
1997   case ISD::TRUNCATE: {
1998     SDLoc DL(Op);
1999     MVT VT = Op.getSimpleValueType();
2000     // Only custom-lower vector truncates
2001     if (!VT.isVector())
2002       return Op;
2003 
2004     // Truncates to mask types are handled differently
2005     if (VT.getVectorElementType() == MVT::i1)
2006       return lowerVectorMaskTrunc(Op, DAG);
2007 
2008     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
2009     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
2010     // truncate by one power of two at a time.
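    // For example, a v4i32 -> v4i8 truncate halves the element type one step
    // at a time (i32 -> i16 -> i8), emitting one TRUNCATE_VECTOR_VL node per
    // step.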
2011     MVT DstEltVT = VT.getVectorElementType();
2012 
2013     SDValue Src = Op.getOperand(0);
2014     MVT SrcVT = Src.getSimpleValueType();
2015     MVT SrcEltVT = SrcVT.getVectorElementType();
2016 
2017     assert(DstEltVT.bitsLT(SrcEltVT) &&
2018            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
2019            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
2020            "Unexpected vector truncate lowering");
2021 
2022     MVT ContainerVT = SrcVT;
2023     if (SrcVT.isFixedLengthVector()) {
2024       ContainerVT = getContainerForFixedLengthVector(SrcVT);
2025       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2026     }
2027 
2028     SDValue Result = Src;
2029     SDValue Mask, VL;
2030     std::tie(Mask, VL) =
2031         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
2032     LLVMContext &Context = *DAG.getContext();
2033     const ElementCount Count = ContainerVT.getVectorElementCount();
2034     do {
2035       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
2036       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
2037       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
2038                            Mask, VL);
2039     } while (SrcEltVT != DstEltVT);
2040 
2041     if (SrcVT.isFixedLengthVector())
2042       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
2043 
2044     return Result;
2045   }
2046   case ISD::ANY_EXTEND:
2047   case ISD::ZERO_EXTEND:
2048     if (Op.getOperand(0).getValueType().isVector() &&
2049         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2050       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
2051     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
2052   case ISD::SIGN_EXTEND:
2053     if (Op.getOperand(0).getValueType().isVector() &&
2054         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2055       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
2056     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
2057   case ISD::SPLAT_VECTOR_PARTS:
2058     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
2059   case ISD::INSERT_VECTOR_ELT:
2060     return lowerINSERT_VECTOR_ELT(Op, DAG);
2061   case ISD::EXTRACT_VECTOR_ELT:
2062     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
2063   case ISD::VSCALE: {
2064     MVT VT = Op.getSimpleValueType();
2065     SDLoc DL(Op);
2066     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
2067     // We define our scalable vector types for lmul=1 to use a 64 bit known
2068     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
2069     // vscale as VLENB / 8.
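    // For example, with VLEN = 128 the vlenb CSR reads 16, so
    // vscale = 16 >> 3 = 2 and <vscale x 2 x i32> holds 4 elements.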
2070     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
2071     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
2072                                  DAG.getConstant(3, DL, VT));
2073     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
2074   }
2075   case ISD::FP_EXTEND: {
2076     // RVV can only do fp_extend to types double the size of the source. We
2077     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
2078     // via f32.
2079     SDLoc DL(Op);
2080     MVT VT = Op.getSimpleValueType();
2081     SDValue Src = Op.getOperand(0);
2082     MVT SrcVT = Src.getSimpleValueType();
2083 
2084     // Prepare any fixed-length vector operands.
2085     MVT ContainerVT = VT;
2086     if (SrcVT.isFixedLengthVector()) {
2087       ContainerVT = getContainerForFixedLengthVector(VT);
2088       MVT SrcContainerVT =
2089           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2090       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2091     }
2092 
2093     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2094         SrcVT.getVectorElementType() != MVT::f16) {
2095       // For scalable vectors, we only need to close the gap between
2096       // vXf16->vXf64.
2097       if (!VT.isFixedLengthVector())
2098         return Op;
2099       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2100       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2101       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2102     }
2103 
2104     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2105     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2106     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2107         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2108 
2109     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2110                                            DL, DAG, Subtarget);
2111     if (VT.isFixedLengthVector())
2112       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2113     return Extend;
2114   }
2115   case ISD::FP_ROUND: {
2116     // RVV can only do fp_round to types half the size of the source. We
2117     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2118     // conversion instruction.
2119     SDLoc DL(Op);
2120     MVT VT = Op.getSimpleValueType();
2121     SDValue Src = Op.getOperand(0);
2122     MVT SrcVT = Src.getSimpleValueType();
2123 
2124     // Prepare any fixed-length vector operands.
2125     MVT ContainerVT = VT;
2126     if (VT.isFixedLengthVector()) {
2127       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2128       ContainerVT =
2129           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2130       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2131     }
2132 
2133     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2134         SrcVT.getVectorElementType() != MVT::f64) {
2135       // For scalable vectors, we only need to close the gap between
2136       // vXf64<->vXf16.
2137       if (!VT.isFixedLengthVector())
2138         return Op;
2139       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2140       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2141       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2142     }
2143 
2144     SDValue Mask, VL;
2145     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2146 
2147     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2148     SDValue IntermediateRound =
2149         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2150     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2151                                           DL, DAG, Subtarget);
2152 
2153     if (VT.isFixedLengthVector())
2154       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2155     return Round;
2156   }
2157   case ISD::FP_TO_SINT:
2158   case ISD::FP_TO_UINT:
2159   case ISD::SINT_TO_FP:
2160   case ISD::UINT_TO_FP: {
2161     // RVV can only do fp<->int conversions to types half/double the size of
2162     // the source. We custom-lower any conversions that do two hops into
2163     // sequences.
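    // For example (illustrative), a v4i8 -> v4f32 uint_to_fp is lowered as a
    // zero_extend to v4i32 followed by a v4i32 -> v4f32 conversion, while a
    // v4f32 -> v4i8 fp_to_uint first converts to v4i16 and then truncates.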
2164     MVT VT = Op.getSimpleValueType();
2165     if (!VT.isVector())
2166       return Op;
2167     SDLoc DL(Op);
2168     SDValue Src = Op.getOperand(0);
2169     MVT EltVT = VT.getVectorElementType();
2170     MVT SrcVT = Src.getSimpleValueType();
2171     MVT SrcEltVT = SrcVT.getVectorElementType();
2172     unsigned EltSize = EltVT.getSizeInBits();
2173     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2174     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2175            "Unexpected vector element types");
2176 
2177     bool IsInt2FP = SrcEltVT.isInteger();
2178     // Widening conversions
2179     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2180       if (IsInt2FP) {
2181         // Do a regular integer sign/zero extension then convert to float.
2182         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2183                                       VT.getVectorElementCount());
2184         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2185                                  ? ISD::ZERO_EXTEND
2186                                  : ISD::SIGN_EXTEND;
2187         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2188         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2189       }
2190       // FP2Int
2191       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2192       // Do one doubling fp_extend then complete the operation by converting
2193       // to int.
2194       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2195       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2196       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2197     }
2198 
2199     // Narrowing conversions
2200     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2201       if (IsInt2FP) {
2202         // One narrowing int_to_fp, then an fp_round.
2203         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2204         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2205         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2206         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2207       }
2208       // FP2Int
2209       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2210       // representable by the integer, the result is poison.
2211       MVT IVecVT =
2212           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2213                            VT.getVectorElementCount());
2214       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2215       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2216     }
2217 
2218     // Scalable vectors can exit here. Patterns will handle equally-sized
2219     // conversions, as well as halving/doubling ones.
2220     if (!VT.isFixedLengthVector())
2221       return Op;
2222 
2223     // For fixed-length vectors we lower to a custom "VL" node.
2224     unsigned RVVOpc = 0;
2225     switch (Op.getOpcode()) {
2226     default:
2227       llvm_unreachable("Impossible opcode");
2228     case ISD::FP_TO_SINT:
2229       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2230       break;
2231     case ISD::FP_TO_UINT:
2232       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2233       break;
2234     case ISD::SINT_TO_FP:
2235       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2236       break;
2237     case ISD::UINT_TO_FP:
2238       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2239       break;
2240     }
2241 
2242     MVT ContainerVT, SrcContainerVT;
2243     // Derive the reference container type from the larger vector type.
2244     if (SrcEltSize > EltSize) {
2245       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2246       ContainerVT =
2247           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2248     } else {
2249       ContainerVT = getContainerForFixedLengthVector(VT);
2250       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2251     }
2252 
2253     SDValue Mask, VL;
2254     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2255 
2256     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2257     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2258     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2259   }
2260   case ISD::VECREDUCE_ADD:
2261   case ISD::VECREDUCE_UMAX:
2262   case ISD::VECREDUCE_SMAX:
2263   case ISD::VECREDUCE_UMIN:
2264   case ISD::VECREDUCE_SMIN:
2265     return lowerVECREDUCE(Op, DAG);
2266   case ISD::VECREDUCE_AND:
2267   case ISD::VECREDUCE_OR:
2268   case ISD::VECREDUCE_XOR:
2269     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2270       return lowerVectorMaskVECREDUCE(Op, DAG);
2271     return lowerVECREDUCE(Op, DAG);
2272   case ISD::VECREDUCE_FADD:
2273   case ISD::VECREDUCE_SEQ_FADD:
2274   case ISD::VECREDUCE_FMIN:
2275   case ISD::VECREDUCE_FMAX:
2276     return lowerFPVECREDUCE(Op, DAG);
2277   case ISD::INSERT_SUBVECTOR:
2278     return lowerINSERT_SUBVECTOR(Op, DAG);
2279   case ISD::EXTRACT_SUBVECTOR:
2280     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2281   case ISD::STEP_VECTOR:
2282     return lowerSTEP_VECTOR(Op, DAG);
2283   case ISD::VECTOR_REVERSE:
2284     return lowerVECTOR_REVERSE(Op, DAG);
2285   case ISD::BUILD_VECTOR:
2286     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2287   case ISD::SPLAT_VECTOR:
2288     if (Op.getValueType().getVectorElementType() == MVT::i1)
2289       return lowerVectorMaskSplat(Op, DAG);
2290     return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
2291   case ISD::VECTOR_SHUFFLE:
2292     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2293   case ISD::CONCAT_VECTORS: {
2294     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2295     // better than going through the stack, as the default expansion does.
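    // For example, v8i32 = concat_vectors t0, t1 (with t0 and t1 of type
    // v4i32) becomes an INSERT_SUBVECTOR of t0 at index 0 followed by an
    // INSERT_SUBVECTOR of t1 at index 4 into an undef v8i32.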
2296     SDLoc DL(Op);
2297     MVT VT = Op.getSimpleValueType();
2298     unsigned NumOpElts =
2299         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2300     SDValue Vec = DAG.getUNDEF(VT);
2301     for (const auto &OpIdx : enumerate(Op->ops()))
2302       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2303                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2304     return Vec;
2305   }
2306   case ISD::LOAD:
2307     return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2308   case ISD::STORE:
2309     return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2310   case ISD::MLOAD:
2311     return lowerMLOAD(Op, DAG);
2312   case ISD::MSTORE:
2313     return lowerMSTORE(Op, DAG);
2314   case ISD::SETCC:
2315     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2316   case ISD::ADD:
2317     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2318   case ISD::SUB:
2319     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2320   case ISD::MUL:
2321     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2322   case ISD::MULHS:
2323     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2324   case ISD::MULHU:
2325     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2326   case ISD::AND:
2327     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2328                                               RISCVISD::AND_VL);
2329   case ISD::OR:
2330     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2331                                               RISCVISD::OR_VL);
2332   case ISD::XOR:
2333     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2334                                               RISCVISD::XOR_VL);
2335   case ISD::SDIV:
2336     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2337   case ISD::SREM:
2338     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2339   case ISD::UDIV:
2340     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2341   case ISD::UREM:
2342     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2343   case ISD::SHL:
2344     return lowerToScalableOp(Op, DAG, RISCVISD::SHL_VL);
2345   case ISD::SRA:
2346     return lowerToScalableOp(Op, DAG, RISCVISD::SRA_VL);
2347   case ISD::SRL:
2348     return lowerToScalableOp(Op, DAG, RISCVISD::SRL_VL);
2349   case ISD::FADD:
2350     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2351   case ISD::FSUB:
2352     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2353   case ISD::FMUL:
2354     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2355   case ISD::FDIV:
2356     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2357   case ISD::FNEG:
2358     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2359   case ISD::FABS:
2360     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2361   case ISD::FSQRT:
2362     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2363   case ISD::FMA:
2364     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2365   case ISD::SMIN:
2366     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2367   case ISD::SMAX:
2368     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2369   case ISD::UMIN:
2370     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2371   case ISD::UMAX:
2372     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2373   case ISD::FMINNUM:
2374     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
2375   case ISD::FMAXNUM:
2376     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
2377   case ISD::ABS:
2378     return lowerABS(Op, DAG);
2379   case ISD::VSELECT:
2380     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2381   case ISD::FCOPYSIGN:
2382     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2383   case ISD::MGATHER:
2384     return lowerMGATHER(Op, DAG);
2385   case ISD::MSCATTER:
2386     return lowerMSCATTER(Op, DAG);
2387   case ISD::FLT_ROUNDS_:
2388     return lowerGET_ROUNDING(Op, DAG);
2389   case ISD::SET_ROUNDING:
2390     return lowerSET_ROUNDING(Op, DAG);
2391   case ISD::VP_ADD:
2392     return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
2393   case ISD::VP_SUB:
2394     return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
2395   case ISD::VP_MUL:
2396     return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
2397   case ISD::VP_SDIV:
2398     return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
2399   case ISD::VP_UDIV:
2400     return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
2401   case ISD::VP_SREM:
2402     return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
2403   case ISD::VP_UREM:
2404     return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
2405   case ISD::VP_AND:
2406     return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
2407   case ISD::VP_OR:
2408     return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
2409   case ISD::VP_XOR:
2410     return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
2411   case ISD::VP_ASHR:
2412     return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
2413   case ISD::VP_LSHR:
2414     return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
2415   case ISD::VP_SHL:
2416     return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
2417   }
2418 }
2419 
2420 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2421                              SelectionDAG &DAG, unsigned Flags) {
2422   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2423 }
2424 
2425 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2426                              SelectionDAG &DAG, unsigned Flags) {
2427   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2428                                    Flags);
2429 }
2430 
2431 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2432                              SelectionDAG &DAG, unsigned Flags) {
2433   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2434                                    N->getOffset(), Flags);
2435 }
2436 
2437 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2438                              SelectionDAG &DAG, unsigned Flags) {
2439   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2440 }
2441 
2442 template <class NodeTy>
2443 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2444                                      bool IsLocal) const {
2445   SDLoc DL(N);
2446   EVT Ty = getPointerTy(DAG.getDataLayout());
2447 
2448   if (isPositionIndependent()) {
2449     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2450     if (IsLocal)
2451       // Use PC-relative addressing to access the symbol. This generates the
2452       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
2453       // %pcrel_lo(auipc)).
2454       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2455 
2456     // Use PC-relative addressing to access the GOT for this symbol, then load
2457     // the address from the GOT. This generates the pattern (PseudoLA sym),
2458     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
2459     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
2460   }
2461 
2462   switch (getTargetMachine().getCodeModel()) {
2463   default:
2464     report_fatal_error("Unsupported code model for lowering");
2465   case CodeModel::Small: {
2466     // Generate a sequence for accessing addresses within the first 2 GiB of
2467     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
2468     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
2469     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
2470     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2471     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
2472   }
2473   case CodeModel::Medium: {
2474     // Generate a sequence for accessing addresses within any 2GiB range within
2475     // the address space. This generates the pattern (PseudoLLA sym), which
2476     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
2477     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2478     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2479   }
2480   }
2481 }
2482 
2483 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
2484                                                 SelectionDAG &DAG) const {
2485   SDLoc DL(Op);
2486   EVT Ty = Op.getValueType();
2487   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2488   int64_t Offset = N->getOffset();
2489   MVT XLenVT = Subtarget.getXLenVT();
2490 
2491   const GlobalValue *GV = N->getGlobal();
2492   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
2493   SDValue Addr = getAddr(N, DAG, IsLocal);
2494 
2495   // In order to maximise the opportunity for common subexpression elimination,
2496   // emit a separate ADD node for the global address offset instead of folding
2497   // it in the global address node. Later peephole optimisations may choose to
2498   // fold it back in when profitable.
2499   if (Offset != 0)
2500     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2501                        DAG.getConstant(Offset, DL, XLenVT));
2502   return Addr;
2503 }
2504 
2505 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
2506                                                SelectionDAG &DAG) const {
2507   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2508 
2509   return getAddr(N, DAG);
2510 }
2511 
2512 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
2513                                                SelectionDAG &DAG) const {
2514   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2515 
2516   return getAddr(N, DAG);
2517 }
2518 
2519 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
2520                                             SelectionDAG &DAG) const {
2521   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2522 
2523   return getAddr(N, DAG);
2524 }
2525 
2526 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
2527                                               SelectionDAG &DAG,
2528                                               bool UseGOT) const {
2529   SDLoc DL(N);
2530   EVT Ty = getPointerTy(DAG.getDataLayout());
2531   const GlobalValue *GV = N->getGlobal();
2532   MVT XLenVT = Subtarget.getXLenVT();
2533 
2534   if (UseGOT) {
2535     // Use PC-relative addressing to access the GOT for this TLS symbol, then
2536     // load the address from the GOT and add the thread pointer. This generates
2537     // the pattern (PseudoLA_TLS_IE sym), which expands to
2538     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
2539     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2540     SDValue Load =
2541         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
2542 
2543     // Add the thread pointer.
2544     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2545     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
2546   }
2547 
2548   // Generate a sequence for accessing the address relative to the thread
2549   // pointer, with the appropriate adjustment for the thread pointer offset.
2550   // This generates the pattern
2551   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
2552   SDValue AddrHi =
2553       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
2554   SDValue AddrAdd =
2555       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
2556   SDValue AddrLo =
2557       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
2558 
2559   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2560   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2561   SDValue MNAdd = SDValue(
2562       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
2563       0);
2564   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
2565 }
2566 
2567 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
2568                                                SelectionDAG &DAG) const {
2569   SDLoc DL(N);
2570   EVT Ty = getPointerTy(DAG.getDataLayout());
2571   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
2572   const GlobalValue *GV = N->getGlobal();
2573 
2574   // Use a PC-relative addressing mode to access the global dynamic GOT address.
2575   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
2576   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
2577   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2578   SDValue Load =
2579       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
2580 
2581   // Prepare argument list to generate call.
2582   ArgListTy Args;
2583   ArgListEntry Entry;
2584   Entry.Node = Load;
2585   Entry.Ty = CallTy;
2586   Args.push_back(Entry);
2587 
2588   // Setup call to __tls_get_addr.
2589   TargetLowering::CallLoweringInfo CLI(DAG);
2590   CLI.setDebugLoc(DL)
2591       .setChain(DAG.getEntryNode())
2592       .setLibCallee(CallingConv::C, CallTy,
2593                     DAG.getExternalSymbol("__tls_get_addr", Ty),
2594                     std::move(Args));
2595 
2596   return LowerCallTo(CLI).first;
2597 }
2598 
2599 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2600                                                    SelectionDAG &DAG) const {
2601   SDLoc DL(Op);
2602   EVT Ty = Op.getValueType();
2603   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2604   int64_t Offset = N->getOffset();
2605   MVT XLenVT = Subtarget.getXLenVT();
2606 
2607   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2608 
2609   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2610       CallingConv::GHC)
2611     report_fatal_error("In GHC calling convention TLS is not supported");
2612 
2613   SDValue Addr;
2614   switch (Model) {
2615   case TLSModel::LocalExec:
2616     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2617     break;
2618   case TLSModel::InitialExec:
2619     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2620     break;
2621   case TLSModel::LocalDynamic:
2622   case TLSModel::GeneralDynamic:
2623     Addr = getDynamicTLSAddr(N, DAG);
2624     break;
2625   }
2626 
2627   // In order to maximise the opportunity for common subexpression elimination,
2628   // emit a separate ADD node for the global address offset instead of folding
2629   // it in the global address node. Later peephole optimisations may choose to
2630   // fold it back in when profitable.
2631   if (Offset != 0)
2632     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2633                        DAG.getConstant(Offset, DL, XLenVT));
2634   return Addr;
2635 }
2636 
2637 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2638   SDValue CondV = Op.getOperand(0);
2639   SDValue TrueV = Op.getOperand(1);
2640   SDValue FalseV = Op.getOperand(2);
2641   SDLoc DL(Op);
2642   MVT XLenVT = Subtarget.getXLenVT();
2643 
2644   // If the result type is XLenVT and CondV is the output of a SETCC node
2645   // which also operated on XLenVT inputs, then merge the SETCC node into the
2646   // lowered RISCVISD::SELECT_CC to take advantage of the integer
2647   // compare+branch instructions. i.e.:
2648   // (select (setcc lhs, rhs, cc), truev, falsev)
2649   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
2650   if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
2651       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
2652     SDValue LHS = CondV.getOperand(0);
2653     SDValue RHS = CondV.getOperand(1);
2654     auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
2655     ISD::CondCode CCVal = CC->get();
2656 
2657     // Special case for a select of two constants that have a difference of 1.
2658     // Normally this is done by DAGCombine, but if the select is introduced by
2659     // type legalization or op legalization, we miss it. Restricting to SETLT
2660     // case for now because that is what signed saturating add/sub need.
2661     // FIXME: We don't need the condition to be SETLT or even a SETCC,
2662     // but we would probably want to swap the true/false values if the condition
2663     // is SETGE/SETLE to avoid an XORI.
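         // CondV here is the result of an XLenVT SETCC, so it is known to be 0 or
         // 1. That lets the select be folded into simple arithmetic:
         //   (select cond, c+1, c) -> (add cond, c)
         //   (select cond, c-1, c) -> (sub c, cond)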
2664     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
2665         CCVal == ISD::SETLT) {
2666       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
2667       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
2668       if (TrueVal - 1 == FalseVal)
2669         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
2670       if (TrueVal + 1 == FalseVal)
2671         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
2672     }
2673 
2674     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2675 
2676     SDValue TargetCC = DAG.getTargetConstant(CCVal, DL, XLenVT);
2677     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
2678     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2679   }
2680 
2681   // Otherwise:
2682   // (select condv, truev, falsev)
2683   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
2684   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
2685   SDValue SetNE = DAG.getTargetConstant(ISD::SETNE, DL, XLenVT);
2686 
2687   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
2688 
2689   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2690 }
2691 
2692 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2693   SDValue CondV = Op.getOperand(1);
2694   SDLoc DL(Op);
2695   MVT XLenVT = Subtarget.getXLenVT();
2696 
2697   if (CondV.getOpcode() == ISD::SETCC &&
2698       CondV.getOperand(0).getValueType() == XLenVT) {
2699     SDValue LHS = CondV.getOperand(0);
2700     SDValue RHS = CondV.getOperand(1);
2701     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
2702 
2703     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2704 
2705     SDValue TargetCC = DAG.getCondCode(CCVal);
2706     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2707                        LHS, RHS, TargetCC, Op.getOperand(2));
2708   }
2709 
2710   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2711                      CondV, DAG.getConstant(0, DL, XLenVT),
2712                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
2713 }
2714 
2715 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2716   MachineFunction &MF = DAG.getMachineFunction();
2717   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
2718 
2719   SDLoc DL(Op);
2720   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2721                                  getPointerTy(MF.getDataLayout()));
2722 
2723   // vastart just stores the address of the VarArgsFrameIndex slot into the
2724   // memory location argument.
2725   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2726   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2727                       MachinePointerInfo(SV));
2728 }
2729 
2730 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
2731                                             SelectionDAG &DAG) const {
2732   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2733   MachineFunction &MF = DAG.getMachineFunction();
2734   MachineFrameInfo &MFI = MF.getFrameInfo();
2735   MFI.setFrameAddressIsTaken(true);
2736   Register FrameReg = RI.getFrameRegister(MF);
2737   int XLenInBytes = Subtarget.getXLen() / 8;
2738 
2739   EVT VT = Op.getValueType();
2740   SDLoc DL(Op);
2741   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
2742   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2743   while (Depth--) {
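         // Each frame saves the previous frame pointer immediately below the
         // saved return address, so it can be reloaded from the current frame
         // address at offset -2*XLenInBytes.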
2744     int Offset = -(XLenInBytes * 2);
2745     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
2746                               DAG.getIntPtrConstant(Offset, DL));
2747     FrameAddr =
2748         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2749   }
2750   return FrameAddr;
2751 }
2752 
2753 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
2754                                              SelectionDAG &DAG) const {
2755   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2756   MachineFunction &MF = DAG.getMachineFunction();
2757   MachineFrameInfo &MFI = MF.getFrameInfo();
2758   MFI.setReturnAddressIsTaken(true);
2759   MVT XLenVT = Subtarget.getXLenVT();
2760   int XLenInBytes = Subtarget.getXLen() / 8;
2761 
2762   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2763     return SDValue();
2764 
2765   EVT VT = Op.getValueType();
2766   SDLoc DL(Op);
2767   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2768   if (Depth) {
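         // The return address of the requested parent frame is saved immediately
         // below that frame's frame address, at offset -XLenInBytes.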
2769     int Off = -XLenInBytes;
2770     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
2771     SDValue Offset = DAG.getConstant(Off, DL, VT);
2772     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
2773                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
2774                        MachinePointerInfo());
2775   }
2776 
2777   // Return the value of the return address register, marking it an implicit
2778   // live-in.
2779   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
2780   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
2781 }
2782 
2783 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
2784                                                  SelectionDAG &DAG) const {
2785   SDLoc DL(Op);
2786   SDValue Lo = Op.getOperand(0);
2787   SDValue Hi = Op.getOperand(1);
2788   SDValue Shamt = Op.getOperand(2);
2789   EVT VT = Lo.getValueType();
2790 
2791   // if Shamt-XLEN < 0: // Shamt < XLEN
2792   //   Lo = Lo << Shamt
2793   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
2794   // else:
2795   //   Lo = 0
2796   //   Hi = Lo << (Shamt-XLEN)
2797 
2798   SDValue Zero = DAG.getConstant(0, DL, VT);
2799   SDValue One = DAG.getConstant(1, DL, VT);
2800   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2801   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2802   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2803   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2804 
2805   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
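       // Shift Lo right by 1 and then by (XLEN-1 - Shamt), rather than directly
       // by (XLEN - Shamt), so the shift amount stays in range when Shamt is 0.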
2806   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
2807   SDValue ShiftRightLo =
2808       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
2809   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2810   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2811   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
2812 
2813   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2814 
2815   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
2816   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2817 
2818   SDValue Parts[2] = {Lo, Hi};
2819   return DAG.getMergeValues(Parts, DL);
2820 }
2821 
2822 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2823                                                   bool IsSRA) const {
2824   SDLoc DL(Op);
2825   SDValue Lo = Op.getOperand(0);
2826   SDValue Hi = Op.getOperand(1);
2827   SDValue Shamt = Op.getOperand(2);
2828   EVT VT = Lo.getValueType();
2829 
2830   // SRA expansion:
2831   //   if Shamt-XLEN < 0: // Shamt < XLEN
2832   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2833   //     Hi = Hi >>s Shamt
2834   //   else:
2835   //     Lo = Hi >>s (Shamt-XLEN);
2836   //     Hi = Hi >>s (XLEN-1)
2837   //
2838   // SRL expansion:
2839   //   if Shamt-XLEN < 0: // Shamt < XLEN
2840   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2841   //     Hi = Hi >>u Shamt
2842   //   else:
2843   //     Lo = Hi >>u (Shamt-XLEN);
2844   //     Hi = 0;
2845 
2846   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
2847 
2848   SDValue Zero = DAG.getConstant(0, DL, VT);
2849   SDValue One = DAG.getConstant(1, DL, VT);
2850   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2851   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2852   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2853   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2854 
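       // As in lowerShiftLeftParts, split the (XLEN - Shamt) shift of Hi into a
       // shift by 1 and a shift by (XLEN-1 - Shamt) to keep the amount in range.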
2855   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2856   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
2857   SDValue ShiftLeftHi =
2858       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
2859   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
2860   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
2861   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
2862   SDValue HiFalse =
2863       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
2864 
2865   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2866 
2867   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
2868   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2869 
2870   SDValue Parts[2] = {Lo, Hi};
2871   return DAG.getMergeValues(Parts, DL);
2872 }
2873 
2874 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
2875 // legal equivalently-sized i8 type, so we can use that as a go-between.
2876 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
2877                                                   SelectionDAG &DAG) const {
2878   SDLoc DL(Op);
2879   MVT VT = Op.getSimpleValueType();
2880   SDValue SplatVal = Op.getOperand(0);
2881   // All-zeros or all-ones splats are handled specially.
2882   if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
2883     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
2884     return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
2885   }
2886   if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
2887     SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
2888     return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
2889   }
2890   MVT XLenVT = Subtarget.getXLenVT();
2891   assert(SplatVal.getValueType() == XLenVT &&
2892          "Unexpected type for i1 splat value");
2893   MVT InterVT = VT.changeVectorElementType(MVT::i8);
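       // Otherwise splat the low bit of the scalar into an i8 vector and compare
       // it against zero to form the mask.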
2894   SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
2895                          DAG.getConstant(1, DL, XLenVT));
2896   SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
2897   SDValue Zero = DAG.getConstant(0, DL, InterVT);
2898   return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
2899 }
2900 
2901 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
2902 // illegal (currently only vXi64 RV32).
2903 // FIXME: We could also catch non-constant sign-extended i32 values and lower
2904 // them to SPLAT_VECTOR_I64
2905 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
2906                                                      SelectionDAG &DAG) const {
2907   SDLoc DL(Op);
2908   MVT VecVT = Op.getSimpleValueType();
2909   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
2910          "Unexpected SPLAT_VECTOR_PARTS lowering");
2911 
2912   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
2913   SDValue Lo = Op.getOperand(0);
2914   SDValue Hi = Op.getOperand(1);
2915 
2916   if (VecVT.isFixedLengthVector()) {
2917     MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
2919     SDValue Mask, VL;
2920     std::tie(Mask, VL) =
2921         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2922 
2923     SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
2924     return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
2925   }
2926 
2927   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2928     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2929     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2930     // If Hi is the sign-extension of Lo, lower this as a custom node in order
2931     // to try to match RVV vector/scalar instructions.
2932     if ((LoC >> 31) == HiC)
2933       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2934   }
2935 
2936   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
2937   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
2938       isa<ConstantSDNode>(Hi.getOperand(1)) &&
2939       Hi.getConstantOperandVal(1) == 31)
2940     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2941 
2942   // Fall back to a stack store and stride-x0 vector load. Use X0 as VL.
2943   return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
2944                      DAG.getRegister(RISCV::X0, MVT::i64));
2945 }
2946 
2947 // Custom-lower extensions from mask vectors by using a vselect either with 1
2948 // for zero/any-extension or -1 for sign-extension:
2949 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
2950 // Note that any-extension is lowered identically to zero-extension.
2951 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
2952                                                 int64_t ExtTrueVal) const {
2953   SDLoc DL(Op);
2954   MVT VecVT = Op.getSimpleValueType();
2955   SDValue Src = Op.getOperand(0);
2956   // Only custom-lower extensions from mask types
2957   assert(Src.getValueType().isVector() &&
2958          Src.getValueType().getVectorElementType() == MVT::i1);
2959 
2960   MVT XLenVT = Subtarget.getXLenVT();
2961   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
2962   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
2963 
2964   if (VecVT.isScalableVector()) {
2965     // Be careful not to introduce illegal scalar types at this stage, and be
2966     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
2967     // illegal and must be expanded. Since we know that the constants are
2968     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
2969     bool IsRV32E64 =
2970         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
2971 
2972     if (!IsRV32E64) {
2973       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
2974       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
2975     } else {
2976       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
2977       SplatTrueVal =
2978           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
2979     }
2980 
2981     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
2982   }
2983 
2984   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
2985   MVT I1ContainerVT =
2986       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
2987 
2988   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
2989 
2990   SDValue Mask, VL;
2991   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2992 
2993   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
2994   SplatTrueVal =
2995       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
2996   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
2997                                SplatTrueVal, SplatZero, VL);
2998 
2999   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3000 }
3001 
3002 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3003     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3004   MVT ExtVT = Op.getSimpleValueType();
3005   // Only custom-lower extensions from fixed-length vector types.
3006   if (!ExtVT.isFixedLengthVector())
3007     return Op;
3008   MVT VT = Op.getOperand(0).getSimpleValueType();
3009   // Grab the canonical container type for the extended type. Infer the smaller
3010   // type from that to ensure the same number of vector elements, as we know
3011   // the LMUL will be sufficient to hold the smaller type.
3012   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
3013   // Get the extended container type manually to ensure the same number of
3014   // vector elements between source and dest.
3015   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3016                                      ContainerExtVT.getVectorElementCount());
3017 
3018   SDValue Op1 =
3019       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3020 
3021   SDLoc DL(Op);
3022   SDValue Mask, VL;
3023   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3024 
3025   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3026 
3027   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3028 }
3029 
3030 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3031 // setcc operation:
3032 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
3033 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3034                                                   SelectionDAG &DAG) const {
3035   SDLoc DL(Op);
3036   EVT MaskVT = Op.getValueType();
3037   // Only expect to custom-lower truncations to mask types
3038   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3039          "Unexpected type for vector mask lowering");
3040   SDValue Src = Op.getOperand(0);
3041   MVT VecVT = Src.getSimpleValueType();
3042 
3043   // If this is a fixed vector, we need to convert it to a scalable vector.
3044   MVT ContainerVT = VecVT;
3045   if (VecVT.isFixedLengthVector()) {
3046     ContainerVT = getContainerForFixedLengthVector(VecVT);
3047     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3048   }
3049 
3050   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
3051   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
3052 
3053   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
3054   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
3055 
3056   if (VecVT.isScalableVector()) {
3057     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
3058     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
3059   }
3060 
3061   SDValue Mask, VL;
3062   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3063 
3064   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3065   SDValue Trunc =
3066       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3067   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3068                       DAG.getCondCode(ISD::SETNE), Mask, VL);
3069   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3070 }
3071 
3072 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3073 // first position of a vector, and that vector is slid up to the insert index.
3074 // By limiting the active vector length to index+1 and merging with the
3075 // original vector (with an undisturbed tail policy for elements >= VL), we
3076 // achieve the desired result of leaving all elements untouched except the one
3077 // at VL-1, which is replaced with the desired value.
3078 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3079                                                     SelectionDAG &DAG) const {
3080   SDLoc DL(Op);
3081   MVT VecVT = Op.getSimpleValueType();
3082   SDValue Vec = Op.getOperand(0);
3083   SDValue Val = Op.getOperand(1);
3084   SDValue Idx = Op.getOperand(2);
3085 
3086   if (VecVT.getVectorElementType() == MVT::i1) {
3087     // FIXME: For now we just promote to an i8 vector and insert into that,
3088     // but this is probably not optimal.
3089     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3090     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3091     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
3092     return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
3093   }
3094 
3095   MVT ContainerVT = VecVT;
3096   // If the operand is a fixed-length vector, convert to a scalable one.
3097   if (VecVT.isFixedLengthVector()) {
3098     ContainerVT = getContainerForFixedLengthVector(VecVT);
3099     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3100   }
3101 
3102   MVT XLenVT = Subtarget.getXLenVT();
3103 
3104   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3105   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
3106   // Even i64-element vectors on RV32 can be lowered without scalar
3107   // legalization if the most-significant 32 bits of the value are not affected
3108   // by the sign-extension of the lower 32 bits.
3109   // TODO: We could also catch sign extensions of a 32-bit value.
3110   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3111     const auto *CVal = cast<ConstantSDNode>(Val);
3112     if (isInt<32>(CVal->getSExtValue())) {
3113       IsLegalInsert = true;
3114       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3115     }
3116   }
3117 
3118   SDValue Mask, VL;
3119   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3120 
3121   SDValue ValInVec;
3122 
3123   if (IsLegalInsert) {
3124     unsigned Opc =
3125         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3126     if (isNullConstant(Idx)) {
3127       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3128       if (!VecVT.isFixedLengthVector())
3129         return Vec;
3130       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3131     }
3132     ValInVec =
3133         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3134   } else {
3135     // On RV32, i64-element vectors must be specially handled to place the
3136     // value at element 0, by using two vslide1up instructions in sequence on
3137     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3138     // this.
3139     SDValue One = DAG.getConstant(1, DL, XLenVT);
3140     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3141     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3142     MVT I32ContainerVT =
3143         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3144     SDValue I32Mask =
3145         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3146     // Limit the active VL to two.
3147     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
3148     // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
3149     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3150     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3151                            InsertI64VL);
3152     // First slide in the hi value, then slide the lo value in underneath it.
3153     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3154                            ValHi, I32Mask, InsertI64VL);
3155     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3156                            ValLo, I32Mask, InsertI64VL);
3157     // Bitcast back to the right container type.
3158     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3159   }
3160 
3161   // Now that the value is in a vector, slide it into position.
3162   SDValue InsertVL =
3163       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3164   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3165                                 ValInVec, Idx, Mask, InsertVL);
3166   if (!VecVT.isFixedLengthVector())
3167     return Slideup;
3168   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3169 }
3170 
3171 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3172 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3173 // types this is done using VMV_X_S to allow us to glean information about the
3174 // sign bits of the result.
3175 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3176                                                      SelectionDAG &DAG) const {
3177   SDLoc DL(Op);
3178   SDValue Idx = Op.getOperand(1);
3179   SDValue Vec = Op.getOperand(0);
3180   EVT EltVT = Op.getValueType();
3181   MVT VecVT = Vec.getSimpleValueType();
3182   MVT XLenVT = Subtarget.getXLenVT();
3183 
3184   if (VecVT.getVectorElementType() == MVT::i1) {
3185     // FIXME: For now we just promote to an i8 vector and extract from that,
3186     // but this is probably not optimal.
3187     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3188     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3189     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3190   }
3191 
3192   // If this is a fixed vector, we need to convert it to a scalable vector.
3193   MVT ContainerVT = VecVT;
3194   if (VecVT.isFixedLengthVector()) {
3195     ContainerVT = getContainerForFixedLengthVector(VecVT);
3196     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3197   }
3198 
3199   // If the index is 0, the vector is already in the right position.
3200   if (!isNullConstant(Idx)) {
3201     // Use a VL of 1 to avoid processing more elements than we need.
3202     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3203     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3204     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3205     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3206                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3207   }
3208 
3209   if (!EltVT.isInteger()) {
3210     // Floating-point extracts are handled in TableGen.
3211     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3212                        DAG.getConstant(0, DL, XLenVT));
3213   }
3214 
3215   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3216   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3217 }
3218 
3219 // Some RVV intrinsics may claim that they want an integer operand to be
3220 // promoted or expanded.
3221 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3222                                           const RISCVSubtarget &Subtarget) {
3223   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3224           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3225          "Unexpected opcode");
3226 
3227   if (!Subtarget.hasStdExtV())
3228     return SDValue();
3229 
3230   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3231   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3232   SDLoc DL(Op);
3233 
3234   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3235       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3236   if (!II || !II->SplatOperand)
3237     return SDValue();
3238 
3239   unsigned SplatOp = II->SplatOperand + HasChain;
3240   assert(SplatOp < Op.getNumOperands());
3241 
3242   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3243   SDValue &ScalarOp = Operands[SplatOp];
3244   MVT OpVT = ScalarOp.getSimpleValueType();
3245   MVT XLenVT = Subtarget.getXLenVT();
3246 
3247   // If this isn't a scalar, or its type is XLenVT we're done.
3248   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3249     return SDValue();
3250 
3251   // Simplest case is that the operand needs to be promoted to XLenVT.
3252   if (OpVT.bitsLT(XLenVT)) {
3253     // If the operand is a constant, sign extend to increase our chances
3254     // of being able to use a .vi instruction. ANY_EXTEND would become a
3255     // zero extend and the simm5 check in isel would fail.
3256     // FIXME: Should we ignore the upper bits in isel instead?
3257     unsigned ExtOpc =
3258         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3259     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3260     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3261   }
3262 
3263   // Use the previous operand to get the vXi64 VT. The result might be a mask
3264   // VT for compares. Using the previous operand assumes that the previous
3265   // operand will never have a smaller element size than a scalar operand and
3266   // that a widening operation never uses SEW=64.
3267   // NOTE: If this fails the below assert, we can probably just find the
3268   // element count from any operand or result and use it to construct the VT.
3269   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3270   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3271 
3272   // The more complex case is when the scalar is larger than XLenVT.
3273   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3274          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3275 
3276   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3277   // on the instruction to sign-extend since SEW>XLEN.
3278   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3279     if (isInt<32>(CVal->getSExtValue())) {
3280       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3281       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3282     }
3283   }
3284 
3285   // We need to convert the scalar to a splat vector.
3286   // FIXME: Can we implicitly truncate the scalar if it is known to
3287   // be sign extended?
3288   // VL should be the last operand.
3289   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3290   assert(VL.getValueType() == XLenVT);
3291   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3292   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3293 }
3294 
3295 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3296                                                      SelectionDAG &DAG) const {
3297   unsigned IntNo = Op.getConstantOperandVal(0);
3298   SDLoc DL(Op);
3299   MVT XLenVT = Subtarget.getXLenVT();
3300 
3301   switch (IntNo) {
3302   default:
3303     break; // Don't custom lower most intrinsics.
3304   case Intrinsic::thread_pointer: {
3305     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3306     return DAG.getRegister(RISCV::X4, PtrVT);
3307   }
3308   case Intrinsic::riscv_orc_b:
3309     // Lower to the GORCI encoding for orc.b.
3310     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3311                        DAG.getConstant(7, DL, XLenVT));
3312   case Intrinsic::riscv_grev:
3313   case Intrinsic::riscv_gorc: {
3314     unsigned Opc =
3315         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3316     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3317   }
3318   case Intrinsic::riscv_shfl:
3319   case Intrinsic::riscv_unshfl: {
3320     unsigned Opc =
3321         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3322     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3323   }
3324   case Intrinsic::riscv_bcompress:
3325   case Intrinsic::riscv_bdecompress: {
3326     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3327                                                        : RISCVISD::BDECOMPRESS;
3328     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3329   }
3330   case Intrinsic::riscv_vmv_x_s:
3331     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3332     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3333                        Op.getOperand(1));
3334   case Intrinsic::riscv_vmv_v_x:
3335     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3336                             Op.getSimpleValueType(), DL, DAG, Subtarget);
3337   case Intrinsic::riscv_vfmv_v_f:
3338     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3339                        Op.getOperand(1), Op.getOperand(2));
3340   case Intrinsic::riscv_vmv_s_x: {
3341     SDValue Scalar = Op.getOperand(2);
3342 
3343     if (Scalar.getValueType().bitsLE(XLenVT)) {
3344       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3345       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3346                          Op.getOperand(1), Scalar, Op.getOperand(3));
3347     }
3348 
3349     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3350 
3351     // This is an i64 value that lives in two scalar registers. We have to
3352     // insert this in a convoluted way. First we build a vXi64 splat containing
3353     // the two values that we assemble using some bit math. Next we'll use
3354     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
3355     // to merge element 0 from our splat into the source vector.
3356     // FIXME: This is probably not the best way to do this, but it is
3357     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3358     // point.
3359     //   sw lo, (a0)
3360     //   sw hi, 4(a0)
3361     //   vlse vX, (a0)
3362     //
3363     //   vid.v      vVid
3364     //   vmseq.vx   mMask, vVid, 0
3365     //   vmerge.vvm vDest, vSrc, vVal, mMask
3366     MVT VT = Op.getSimpleValueType();
3367     SDValue Vec = Op.getOperand(1);
3368     SDValue VL = Op.getOperand(3);
3369 
3370     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
3371     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3372                                       DAG.getConstant(0, DL, MVT::i32), VL);
3373 
3374     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3375     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3376     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3377     SDValue SelectCond =
3378         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3379                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
3380     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3381                        Vec, VL);
3382   }
3383   case Intrinsic::riscv_vslide1up:
3384   case Intrinsic::riscv_vslide1down:
3385   case Intrinsic::riscv_vslide1up_mask:
3386   case Intrinsic::riscv_vslide1down_mask: {
3387     // We need to special case these when the scalar is larger than XLen.
3388     unsigned NumOps = Op.getNumOperands();
3389     bool IsMasked = NumOps == 6;
3390     unsigned OpOffset = IsMasked ? 1 : 0;
3391     SDValue Scalar = Op.getOperand(2 + OpOffset);
3392     if (Scalar.getValueType().bitsLE(XLenVT))
3393       break;
3394 
3395     // Splatting a sign extended constant is fine.
3396     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3397       if (isInt<32>(CVal->getSExtValue()))
3398         break;
3399 
3400     MVT VT = Op.getSimpleValueType();
3401     assert(VT.getVectorElementType() == MVT::i64 &&
3402            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3403 
3404     // Convert the vector source to the equivalent nxvXi32 vector.
3405     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3406     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3407 
3408     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3409                                    DAG.getConstant(0, DL, XLenVT));
3410     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3411                                    DAG.getConstant(1, DL, XLenVT));
3412 
3413     // Double the VL since we halved SEW.
3414     SDValue VL = Op.getOperand(NumOps - 1);
3415     SDValue I32VL =
3416         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3417 
3418     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
3419     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
3420 
3421     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3422     // instructions.
3423     if (IntNo == Intrinsic::riscv_vslide1up ||
3424         IntNo == Intrinsic::riscv_vslide1up_mask) {
3425       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3426                         I32Mask, I32VL);
3427       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3428                         I32Mask, I32VL);
3429     } else {
3430       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3431                         I32Mask, I32VL);
3432       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3433                         I32Mask, I32VL);
3434     }
3435 
3436     // Convert back to nxvXi64.
3437     Vec = DAG.getBitcast(VT, Vec);
3438 
3439     if (!IsMasked)
3440       return Vec;
3441 
3442     // Apply mask after the operation.
3443     SDValue Mask = Op.getOperand(NumOps - 2);
3444     SDValue MaskedOff = Op.getOperand(1);
3445     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
3446   }
3447   }
3448 
3449   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3450 }
3451 
3452 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
3453                                                     SelectionDAG &DAG) const {
3454   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3455 }
3456 
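     // Return the scalable vector type with the same element type as VT whose
     // size is exactly one vector register (LMUL=1), i.e. RVVBitsPerBlock bits.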
3457 static MVT getLMUL1VT(MVT VT) {
3458   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
3459          "Unexpected vector MVT");
3460   return MVT::getScalableVectorVT(
3461       VT.getVectorElementType(),
3462       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
3463 }
3464 
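     // Map an integer ISD::VECREDUCE_* opcode to the corresponding VL-predicated
     // RISCVISD reduction node.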
3465 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
3466   switch (ISDOpcode) {
3467   default:
3468     llvm_unreachable("Unhandled reduction");
3469   case ISD::VECREDUCE_ADD:
3470     return RISCVISD::VECREDUCE_ADD_VL;
3471   case ISD::VECREDUCE_UMAX:
3472     return RISCVISD::VECREDUCE_UMAX_VL;
3473   case ISD::VECREDUCE_SMAX:
3474     return RISCVISD::VECREDUCE_SMAX_VL;
3475   case ISD::VECREDUCE_UMIN:
3476     return RISCVISD::VECREDUCE_UMIN_VL;
3477   case ISD::VECREDUCE_SMIN:
3478     return RISCVISD::VECREDUCE_SMIN_VL;
3479   case ISD::VECREDUCE_AND:
3480     return RISCVISD::VECREDUCE_AND_VL;
3481   case ISD::VECREDUCE_OR:
3482     return RISCVISD::VECREDUCE_OR_VL;
3483   case ISD::VECREDUCE_XOR:
3484     return RISCVISD::VECREDUCE_XOR_VL;
3485   }
3486 }
3487 
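     // Lower reductions of i1 mask vectors via vpopc (mask population count):
     // an AND reduction is true iff the vpopc of the inverted mask is zero, an
     // OR reduction iff vpopc is non-zero, and an XOR reduction follows the
     // parity (low bit) of vpopc.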
3488 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op,
3489                                                       SelectionDAG &DAG) const {
3490   SDLoc DL(Op);
3491   SDValue Vec = Op.getOperand(0);
3492   MVT VecVT = Vec.getSimpleValueType();
3493   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
3494           Op.getOpcode() == ISD::VECREDUCE_OR ||
3495           Op.getOpcode() == ISD::VECREDUCE_XOR) &&
3496          "Unexpected reduction lowering");
3497 
3498   MVT XLenVT = Subtarget.getXLenVT();
3499   assert(Op.getValueType() == XLenVT &&
3500          "Expected reduction output to be legalized to XLenVT");
3501 
3502   MVT ContainerVT = VecVT;
3503   if (VecVT.isFixedLengthVector()) {
3504     ContainerVT = getContainerForFixedLengthVector(VecVT);
3505     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3506   }
3507 
3508   SDValue Mask, VL;
3509   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3510   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3511 
3512   switch (Op.getOpcode()) {
3513   default:
3514     llvm_unreachable("Unhandled reduction");
3515   case ISD::VECREDUCE_AND:
3516     // vpopc ~x == 0
3517     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL);
3518     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3519     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ);
3520   case ISD::VECREDUCE_OR:
3521     // vpopc x != 0
3522     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3523     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3524   case ISD::VECREDUCE_XOR: {
3525     // ((vpopc x) & 1) != 0
3526     SDValue One = DAG.getConstant(1, DL, XLenVT);
3527     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3528     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
3529     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3530   }
3531   }
3532 }
3533 
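     // Lower integer VECREDUCE_* nodes by splatting the reduction's neutral
     // element into an LMUL=1 vector, performing the VL-predicated RVV reduction
     // (which leaves the scalar result in element 0), and extracting element 0.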
3534 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
3535                                             SelectionDAG &DAG) const {
3536   SDLoc DL(Op);
3537   SDValue Vec = Op.getOperand(0);
3538   EVT VecEVT = Vec.getValueType();
3539 
3540   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
3541 
3542   // Due to the ordering of type legalization we may have a vector type that
3543   // needs to be split. Do that manually so we can get down to a legal type.
3544   while (getTypeAction(*DAG.getContext(), VecEVT) ==
3545          TargetLowering::TypeSplitVector) {
3546     SDValue Lo, Hi;
3547     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
3548     VecEVT = Lo.getValueType();
3549     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
3550   }
3551 
3552   // TODO: The type may need to be widened rather than split. Or widened before
3553   // it can be split.
3554   if (!isTypeLegal(VecEVT))
3555     return SDValue();
3556 
3557   MVT VecVT = VecEVT.getSimpleVT();
3558   MVT VecEltVT = VecVT.getVectorElementType();
3559   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
3560 
3561   MVT ContainerVT = VecVT;
3562   if (VecVT.isFixedLengthVector()) {
3563     ContainerVT = getContainerForFixedLengthVector(VecVT);
3564     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3565   }
3566 
3567   MVT M1VT = getLMUL1VT(ContainerVT);
3568 
3569   SDValue Mask, VL;
3570   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3571 
3572   // FIXME: This is a VLMAX splat which might be too large and can prevent
3573   // vsetvli removal.
3574   SDValue NeutralElem =
3575       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
3576   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
3577   SDValue Reduction =
3578       DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
3579   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3580                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3581   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
3582 }
3583 
3584 // Given a reduction op, this function returns the matching reduction opcode,
3585 // the vector SDValue and the scalar SDValue required to lower this to a
3586 // RISCVISD node.
3587 static std::tuple<unsigned, SDValue, SDValue>
3588 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
3589   SDLoc DL(Op);
3590   auto Flags = Op->getFlags();
3591   unsigned Opcode = Op.getOpcode();
3592   unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
3593   switch (Opcode) {
3594   default:
3595     llvm_unreachable("Unhandled reduction");
3596   case ISD::VECREDUCE_FADD:
3597     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
3598                            DAG.getConstantFP(0.0, DL, EltVT));
3599   case ISD::VECREDUCE_SEQ_FADD:
3600     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
3601                            Op.getOperand(0));
3602   case ISD::VECREDUCE_FMIN:
3603     return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
3604                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3605   case ISD::VECREDUCE_FMAX:
3606     return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
3607                            DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
3608   }
3609 }
3610 
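     // Lower floating-point VECREDUCE_* nodes analogously to the integer case:
     // splat the start (or neutral) value into an LMUL=1 vector, perform the RVV
     // reduction, and extract element 0 of the result.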
3611 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
3612                                               SelectionDAG &DAG) const {
3613   SDLoc DL(Op);
3614   MVT VecEltVT = Op.getSimpleValueType();
3615 
3616   unsigned RVVOpcode;
3617   SDValue VectorVal, ScalarVal;
3618   std::tie(RVVOpcode, VectorVal, ScalarVal) =
3619       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
3620   MVT VecVT = VectorVal.getSimpleValueType();
3621 
3622   MVT ContainerVT = VecVT;
3623   if (VecVT.isFixedLengthVector()) {
3624     ContainerVT = getContainerForFixedLengthVector(VecVT);
3625     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
3626   }
3627 
3628   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
3629 
3630   SDValue Mask, VL;
3631   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3632 
3633   // FIXME: This is a VLMAX splat which might be too large and can prevent
3634   // vsetvli removal.
3635   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
3636   SDValue Reduction =
3637       DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
3638   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3639                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3640 }
3641 
3642 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
3643                                                    SelectionDAG &DAG) const {
3644   SDValue Vec = Op.getOperand(0);
3645   SDValue SubVec = Op.getOperand(1);
3646   MVT VecVT = Vec.getSimpleValueType();
3647   MVT SubVecVT = SubVec.getSimpleValueType();
3648 
3649   SDLoc DL(Op);
3650   MVT XLenVT = Subtarget.getXLenVT();
3651   unsigned OrigIdx = Op.getConstantOperandVal(2);
3652   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3653 
3654   // We don't have the ability to slide mask vectors up indexed by their i1
3655   // elements; the smallest we can do is i8. Often we are able to bitcast to
3656   // equivalent i8 vectors. Note that when inserting a fixed-length vector
3657   // into a scalable one, we might not necessarily have enough scalable
3658   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
3659   if (SubVecVT.getVectorElementType() == MVT::i1 &&
3660       (OrigIdx != 0 || !Vec.isUndef())) {
3661     if (VecVT.getVectorMinNumElements() >= 8 &&
3662         SubVecVT.getVectorMinNumElements() >= 8) {
3663       assert(OrigIdx % 8 == 0 && "Invalid index");
3664       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3665              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3666              "Unexpected mask vector lowering");
3667       OrigIdx /= 8;
3668       SubVecVT =
3669           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3670                            SubVecVT.isScalableVector());
3671       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3672                                VecVT.isScalableVector());
3673       Vec = DAG.getBitcast(VecVT, Vec);
3674       SubVec = DAG.getBitcast(SubVecVT, SubVec);
3675     } else {
3676       // We can't slide this mask vector up indexed by its i1 elements.
3677       // This poses a problem when we wish to insert a scalable vector which
3678       // can't be re-expressed as a larger type. Just choose the slow path and
3679       // extend to a larger type, then truncate back down.
3680       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3681       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3682       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3683       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
3684       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
3685                         Op.getOperand(2));
3686       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
3687       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
3688     }
3689   }
3690 
3691   // If the subvector is a fixed-length type, we cannot use subregister
3692   // manipulation to simplify the codegen; we don't know which register of a
3693   // LMUL group contains the specific subvector as we only know the minimum
3694   // register size. Therefore we must slide the vector group up the full
3695   // amount.
3696   if (SubVecVT.isFixedLengthVector()) {
3697     if (OrigIdx == 0 && Vec.isUndef())
3698       return Op;
3699     MVT ContainerVT = VecVT;
3700     if (VecVT.isFixedLengthVector()) {
3701       ContainerVT = getContainerForFixedLengthVector(VecVT);
3702       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3703     }
3704     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
3705                          DAG.getUNDEF(ContainerVT), SubVec,
3706                          DAG.getConstant(0, DL, XLenVT));
3707     SDValue Mask =
3708         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3709     // Set the vector length to only the number of elements we care about. Note
3710     // that for slideup this includes the offset.
3711     SDValue VL =
3712         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
3713     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3714     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3715                                   SubVec, SlideupAmt, Mask, VL);
3716     if (VecVT.isFixedLengthVector())
3717       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3718     return DAG.getBitcast(Op.getValueType(), Slideup);
3719   }
3720 
3721   unsigned SubRegIdx, RemIdx;
3722   std::tie(SubRegIdx, RemIdx) =
3723       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3724           VecVT, SubVecVT, OrigIdx, TRI);
3725 
3726   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
3727   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
3728                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
3729                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
3730 
3731   // 1. If the Idx has been completely eliminated and this subvector's size is
3732   // a vector register or a multiple thereof, or the surrounding elements are
3733   // undef, then this is a subvector insert which naturally aligns to a vector
3734   // register. These can easily be handled using subregister manipulation.
3735   // 2. If the subvector is smaller than a vector register, then the insertion
3736   // must preserve the undisturbed elements of the register. We do this by
3737   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
3738   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
3739   // subvector within the vector register, and an INSERT_SUBVECTOR of that
3740   // LMUL=1 type back into the larger vector (resolving to another subregister
3741   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
3742   // to avoid allocating a large register group to hold our subvector.
3743   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
3744     return Op;
3745 
3746   // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
3747   // OFFSET<=i<VL set to the "subvector", and VL<=i<VLMAX set to the tail policy
3748   // (in our case undisturbed). This means we can set up a subvector insertion
3749   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
3750   // size of the subvector.
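  // For example (illustrative only): inserting <vscale x 1 x i32> at minimum
  // element index 1 of an LMUL=1 <vscale x 2 x i32> gives RemIdx = 1, so the
  // slideup OFFSET is 1*vscale and the VL is 2*vscale; elements below OFFSET
  // and at or above VL keep their previous values.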
3751   MVT InterSubVT = VecVT;
3752   SDValue AlignedExtract = Vec;
3753   unsigned AlignedIdx = OrigIdx - RemIdx;
3754   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3755     InterSubVT = getLMUL1VT(VecVT);
3756     // Extract a subvector equal to the nearest full vector register type. This
3757     // should resolve to an EXTRACT_SUBREG instruction.
3758     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3759                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
3760   }
3761 
3762   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3763   // For scalable vectors this must be further multiplied by vscale.
3764   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
3765 
3766   SDValue Mask, VL;
3767   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
3768 
3769   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
3770   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
3771   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
3772   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
3773 
3774   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
3775                        DAG.getUNDEF(InterSubVT), SubVec,
3776                        DAG.getConstant(0, DL, XLenVT));
3777 
3778   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
3779                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
3780 
3781   // If required, insert this subvector back into the correct vector register.
3782   // This should resolve to an INSERT_SUBREG instruction.
3783   if (VecVT.bitsGT(InterSubVT))
3784     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
3785                           DAG.getConstant(AlignedIdx, DL, XLenVT));
3786 
3787   // We might have bitcast from a mask type: cast back to the original type if
3788   // required.
3789   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
3790 }
3791 
3792 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
3793                                                     SelectionDAG &DAG) const {
3794   SDValue Vec = Op.getOperand(0);
3795   MVT SubVecVT = Op.getSimpleValueType();
3796   MVT VecVT = Vec.getSimpleValueType();
3797 
3798   SDLoc DL(Op);
3799   MVT XLenVT = Subtarget.getXLenVT();
3800   unsigned OrigIdx = Op.getConstantOperandVal(1);
3801   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3802 
3803   // We don't have the ability to slide mask vectors down indexed by their i1
3804   // elements; the smallest we can do is i8. Often we are able to bitcast to
3805   // equivalent i8 vectors. Note that when extracting a fixed-length vector
3806   // from a scalable one, we might not necessarily have enough scalable
3807   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
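  // For example (illustrative only): extracting an nxv8i1 mask at index 16
  // from an nxv64i1 mask can be handled as extracting an nxv1i8 vector at
  // index 2 from an nxv8i8 vector.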
3808   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
3809     if (VecVT.getVectorMinNumElements() >= 8 &&
3810         SubVecVT.getVectorMinNumElements() >= 8) {
3811       assert(OrigIdx % 8 == 0 && "Invalid index");
3812       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3813              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3814              "Unexpected mask vector lowering");
3815       OrigIdx /= 8;
3816       SubVecVT =
3817           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3818                            SubVecVT.isScalableVector());
3819       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3820                                VecVT.isScalableVector());
3821       Vec = DAG.getBitcast(VecVT, Vec);
3822     } else {
3823       // We can't slide this mask vector down, indexed by its i1 elements.
3824       // This poses a problem when we wish to extract a scalable vector which
3825       // can't be re-expressed as a larger type. Just choose the slow path and
3826       // extend to a larger type, then truncate back down.
3827       // TODO: We could probably improve this for certain fixed-length from
3828       // fixed-length extracts, where we could extract as i8 and shift the
3829       // containing element right to reach the desired subvector.
3830       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3831       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3832       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3833       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
3834                         Op.getOperand(1));
3835       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
3836       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
3837     }
3838   }
3839 
3840   // If the subvector is a fixed-length type, we cannot use subregister
3841   // manipulation to simplify the codegen; we don't know which register of an
3842   // LMUL group contains the specific subvector as we only know the minimum
3843   // register size. Therefore we must slide the vector group down the full
3844   // amount.
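  // For example (illustrative only): extracting a v2i32 subvector at index 6
  // from a v8i32 vector slides the container down by 6 with VL = 2 and then
  // extracts the result from element 0.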
3845   if (SubVecVT.isFixedLengthVector()) {
3846     // With an index of 0 this is a cast-like subvector, which can be performed
3847     // with subregister operations.
3848     if (OrigIdx == 0)
3849       return Op;
3850     MVT ContainerVT = VecVT;
3851     if (VecVT.isFixedLengthVector()) {
3852       ContainerVT = getContainerForFixedLengthVector(VecVT);
3853       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3854     }
3855     SDValue Mask =
3856         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3857     // Set the vector length to only the number of elements we care about. This
3858     // avoids sliding down elements we're going to discard straight away.
3859     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
3860     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3861     SDValue Slidedown =
3862         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3863                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
3864     // Now we can use a cast-like subvector extract to get the result.
3865     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3866                             DAG.getConstant(0, DL, XLenVT));
3867     return DAG.getBitcast(Op.getValueType(), Slidedown);
3868   }
3869 
3870   unsigned SubRegIdx, RemIdx;
3871   std::tie(SubRegIdx, RemIdx) =
3872       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3873           VecVT, SubVecVT, OrigIdx, TRI);
3874 
3875   // If the Idx has been completely eliminated then this is a subvector extract
3876   // which naturally aligns to a vector register. These can easily be handled
3877   // using subregister manipulation.
3878   if (RemIdx == 0)
3879     return Op;
3880 
3881   // Else we must shift our vector register directly to extract the subvector.
3882   // Do this using VSLIDEDOWN.
3883 
3884   // If the vector type is an LMUL-group type, extract a subvector equal to the
3885   // nearest full vector register type. This should resolve to an EXTRACT_SUBREG
3886   // instruction.
3887   MVT InterSubVT = VecVT;
3888   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3889     InterSubVT = getLMUL1VT(VecVT);
3890     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3891                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
3892   }
3893 
3894   // Slide this vector register down by the desired number of elements in order
3895   // to place the desired subvector starting at element 0.
3896   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3897   // For scalable vectors this must be further multiplied by vscale.
3898   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
3899 
3900   SDValue Mask, VL;
3901   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
3902   SDValue Slidedown =
3903       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
3904                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
3905 
3906   // Now the vector is in the right position, extract our final subvector. This
3907   // should resolve to a COPY.
3908   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3909                           DAG.getConstant(0, DL, XLenVT));
3910 
3911   // We might have bitcast from a mask type: cast back to the original type if
3912   // required.
3913   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
3914 }
3915 
3916 // Lower step_vector to the vid instruction. Any non-identity step value must
3917 // be accounted for by manual expansion.
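// For example (illustrative only): a step_vector with a step of 4 becomes
// vid.v followed by a shift left by 2, whereas a step of 3 becomes vid.v
// followed by a multiply by a splat of 3.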
3918 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
3919                                               SelectionDAG &DAG) const {
3920   SDLoc DL(Op);
3921   MVT VT = Op.getSimpleValueType();
3922   MVT XLenVT = Subtarget.getXLenVT();
3923   SDValue Mask, VL;
3924   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
3925   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3926   uint64_t StepValImm = Op.getConstantOperandVal(0);
3927   if (StepValImm != 1) {
3928     assert(Op.getOperand(0).getValueType() == XLenVT &&
3929            "Unexpected step value type");
3930     if (isPowerOf2_64(StepValImm)) {
3931       SDValue StepVal =
3932           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3933                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
3934       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
3935     } else {
3936       SDValue StepVal =
3937           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Op.getOperand(0));
3938       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
3939     }
3940   }
3941   return StepVec;
3942 }
3943 
3944 // Implement vector_reverse using vrgather.vv with indices determined by
3945 // subtracting the id of each element from (VLMAX-1). This will convert
3946 // the indices like so:
3947 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
3948 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
3949 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
3950                                                  SelectionDAG &DAG) const {
3951   SDLoc DL(Op);
3952   MVT VecVT = Op.getSimpleValueType();
3953   unsigned EltSize = VecVT.getScalarSizeInBits();
3954   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
3955 
3956   unsigned MaxVLMAX = 0;
3957   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
3958   if (VectorBitsMax != 0)
3959     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
3960 
3961   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
3962   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
3963 
3964   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
3965   // to use vrgatherei16.vv.
3966   // TODO: It's also possible to use vrgatherei16.vv for other types to
3967   // decrease register width for the index calculation.
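  // With SEW=8 the vrgather.vv indices are themselves only 8 bits wide and so
  // can only address 256 elements; if VLMAX may exceed 256, the indices must
  // be held in 16-bit elements via vrgatherei16.vv instead.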
3968   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
3969     // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
3970     // Reverse each half, then reassemble them in reverse order.
3971     // NOTE: It's also possible that after splitting, VLMAX no longer
3972     // requires vrgatherei16.vv.
3973     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
3974       SDValue Lo, Hi;
3975       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3976       EVT LoVT, HiVT;
3977       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
3978       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
3979       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
3980       // Reassemble the low and high pieces reversed.
3981       // FIXME: This is a CONCAT_VECTORS.
3982       SDValue Res =
3983           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
3984                       DAG.getIntPtrConstant(0, DL));
3985       return DAG.getNode(
3986           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
3987           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
3988     }
3989 
3990     // Just promote the int type to i16 which will double the LMUL.
3991     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
3992     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
3993   }
3994 
3995   MVT XLenVT = Subtarget.getXLenVT();
3996   SDValue Mask, VL;
3997   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
3998 
3999   // Calculate VLMAX-1 for the desired SEW.
4000   unsigned MinElts = VecVT.getVectorMinNumElements();
4001   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
4002                               DAG.getConstant(MinElts, DL, XLenVT));
4003   SDValue VLMinus1 =
4004       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
4005 
4006   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
4007   bool IsRV32E64 =
4008       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
4009   SDValue SplatVL;
4010   if (!IsRV32E64)
4011     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
4012   else
4013     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
4014 
4015   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
4016   SDValue Indices =
4017       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
4018 
4019   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
4020 }
4021 
4022 SDValue
4023 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
4024                                                      SelectionDAG &DAG) const {
4025   SDLoc DL(Op);
4026   auto *Load = cast<LoadSDNode>(Op);
4027 
4028   if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4029                                       Load->getMemoryVT(),
4030                                       *Load->getMemOperand())) {
4031     SDValue Result, Chain;
4032     std::tie(Result, Chain) = expandUnalignedLoad(Load, DAG);
4033     return DAG.getMergeValues({Result, Chain}, DL);
4034   }
4035 
4036   MVT VT = Op.getSimpleValueType();
4037   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4038 
4039   SDValue VL =
4040       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4041 
4042   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4043   SDValue NewLoad = DAG.getMemIntrinsicNode(
4044       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
4045       Load->getMemoryVT(), Load->getMemOperand());
4046 
4047   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4048   return DAG.getMergeValues({Result, Load->getChain()}, DL);
4049 }
4050 
4051 SDValue
4052 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
4053                                                       SelectionDAG &DAG) const {
4054   SDLoc DL(Op);
4055   auto *Store = cast<StoreSDNode>(Op);
4056 
4057   if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4058                                       Store->getMemoryVT(),
4059                                       *Store->getMemOperand()))
4060     return expandUnalignedStore(Store, DAG);
4061 
4062   SDValue StoreVal = Store->getValue();
4063   MVT VT = StoreVal.getSimpleValueType();
4064 
4065   // If the size is less than a byte, we need to pad with zeros to make a byte.
4066   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
4067     VT = MVT::v8i1;
4068     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
4069                            DAG.getConstant(0, DL, VT), StoreVal,
4070                            DAG.getIntPtrConstant(0, DL));
4071   }
4072 
4073   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4074 
4075   SDValue VL =
4076       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4077 
4078   SDValue NewValue =
4079       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
4080   return DAG.getMemIntrinsicNode(
4081       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
4082       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
4083       Store->getMemoryVT(), Store->getMemOperand());
4084 }
4085 
4086 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
4087   auto *Load = cast<MaskedLoadSDNode>(Op);
4088 
4089   SDLoc DL(Op);
4090   MVT VT = Op.getSimpleValueType();
4091   MVT XLenVT = Subtarget.getXLenVT();
4092 
4093   SDValue Mask = Load->getMask();
4094   SDValue PassThru = Load->getPassThru();
4095   SDValue VL;
4096 
4097   MVT ContainerVT = VT;
4098   if (VT.isFixedLengthVector()) {
4099     ContainerVT = getContainerForFixedLengthVector(VT);
4100     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4101 
4102     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4103     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4104     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4105   } else
4106     VL = DAG.getRegister(RISCV::X0, XLenVT);
4107 
4108   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4109   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
4110   SDValue Ops[] = {Load->getChain(),   IntID, PassThru,
4111                    Load->getBasePtr(), Mask,  VL};
4112   SDValue Result =
4113       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4114                               Load->getMemoryVT(), Load->getMemOperand());
4115   SDValue Chain = Result.getValue(1);
4116 
4117   if (VT.isFixedLengthVector())
4118     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4119 
4120   return DAG.getMergeValues({Result, Chain}, DL);
4121 }
4122 
4123 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const {
4124   auto *Store = cast<MaskedStoreSDNode>(Op);
4125 
4126   SDLoc DL(Op);
4127   SDValue Val = Store->getValue();
4128   SDValue Mask = Store->getMask();
4129   MVT VT = Val.getSimpleValueType();
4130   MVT XLenVT = Subtarget.getXLenVT();
4131   SDValue VL;
4132 
4133   MVT ContainerVT = VT;
4134   if (VT.isFixedLengthVector()) {
4135     ContainerVT = getContainerForFixedLengthVector(VT);
4136     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4137 
4138     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4139     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4140     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4141   } else
4142     VL = DAG.getRegister(RISCV::X0, XLenVT);
4143 
4144   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
4145   return DAG.getMemIntrinsicNode(
4146       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
4147       {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL},
4148       Store->getMemoryVT(), Store->getMemOperand());
4149 }
4150 
4151 SDValue
4152 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
4153                                                       SelectionDAG &DAG) const {
4154   MVT InVT = Op.getOperand(0).getSimpleValueType();
4155   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
4156 
4157   MVT VT = Op.getSimpleValueType();
4158 
4159   SDValue Op1 =
4160       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4161   SDValue Op2 =
4162       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4163 
4164   SDLoc DL(Op);
4165   SDValue VL =
4166       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4167 
4168   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4169   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4170 
4171   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
4172                             Op.getOperand(2), Mask, VL);
4173 
4174   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
4175 }
4176 
4177 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
4178     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
4179   MVT VT = Op.getSimpleValueType();
4180 
4181   if (VT.getVectorElementType() == MVT::i1)
4182     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
4183 
4184   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
4185 }
4186 
4187 // Lower vector ABS to smax(X, sub(0, X)).
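// For example (illustrative only): for an element X = -3 this computes
// smax(-3, 0 - (-3)) = smax(-3, 3) = 3.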
4188 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
4189   SDLoc DL(Op);
4190   MVT VT = Op.getSimpleValueType();
4191   SDValue X = Op.getOperand(0);
4192 
4193   assert(VT.isFixedLengthVector() && "Unexpected type");
4194 
4195   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4196   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
4197 
4198   SDValue Mask, VL;
4199   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4200 
4201   SDValue SplatZero =
4202       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4203                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4204   SDValue NegX =
4205       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
4206   SDValue Max =
4207       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
4208 
4209   return convertFromScalableVector(VT, Max, DAG, Subtarget);
4210 }
4211 
4212 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
4213     SDValue Op, SelectionDAG &DAG) const {
4214   SDLoc DL(Op);
4215   MVT VT = Op.getSimpleValueType();
4216   SDValue Mag = Op.getOperand(0);
4217   SDValue Sign = Op.getOperand(1);
4218   assert(Mag.getValueType() == Sign.getValueType() &&
4219          "Can only handle COPYSIGN with matching types.");
4220 
4221   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4222   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
4223   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
4224 
4225   SDValue Mask, VL;
4226   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4227 
4228   SDValue CopySign =
4229       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
4230 
4231   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
4232 }
4233 
4234 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
4235     SDValue Op, SelectionDAG &DAG) const {
4236   MVT VT = Op.getSimpleValueType();
4237   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4238 
4239   MVT I1ContainerVT =
4240       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4241 
4242   SDValue CC =
4243       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
4244   SDValue Op1 =
4245       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4246   SDValue Op2 =
4247       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
4248 
4249   SDLoc DL(Op);
4250   SDValue Mask, VL;
4251   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4252 
4253   SDValue Select =
4254       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
4255 
4256   return convertFromScalableVector(VT, Select, DAG, Subtarget);
4257 }
4258 
4259 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
4260                                                unsigned NewOpc,
4261                                                bool HasMask) const {
4262   MVT VT = Op.getSimpleValueType();
4263   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4264 
4265   // Create list of operands by converting existing ones to scalable types.
4266   SmallVector<SDValue, 6> Ops;
4267   for (const SDValue &V : Op->op_values()) {
4268     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4269 
4270     // Pass through non-vector operands.
4271     if (!V.getValueType().isVector()) {
4272       Ops.push_back(V);
4273       continue;
4274     }
4275 
4276     // "cast" fixed length vector to a scalable vector.
4277     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
4278            "Only fixed length vectors are supported!");
4279     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4280   }
4281 
4282   SDLoc DL(Op);
4283   SDValue Mask, VL;
4284   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4285   if (HasMask)
4286     Ops.push_back(Mask);
4287   Ops.push_back(VL);
4288 
4289   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
4290   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
4291 }
4292 
4293 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
4294 // * Operands of each node are assumed to be in the same order.
4295 // * The EVL operand is promoted from i32 to i64 on RV64.
4296 // * Fixed-length vectors are converted to their scalable-vector container
4297 //   types.
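// For example (illustrative only): a VP add on v4i32 operands has its
// fixed-length vector operands, including the mask, converted to their
// scalable container types, is emitted as the corresponding RISCVISD::*_VL
// node, and has its result converted back to v4i32.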
4298 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
4299                                        unsigned RISCVISDOpc) const {
4300   SDLoc DL(Op);
4301   MVT VT = Op.getSimpleValueType();
4302   SmallVector<SDValue, 4> Ops;
4303 
4304   for (const auto &OpIdx : enumerate(Op->ops())) {
4305     SDValue V = OpIdx.value();
4306     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4307     // Pass through operands which aren't fixed-length vectors.
4308     if (!V.getValueType().isFixedLengthVector()) {
4309       Ops.push_back(V);
4310       continue;
4311     }
4312     // "cast" fixed length vector to a scalable vector.
4313     MVT OpVT = V.getSimpleValueType();
4314     MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
4315     assert(useRVVForFixedLengthVectorVT(OpVT) &&
4316            "Only fixed length vectors are supported!");
4317     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4318   }
4319 
4320   if (!VT.isFixedLengthVector())
4321     return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
4322 
4323   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4324 
4325   SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
4326 
4327   return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
4328 }
4329 
4330 // Custom lower MGATHER to a legalized form for RVV. It will then be matched to
4331 // an RVV indexed load. The RVV indexed load instructions only support the
4332 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4333 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4334 // indexing is extended to the XLEN value type and scaled accordingly.
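// For example (illustrative only): an MGATHER is emitted as the riscv_vloxei
// intrinsic (or riscv_vloxei_mask when the mask is not known to be all ones),
// with the indices interpreted as unsigned byte offsets.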
4335 SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
4336   auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
4337   SDLoc DL(Op);
4338 
4339   SDValue Index = MGN->getIndex();
4340   SDValue Mask = MGN->getMask();
4341   SDValue PassThru = MGN->getPassThru();
4342 
4343   MVT VT = Op.getSimpleValueType();
4344   MVT IndexVT = Index.getSimpleValueType();
4345   MVT XLenVT = Subtarget.getXLenVT();
4346 
4347   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4348          "Unexpected VTs!");
4349   assert(MGN->getBasePtr().getSimpleValueType() == XLenVT &&
4350          "Unexpected pointer type");
4351   // Targets have to explicitly opt in to extending vector loads.
4352   assert(MGN->getExtensionType() == ISD::NON_EXTLOAD &&
4353          "Unexpected extending MGATHER");
4354 
4355   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4356   // the selection of the masked intrinsics doesn't do this for us.
4357   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4358 
4359   SDValue VL;
4360   MVT ContainerVT = VT;
4361   if (VT.isFixedLengthVector()) {
4362     // We need to use the larger of the result and index type to determine the
4363     // scalable type to use so we don't increase LMUL for any operand/result.
4364     if (VT.bitsGE(IndexVT)) {
4365       ContainerVT = getContainerForFixedLengthVector(VT);
4366       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4367                                  ContainerVT.getVectorElementCount());
4368     } else {
4369       IndexVT = getContainerForFixedLengthVector(IndexVT);
4370       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
4371                                      IndexVT.getVectorElementCount());
4372     }
4373 
4374     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4375 
4376     if (!IsUnmasked) {
4377       MVT MaskVT =
4378           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4379       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4380       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4381     }
4382 
4383     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4384   } else
4385     VL = DAG.getRegister(RISCV::X0, XLenVT);
4386 
4387   unsigned IntID =
4388       IsUnmasked ? Intrinsic::riscv_vloxei : Intrinsic::riscv_vloxei_mask;
4389   SmallVector<SDValue, 8> Ops{MGN->getChain(),
4390                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4391   if (!IsUnmasked)
4392     Ops.push_back(PassThru);
4393   Ops.push_back(MGN->getBasePtr());
4394   Ops.push_back(Index);
4395   if (!IsUnmasked)
4396     Ops.push_back(Mask);
4397   Ops.push_back(VL);
4398 
4399   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4400   SDValue Result =
4401       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4402                               MGN->getMemoryVT(), MGN->getMemOperand());
4403   SDValue Chain = Result.getValue(1);
4404 
4405   if (VT.isFixedLengthVector())
4406     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4407 
4408   return DAG.getMergeValues({Result, Chain}, DL);
4409 }
4410 
4411 // Custom lower MSCATTER to a legalized form for RVV. It will then be matched to
4412 // an RVV indexed store. The RVV indexed store instructions only support the
4413 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4414 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4415 // indexing is extended to the XLEN value type and scaled accordingly.
4416 SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
4417                                            SelectionDAG &DAG) const {
4418   auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
4419   SDLoc DL(Op);
4420   SDValue Index = MSN->getIndex();
4421   SDValue Mask = MSN->getMask();
4422   SDValue Val = MSN->getValue();
4423 
4424   MVT VT = Val.getSimpleValueType();
4425   MVT IndexVT = Index.getSimpleValueType();
4426   MVT XLenVT = Subtarget.getXLenVT();
4427 
4428   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4429          "Unexpected VTs!");
4430   assert(MSN->getBasePtr().getSimpleValueType() == XLenVT &&
4431          "Unexpected pointer type");
4432   // Targets have to explicitly opt in to extending vector loads and
4433   // truncating vector stores.
4434   assert(!MSN->isTruncatingStore() && "Unexpected truncating MSCATTER");
4435 
4436   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4437   // the selection of the masked intrinsics doesn't do this for us.
4438   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4439 
4440   SDValue VL;
4441   if (VT.isFixedLengthVector()) {
4442     // We need to use the larger of the value and index type to determine the
4443     // scalable type to use so we don't increase LMUL for any operand/result.
4444     MVT ContainerVT;
4445     if (VT.bitsGE(IndexVT)) {
4446       ContainerVT = getContainerForFixedLengthVector(VT);
4447       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4448                                  ContainerVT.getVectorElementCount());
4449     } else {
4450       IndexVT = getContainerForFixedLengthVector(IndexVT);
4451       ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
4452                                      IndexVT.getVectorElementCount());
4453     }
4454 
4455     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4456     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4457 
4458     if (!IsUnmasked) {
4459       MVT MaskVT =
4460           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4461       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4462     }
4463 
4464     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4465   } else
4466     VL = DAG.getRegister(RISCV::X0, XLenVT);
4467 
4468   unsigned IntID =
4469       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
4470   SmallVector<SDValue, 8> Ops{MSN->getChain(),
4471                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4472   Ops.push_back(Val);
4473   Ops.push_back(MSN->getBasePtr());
4474   Ops.push_back(Index);
4475   if (!IsUnmasked)
4476     Ops.push_back(Mask);
4477   Ops.push_back(VL);
4478 
4479   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
4480                                  MSN->getMemoryVT(), MSN->getMemOperand());
4481 }
4482 
4483 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
4484                                                SelectionDAG &DAG) const {
4485   const MVT XLenVT = Subtarget.getXLenVT();
4486   SDLoc DL(Op);
4487   SDValue Chain = Op->getOperand(0);
4488   SDValue SysRegNo = DAG.getConstant(
4489       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4490   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
4491   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
4492 
4493   // The encoding used for the rounding mode in RISCV differs from that used by
4494   // FLT_ROUNDS. To convert it, the RISCV rounding mode is used as an index into
4495   // a table, which consists of a sequence of 4-bit fields, each representing
4496   // the corresponding FLT_ROUNDS mode.
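  // For example (illustrative only): reading FRM = 1 (RTZ) shifts the table
  // right by 1 * 4 bits; the low 3 bits of that nibble give
  // RoundingMode::TowardZero (0), the FLT_ROUNDS value for round-toward-zero.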
4497   static const int Table =
4498       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
4499       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
4500       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
4501       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
4502       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
4503 
4504   SDValue Shift =
4505       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
4506   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4507                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4508   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4509                                DAG.getConstant(7, DL, XLenVT));
4510 
4511   return DAG.getMergeValues({Masked, Chain}, DL);
4512 }
4513 
4514 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
4515                                                SelectionDAG &DAG) const {
4516   const MVT XLenVT = Subtarget.getXLenVT();
4517   SDLoc DL(Op);
4518   SDValue Chain = Op->getOperand(0);
4519   SDValue RMValue = Op->getOperand(1);
4520   SDValue SysRegNo = DAG.getConstant(
4521       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4522 
4523   // The encoding used for the rounding mode in RISCV differs from that used by
4524   // FLT_ROUNDS. To convert it, the C rounding mode is used as an index into
4525   // a table, which consists of a sequence of 4-bit fields, each representing
4526   // the corresponding RISCV mode.
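  // For example (illustrative only): setting FLT_ROUNDS mode 3
  // (RoundingMode::TowardNegative) selects nibble 3 of the table, which holds
  // RISCVFPRndMode::RDN (2), the value then written to the FRM register.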
4527   static const unsigned Table =
4528       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
4529       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
4530       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
4531       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
4532       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
4533 
4534   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
4535                               DAG.getConstant(2, DL, XLenVT));
4536   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4537                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4538   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4539                         DAG.getConstant(0x7, DL, XLenVT));
4540   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
4541                      RMValue);
4542 }
4543 
4544 // Returns the opcode of the target-specific SDNode that implements the 32-bit
4545 // form of the given Opcode.
4546 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
4547   switch (Opcode) {
4548   default:
4549     llvm_unreachable("Unexpected opcode");
4550   case ISD::SHL:
4551     return RISCVISD::SLLW;
4552   case ISD::SRA:
4553     return RISCVISD::SRAW;
4554   case ISD::SRL:
4555     return RISCVISD::SRLW;
4556   case ISD::SDIV:
4557     return RISCVISD::DIVW;
4558   case ISD::UDIV:
4559     return RISCVISD::DIVUW;
4560   case ISD::UREM:
4561     return RISCVISD::REMUW;
4562   case ISD::ROTL:
4563     return RISCVISD::ROLW;
4564   case ISD::ROTR:
4565     return RISCVISD::RORW;
4566   case RISCVISD::GREV:
4567     return RISCVISD::GREVW;
4568   case RISCVISD::GORC:
4569     return RISCVISD::GORCW;
4570   }
4571 }
4572 
4573 // Converts the given 32-bit operation to a target-specific SelectionDAG node.
4574 // Because i32 isn't a legal type for RV64, these operations would otherwise
4575 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
4576 // later on, because the fact that the operation was originally of type i32 is
4577 // lost.
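// For example (illustrative only): an i32 SHL on RV64 has both operands
// any-extended to i64, is emitted as RISCVISD::SLLW, and the i64 result is
// truncated back to i32.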
4578 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
4579                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
4580   SDLoc DL(N);
4581   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4582   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
4583   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
4584   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4585   // ReplaceNodeResults requires we maintain the same type for the return value.
4586   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
4587 }
4588 
4589 // Converts the given 32-bit operation to an i64 operation with sign-extension
4590 // semantics in order to reduce the number of sign-extension instructions.
4591 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
4592   SDLoc DL(N);
4593   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4594   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4595   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
4596   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
4597                                DAG.getValueType(MVT::i32));
4598   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
4599 }
4600 
4601 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
4602                                              SmallVectorImpl<SDValue> &Results,
4603                                              SelectionDAG &DAG) const {
4604   SDLoc DL(N);
4605   switch (N->getOpcode()) {
4606   default:
4607     llvm_unreachable("Don't know how to custom type legalize this operation!");
4608   case ISD::STRICT_FP_TO_SINT:
4609   case ISD::STRICT_FP_TO_UINT:
4610   case ISD::FP_TO_SINT:
4611   case ISD::FP_TO_UINT: {
4612     bool IsStrict = N->isStrictFPOpcode();
4613     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4614            "Unexpected custom legalisation");
4615     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
4616     // If the FP type needs to be softened, emit a library call using the 'si'
4617     // version. If we left it to default legalization we'd end up with 'di'. If
4618     // the FP type doesn't need to be softened just let generic type
4619     // legalization promote the result type.
4620     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
4621         TargetLowering::TypeSoftenFloat)
4622       return;
4623     RTLIB::Libcall LC;
4624     if (N->getOpcode() == ISD::FP_TO_SINT ||
4625         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
4626       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
4627     else
4628       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
4629     MakeLibCallOptions CallOptions;
4630     EVT OpVT = Op0.getValueType();
4631     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
4632     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
4633     SDValue Result;
4634     std::tie(Result, Chain) =
4635         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
4636     Results.push_back(Result);
4637     if (IsStrict)
4638       Results.push_back(Chain);
4639     break;
4640   }
4641   case ISD::READCYCLECOUNTER: {
4642     assert(!Subtarget.is64Bit() &&
4643            "READCYCLECOUNTER only has custom type legalization on riscv32");
4644 
4645     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4646     SDValue RCW =
4647         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
4648 
4649     Results.push_back(
4650         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
4651     Results.push_back(RCW.getValue(2));
4652     break;
4653   }
4654   case ISD::MUL: {
4655     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
4656     unsigned XLen = Subtarget.getXLen();
4657     // This multiply needs to be expanded; try to use MULHSU+MUL if possible.
4658     if (Size > XLen) {
4659       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
4660       SDValue LHS = N->getOperand(0);
4661       SDValue RHS = N->getOperand(1);
4662       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
4663 
4664       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
4665       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
4666       // We need exactly one side to be unsigned.
4667       if (LHSIsU == RHSIsU)
4668         return;
4669 
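      // The split below relies on the identity that, for S sign-extended and
      // U zero-extended from XLEN bits, the low XLEN bits of S*U come from
      // mul(S, U) and the high XLEN bits come from mulhsu(S, U).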
4670       auto MakeMULPair = [&](SDValue S, SDValue U) {
4671         MVT XLenVT = Subtarget.getXLenVT();
4672         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
4673         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
4674         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
4675         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
4676         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
4677       };
4678 
4679       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
4680       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
4681 
4682       // The other operand should be signed, but still prefer MULH when
4683       // possible.
4684       if (RHSIsU && LHSIsS && !RHSIsS)
4685         Results.push_back(MakeMULPair(LHS, RHS));
4686       else if (LHSIsU && RHSIsS && !LHSIsS)
4687         Results.push_back(MakeMULPair(RHS, LHS));
4688 
4689       return;
4690     }
4691     LLVM_FALLTHROUGH;
4692   }
4693   case ISD::ADD:
4694   case ISD::SUB:
4695     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4696            "Unexpected custom legalisation");
4697     if (N->getOperand(1).getOpcode() == ISD::Constant)
4698       return;
4699     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
4700     break;
4701   case ISD::SHL:
4702   case ISD::SRA:
4703   case ISD::SRL:
4704     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4705            "Unexpected custom legalisation");
4706     if (N->getOperand(1).getOpcode() == ISD::Constant)
4707       return;
4708     Results.push_back(customLegalizeToWOp(N, DAG));
4709     break;
4710   case ISD::ROTL:
4711   case ISD::ROTR:
4712     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4713            "Unexpected custom legalisation");
4714     Results.push_back(customLegalizeToWOp(N, DAG));
4715     break;
4716   case ISD::CTTZ:
4717   case ISD::CTTZ_ZERO_UNDEF:
4718   case ISD::CTLZ:
4719   case ISD::CTLZ_ZERO_UNDEF: {
4720     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4721            "Unexpected custom legalisation");
4722 
4723     SDValue NewOp0 =
4724         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4725     bool IsCTZ =
4726         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
4727     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
4728     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
4729     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4730     return;
4731   }
4732   case ISD::SDIV:
4733   case ISD::UDIV:
4734   case ISD::UREM: {
4735     MVT VT = N->getSimpleValueType(0);
4736     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
4737            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
4738            "Unexpected custom legalisation");
4739     if (N->getOperand(0).getOpcode() == ISD::Constant ||
4740         N->getOperand(1).getOpcode() == ISD::Constant)
4741       return;
4742 
4743     // If the input is i32, use ANY_EXTEND since the W instructions don't read
4744     // the upper 32 bits. For other types we need to sign or zero extend
4745     // based on the opcode.
4746     unsigned ExtOpc = ISD::ANY_EXTEND;
4747     if (VT != MVT::i32)
4748       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
4749                                            : ISD::ZERO_EXTEND;
4750 
4751     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
4752     break;
4753   }
4754   case ISD::UADDO:
4755   case ISD::USUBO: {
4756     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4757            "Unexpected custom legalisation");
4758     bool IsAdd = N->getOpcode() == ISD::UADDO;
4759     // Create an ADDW or SUBW.
4760     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4761     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4762     SDValue Res =
4763         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
4764     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
4765                       DAG.getValueType(MVT::i32));
4766 
4767     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
4768     // Since the inputs are sign extended from i32, this is equivalent to
4769     // comparing the lower 32 bits.
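    // For example (illustrative only): for UADDO, 0xFFFFFFFF + 1 wraps to 0,
    // which compares unsigned-less-than the sign-extended LHS, signalling
    // overflow.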
4770     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4771     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
4772                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
4773 
4774     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4775     Results.push_back(Overflow);
4776     return;
4777   }
4778   case ISD::UADDSAT:
4779   case ISD::USUBSAT: {
4780     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4781            "Unexpected custom legalisation");
4782     if (Subtarget.hasStdExtZbb()) {
4783       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
4784       // sign extension allows overflow of the lower 32 bits to be detected at
4785       // the promoted width.
4786       SDValue LHS =
4787           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4788       SDValue RHS =
4789           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
4790       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
4791       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4792       return;
4793     }
4794 
4795     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
4796     // promotion for UADDO/USUBO.
4797     Results.push_back(expandAddSubSat(N, DAG));
4798     return;
4799   }
4800   case ISD::BITCAST: {
4801     EVT VT = N->getValueType(0);
4802     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
4803     SDValue Op0 = N->getOperand(0);
4804     EVT Op0VT = Op0.getValueType();
4805     MVT XLenVT = Subtarget.getXLenVT();
4806     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
4807       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
4808       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
4809     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
4810                Subtarget.hasStdExtF()) {
4811       SDValue FPConv =
4812           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
4813       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
4814     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
4815                isTypeLegal(Op0VT)) {
4816       // Custom-legalize bitcasts from fixed-length vector types to illegal
4817       // scalar types in order to improve codegen. Bitcast the vector to a
4818       // one-element vector type whose element type is the same as the result
4819       // type, and extract the first element.
4820       LLVMContext &Context = *DAG.getContext();
4821       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
4822       Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
4823                                     DAG.getConstant(0, DL, XLenVT)));
4824     }
4825     break;
4826   }
4827   case RISCVISD::GREV:
4828   case RISCVISD::GORC: {
4829     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4830            "Unexpected custom legalisation");
4831     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
4832     // This is similar to customLegalizeToWOp, except that the second operand
4833     // is known to be a constant. Both operands are any-extended to i64 before
4834     // building the *W node.
4835     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4836     SDValue NewOp0 =
4837         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4838     SDValue NewOp1 =
4839         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4840     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4841     // ReplaceNodeResults requires we maintain the same type for the return
4842     // value.
4843     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4844     break;
4845   }
4846   case RISCVISD::SHFL: {
4847     // There is no SHFLIW instruction, but we can just promote the operation.
4848     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4849            "Unexpected custom legalisation");
4850     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
4851     SDValue NewOp0 =
4852         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4853     SDValue NewOp1 =
4854         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4855     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
4856     // ReplaceNodeResults requires we maintain the same type for the return
4857     // value.
4858     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4859     break;
4860   }
4861   case ISD::BSWAP:
4862   case ISD::BITREVERSE: {
4863     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4864            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
4865     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
4866                                  N->getOperand(0));
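    // GREVI control 31 (0b11111) reverses all 32 bits (bitreverse), while
    // control 24 (0b11000) swaps only the bytes (bswap).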
4867     unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24;
4868     SDValue GREVIW = DAG.getNode(RISCVISD::GREVW, DL, MVT::i64, NewOp0,
4869                                  DAG.getConstant(Imm, DL, MVT::i64));
4870     // ReplaceNodeResults requires we maintain the same type for the return
4871     // value.
4872     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW));
4873     break;
4874   }
4875   case ISD::FSHL:
4876   case ISD::FSHR: {
4877     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4878            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
4879     SDValue NewOp0 =
4880         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4881     SDValue NewOp1 =
4882         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4883     SDValue NewOp2 =
4884         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4885     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
4886     // Mask the shift amount to 5 bits.
4887     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
4888                          DAG.getConstant(0x1f, DL, MVT::i64));
4889     unsigned Opc =
4890         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
4891     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
4892     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
4893     break;
4894   }
4895   case ISD::EXTRACT_VECTOR_ELT: {
4896     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
4897     // type is illegal (currently only vXi64 RV32).
4898     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
4899     // transferred to the destination register. We issue two of these from the
4900     // upper and lower halves of the SEW-bit vector element, slid down to the
4901     // first element.
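    // For example (illustrative only): extracting element 2 of an nxv2i64
    // vector on RV32 slides the vector down by 2, reads the low 32 bits with
    // vmv.x.s, shifts that element right by 32 and reads the upper 32 bits
    // with a second vmv.x.s, then pairs the two halves into an i64.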
4902     SDValue Vec = N->getOperand(0);
4903     SDValue Idx = N->getOperand(1);
4904 
4905     // The vector type hasn't been legalized yet so we can't issue target
4906     // specific nodes if it needs legalization.
4907     // FIXME: We would manually legalize if it's important.
4908     if (!isTypeLegal(Vec.getValueType()))
4909       return;
4910 
4911     MVT VecVT = Vec.getSimpleValueType();
4912 
4913     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
4914            VecVT.getVectorElementType() == MVT::i64 &&
4915            "Unexpected EXTRACT_VECTOR_ELT legalization");
4916 
4917     // If this is a fixed vector, we need to convert it to a scalable vector.
4918     MVT ContainerVT = VecVT;
4919     if (VecVT.isFixedLengthVector()) {
4920       ContainerVT = getContainerForFixedLengthVector(VecVT);
4921       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4922     }
4923 
4924     MVT XLenVT = Subtarget.getXLenVT();
4925 
4926     // Use a VL of 1 to avoid processing more elements than we need.
4927     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4928     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4929     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4930 
4931     // Unless the index is known to be 0, we must slide the vector down to get
4932     // the desired element into index 0.
4933     if (!isNullConstant(Idx)) {
4934       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4935                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4936     }
4937 
4938     // Extract the lower XLEN bits of the correct vector element.
4939     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4940 
4941     // To extract the upper XLEN bits of the vector element, shift the first
4942     // element right by 32 bits and re-extract the lower XLEN bits.
4943     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4944                                      DAG.getConstant(32, DL, XLenVT), VL);
4945     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
4946                                  ThirtyTwoV, Mask, VL);
4947 
4948     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
4949 
4950     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
4951     break;
4952   }
4953   case ISD::INTRINSIC_WO_CHAIN: {
4954     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
4955     switch (IntNo) {
4956     default:
4957       llvm_unreachable(
4958           "Don't know how to custom type legalize this intrinsic!");
4959     case Intrinsic::riscv_orc_b: {
4960       // Lower to the GORCI encoding for orc.b with the operand extended.
4961       SDValue NewOp =
4962           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4963       // If Zbp is enabled, use GORCIW which will sign extend the result.
4964       unsigned Opc =
4965           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
4966       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
4967                                 DAG.getConstant(7, DL, MVT::i64));
4968       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4969       return;
4970     }
4971     case Intrinsic::riscv_grev:
4972     case Intrinsic::riscv_gorc: {
4973       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4974              "Unexpected custom legalisation");
4975       SDValue NewOp1 =
4976           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4977       SDValue NewOp2 =
4978           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4979       unsigned Opc =
4980           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
4981       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
4982       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4983       break;
4984     }
4985     case Intrinsic::riscv_shfl:
4986     case Intrinsic::riscv_unshfl: {
4987       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4988              "Unexpected custom legalisation");
4989       SDValue NewOp1 =
4990           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4991       SDValue NewOp2 =
4992           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4993       unsigned Opc =
4994           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
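      // There is no SHFLIW/UNSHFLIW instruction. When the control word is a
      // constant we can mask it to 4 bits and use the non-W node instead:
      // with bit 4 of the control clear the lower 32 bits of the i64 shuffle
      // match the i32 shuffle, and only those bits survive the TRUNCATE below.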
4995       if (isa<ConstantSDNode>(N->getOperand(2))) {
4996         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
4997                              DAG.getConstant(0xf, DL, MVT::i64));
4998         Opc =
4999             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
5000       }
5001       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5002       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5003       break;
5004     }
5005     case Intrinsic::riscv_bcompress:
5006     case Intrinsic::riscv_bdecompress: {
5007       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5008              "Unexpected custom legalisation");
5009       SDValue NewOp1 =
5010           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5011       SDValue NewOp2 =
5012           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5013       unsigned Opc = IntNo == Intrinsic::riscv_bcompress
5014                          ? RISCVISD::BCOMPRESSW
5015                          : RISCVISD::BDECOMPRESSW;
5016       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5017       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5018       break;
5019     }
5020     case Intrinsic::riscv_vmv_x_s: {
5021       EVT VT = N->getValueType(0);
5022       MVT XLenVT = Subtarget.getXLenVT();
5023       if (VT.bitsLT(XLenVT)) {
5024         // Simple case just extract using vmv.x.s and truncate.
5025         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
5026                                       Subtarget.getXLenVT(), N->getOperand(1));
5027         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
5028         return;
5029       }
5030 
5031       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
5032              "Unexpected custom legalization");
5033 
5034       // We need to do the move in two steps.
5035       SDValue Vec = N->getOperand(1);
5036       MVT VecVT = Vec.getSimpleValueType();
5037 
5038       // First extract the lower XLEN bits of the element.
5039       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5040 
5041       // To extract the upper XLEN bits of the vector element, shift the first
5042       // element right by 32 bits and re-extract the lower XLEN bits.
5043       SDValue VL = DAG.getConstant(1, DL, XLenVT);
5044       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5045       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5046       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
5047                                        DAG.getConstant(32, DL, XLenVT), VL);
5048       SDValue LShr32 =
5049           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
5050       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5051 
5052       Results.push_back(
5053           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5054       break;
5055     }
5056     }
5057     break;
5058   }
5059   case ISD::VECREDUCE_ADD:
5060   case ISD::VECREDUCE_AND:
5061   case ISD::VECREDUCE_OR:
5062   case ISD::VECREDUCE_XOR:
5063   case ISD::VECREDUCE_SMAX:
5064   case ISD::VECREDUCE_UMAX:
5065   case ISD::VECREDUCE_SMIN:
5066   case ISD::VECREDUCE_UMIN:
5067     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
5068       Results.push_back(V);
5069     break;
5070   case ISD::FLT_ROUNDS_: {
5071     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
5072     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
5073     Results.push_back(Res.getValue(0));
5074     Results.push_back(Res.getValue(1));
5075     break;
5076   }
5077   }
5078 }
5079 
5080 // A structure to hold one of the bit-manipulation patterns below. Together, a
5081 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
5082 //   (or (and (shl x, 1), 0xAAAAAAAA),
5083 //       (and (srl x, 1), 0x55555555))
5084 struct RISCVBitmanipPat {
5085   SDValue Op;
5086   unsigned ShAmt;
5087   bool IsSHL;
5088 
5089   bool formsPairWith(const RISCVBitmanipPat &Other) const {
5090     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
5091   }
5092 };
5093 
5094 // Matches patterns of the form
5095 //   (and (shl x, C2), (C1 << C2))
5096 //   (and (srl x, C2), C1)
5097 //   (shl (and x, C1), C2)
5098 //   (srl (and x, (C1 << C2)), C2)
5099 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
5100 // The expected masks for each shift amount are specified in BitmanipMasks where
5101 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
// The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
// whether BitmanipMasks contains 6 or 5 entries respectively, assuming the
// maximum possible XLen is 64.
5105 static Optional<RISCVBitmanipPat>
5106 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
5107   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
5108          "Unexpected number of masks");
5109   Optional<uint64_t> Mask;
5110   // Optionally consume a mask around the shift operation.
5111   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
5112     Mask = Op.getConstantOperandVal(1);
5113     Op = Op.getOperand(0);
5114   }
5115   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
5116     return None;
5117   bool IsSHL = Op.getOpcode() == ISD::SHL;
5118 
5119   if (!isa<ConstantSDNode>(Op.getOperand(1)))
5120     return None;
5121   uint64_t ShAmt = Op.getConstantOperandVal(1);
5122 
5123   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
  if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
5125     return None;
5126   // If we don't have enough masks for 64 bit, then we must be trying to
5127   // match SHFL so we're only allowed to shift 1/4 of the width.
5128   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
5129     return None;
5130 
5131   SDValue Src = Op.getOperand(0);
5132 
5133   // The expected mask is shifted left when the AND is found around SHL
5134   // patterns.
5135   //   ((x >> 1) & 0x55555555)
5136   //   ((x << 1) & 0xAAAAAAAA)
5137   bool SHLExpMask = IsSHL;
5138 
5139   if (!Mask) {
5140     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
5141     // the mask is all ones: consume that now.
5142     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
5143       Mask = Src.getConstantOperandVal(1);
5144       Src = Src.getOperand(0);
5145       // The expected mask is now in fact shifted left for SRL, so reverse the
5146       // decision.
5147       //   ((x & 0xAAAAAAAA) >> 1)
5148       //   ((x & 0x55555555) << 1)
5149       SHLExpMask = !SHLExpMask;
5150     } else {
5151       // Use a default shifted mask of all-ones if there's no AND, truncated
5152       // down to the expected width. This simplifies the logic later on.
5153       Mask = maskTrailingOnes<uint64_t>(Width);
5154       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
5155     }
5156   }
5157 
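  // For example, a shift amount of 4 selects BitmanipMasks[2] (0x0F0F0F0F for
  // GREVI matching); the mask is shifted left by 4 when SHLExpMask is set.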
5158   unsigned MaskIdx = Log2_32(ShAmt);
5159   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5160 
5161   if (SHLExpMask)
5162     ExpMask <<= ShAmt;
5163 
5164   if (Mask != ExpMask)
5165     return None;
5166 
5167   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
5168 }
5169 
5170 // Matches any of the following bit-manipulation patterns:
5171 //   (and (shl x, 1), (0x55555555 << 1))
5172 //   (and (srl x, 1), 0x55555555)
5173 //   (shl (and x, 0x55555555), 1)
5174 //   (srl (and x, (0x55555555 << 1)), 1)
5175 // where the shift amount and mask may vary thus:
5176 //   [1]  = 0x55555555 / 0xAAAAAAAA
5177 //   [2]  = 0x33333333 / 0xCCCCCCCC
5178 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
5179 //   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
5181 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
5182 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
5183   // These are the unshifted masks which we use to match bit-manipulation
5184   // patterns. They may be shifted left in certain circumstances.
5185   static const uint64_t BitmanipMasks[] = {
5186       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
5187       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
5188 
5189   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5190 }
5191 
5192 // Match the following pattern as a GREVI(W) operation
5193 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
5194 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
5195                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5197   EVT VT = Op.getValueType();
5198 
5199   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5200     auto LHS = matchGREVIPat(Op.getOperand(0));
5201     auto RHS = matchGREVIPat(Op.getOperand(1));
5202     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
5203       SDLoc DL(Op);
5204       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
5205                          DAG.getConstant(LHS->ShAmt, DL, VT));
5206     }
5207   }
5208   return SDValue();
5209 }
5210 
// Matches any of the following patterns as a GORCI(W) operation
5212 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
5213 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
5214 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
5215 // Note that with the variant of 3.,
5216 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
5217 // the inner pattern will first be matched as GREVI and then the outer
5218 // pattern will be matched to GORC via the first rule above.
5219 // 4.  (or (rotl/rotr x, bitwidth/2), x)
5220 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
5221                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5223   EVT VT = Op.getValueType();
5224 
5225   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5226     SDLoc DL(Op);
5227     SDValue Op0 = Op.getOperand(0);
5228     SDValue Op1 = Op.getOperand(1);
5229 
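    // Returns a GORC node if Reverse is (GREVI X, shamt) with a power-of-2
    // shamt, or a rotate of X by half the bit width; otherwise returns an
    // empty SDValue.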
5230     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
5231       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
5232           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
5233           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
5234         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
5235       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
5236       if ((Reverse.getOpcode() == ISD::ROTL ||
5237            Reverse.getOpcode() == ISD::ROTR) &&
5238           Reverse.getOperand(0) == X &&
5239           isa<ConstantSDNode>(Reverse.getOperand(1))) {
5240         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
5241         if (RotAmt == (VT.getSizeInBits() / 2))
5242           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
5243                              DAG.getConstant(RotAmt, DL, VT));
5244       }
5245       return SDValue();
5246     };
5247 
5248     // Check for either commutable permutation of (or (GREVI x, shamt), x)
5249     if (SDValue V = MatchOROfReverse(Op0, Op1))
5250       return V;
5251     if (SDValue V = MatchOROfReverse(Op1, Op0))
5252       return V;
5253 
5254     // OR is commutable so canonicalize its OR operand to the left
5255     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
5256       std::swap(Op0, Op1);
5257     if (Op0.getOpcode() != ISD::OR)
5258       return SDValue();
5259     SDValue OrOp0 = Op0.getOperand(0);
5260     SDValue OrOp1 = Op0.getOperand(1);
5261     auto LHS = matchGREVIPat(OrOp0);
5262     // OR is commutable so swap the operands and try again: x might have been
5263     // on the left
5264     if (!LHS) {
5265       std::swap(OrOp0, OrOp1);
5266       LHS = matchGREVIPat(OrOp0);
5267     }
5268     auto RHS = matchGREVIPat(Op1);
5269     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
5270       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
5271                          DAG.getConstant(LHS->ShAmt, DL, VT));
5272     }
5273   }
5274   return SDValue();
5275 }
5276 
5277 // Matches any of the following bit-manipulation patterns:
5278 //   (and (shl x, 1), (0x22222222 << 1))
5279 //   (and (srl x, 1), 0x22222222)
5280 //   (shl (and x, 0x22222222), 1)
5281 //   (srl (and x, (0x22222222 << 1)), 1)
5282 // where the shift amount and mask may vary thus:
5283 //   [1]  = 0x22222222 / 0x44444444
//   [2]  = 0x0C0C0C0C / 0x30303030
5285 //   [4]  = 0x00F000F0 / 0x0F000F00
5286 //   [8]  = 0x0000FF00 / 0x00FF0000
5287 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
5288 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
5289   // These are the unshifted masks which we use to match bit-manipulation
5290   // patterns. They may be shifted left in certain circumstances.
5291   static const uint64_t BitmanipMasks[] = {
5292       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
5293       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
5294 
5295   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5296 }
5297 
// Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
5299 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
5300                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
5302   EVT VT = Op.getValueType();
5303 
5304   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
5305     return SDValue();
5306 
5307   SDValue Op0 = Op.getOperand(0);
5308   SDValue Op1 = Op.getOperand(1);
5309 
5310   // Or is commutable so canonicalize the second OR to the LHS.
5311   if (Op0.getOpcode() != ISD::OR)
5312     std::swap(Op0, Op1);
5313   if (Op0.getOpcode() != ISD::OR)
5314     return SDValue();
5315 
5316   // We found an inner OR, so our operands are the operands of the inner OR
5317   // and the other operand of the outer OR.
5318   SDValue A = Op0.getOperand(0);
5319   SDValue B = Op0.getOperand(1);
5320   SDValue C = Op1;
5321 
5322   auto Match1 = matchSHFLPat(A);
5323   auto Match2 = matchSHFLPat(B);
5324 
5325   // If neither matched, we failed.
5326   if (!Match1 && !Match2)
5327     return SDValue();
5328 
  // We had at least one match. If one failed, try the remaining C operand.
5330   if (!Match1) {
5331     std::swap(A, C);
5332     Match1 = matchSHFLPat(A);
5333     if (!Match1)
5334       return SDValue();
5335   } else if (!Match2) {
5336     std::swap(B, C);
5337     Match2 = matchSHFLPat(B);
5338     if (!Match2)
5339       return SDValue();
5340   }
5341   assert(Match1 && Match2);
5342 
5343   // Make sure our matches pair up.
5344   if (!Match1->formsPairWith(*Match2))
5345     return SDValue();
5346 
  // All that remains is to make sure C is an AND with the same input that
  // masks out the bits that are being shuffled.
5349   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
5350       C.getOperand(0) != Match1->Op)
5351     return SDValue();
5352 
5353   uint64_t Mask = C.getConstantOperandVal(1);
5354 
5355   static const uint64_t BitmanipMasks[] = {
5356       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
5357       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
5358   };
5359 
5360   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5361   unsigned MaskIdx = Log2_32(Match1->ShAmt);
5362   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5363 
5364   if (Mask != ExpMask)
5365     return SDValue();
5366 
5367   SDLoc DL(Op);
5368   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
5369                      DAG.getConstant(Match1->ShAmt, DL, VT));
5370 }
5371 
// Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
// non-zero, and to x when it is zero. Any repeated GREVI stage undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated stage does
// not undo itself, but it is redundant.
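// For example, (GREVI (GREVI x, 1), 2) becomes (GREVI x, 3), while
// (GREVI (GREVI x, 1), 1) folds away to x.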
5376 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
5377   SDValue Src = N->getOperand(0);
5378 
5379   if (Src.getOpcode() != N->getOpcode())
5380     return SDValue();
5381 
5382   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
5383       !isa<ConstantSDNode>(Src.getOperand(1)))
5384     return SDValue();
5385 
5386   unsigned ShAmt1 = N->getConstantOperandVal(1);
5387   unsigned ShAmt2 = Src.getConstantOperandVal(1);
5388   Src = Src.getOperand(0);
5389 
5390   unsigned CombinedShAmt;
5391   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
5392     CombinedShAmt = ShAmt1 | ShAmt2;
5393   else
5394     CombinedShAmt = ShAmt1 ^ ShAmt2;
5395 
5396   if (CombinedShAmt == 0)
5397     return Src;
5398 
5399   SDLoc DL(N);
5400   return DAG.getNode(
5401       N->getOpcode(), DL, N->getValueType(0), Src,
5402       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
5403 }
5404 
5405 // Combine a constant select operand into its use:
5406 //
// (and (select_cc lhs, rhs, cc, -1, c), x)
//   -> (select_cc lhs, rhs, cc, x, (and x, c))  [AllOnes=1]
// (or  (select_cc lhs, rhs, cc, 0, c), x)
//   -> (select_cc lhs, rhs, cc, x, (or x, c))  [AllOnes=0]
// (xor (select_cc lhs, rhs, cc, 0, c), x)
//   -> (select_cc lhs, rhs, cc, x, (xor x, c))  [AllOnes=0]
5413 static SDValue combineSelectCCAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
5414                                      SelectionDAG &DAG, bool AllOnes) {
5415   EVT VT = N->getValueType(0);
5416 
5417   if (Slct.getOpcode() != RISCVISD::SELECT_CC || !Slct.hasOneUse())
5418     return SDValue();
5419 
5420   auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
5421     return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
5422   };
5423 
5424   bool SwapSelectOps;
5425   SDValue TrueVal = Slct.getOperand(3);
5426   SDValue FalseVal = Slct.getOperand(4);
5427   SDValue NonConstantVal;
5428   if (isZeroOrAllOnes(TrueVal, AllOnes)) {
5429     SwapSelectOps = false;
5430     NonConstantVal = FalseVal;
5431   } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
5432     SwapSelectOps = true;
5433     NonConstantVal = TrueVal;
5434   } else
5435     return SDValue();
5436 
  // Slct is now known to be the desired identity constant when CC is true.
5438   TrueVal = OtherOp;
5439   FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
5440   // Unless SwapSelectOps says CC should be false.
5441   if (SwapSelectOps)
5442     std::swap(TrueVal, FalseVal);
5443 
5444   return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
5445                      {Slct.getOperand(0), Slct.getOperand(1),
5446                       Slct.getOperand(2), TrueVal, FalseVal});
5447 }
5448 
// Attempt combineSelectCCAndUse on each operand of a commutative operator N.
5450 static SDValue combineSelectCCAndUseCommutative(SDNode *N, SelectionDAG &DAG,
5451                                                 bool AllOnes) {
5452   SDValue N0 = N->getOperand(0);
5453   SDValue N1 = N->getOperand(1);
5454   if (SDValue Result = combineSelectCCAndUse(N, N0, N1, DAG, AllOnes))
5455     return Result;
5456   if (SDValue Result = combineSelectCCAndUse(N, N1, N0, DAG, AllOnes))
5457     return Result;
5458   return SDValue();
5459 }
5460 
5461 static SDValue performANDCombine(SDNode *N,
5462                                  TargetLowering::DAGCombinerInfo &DCI,
5463                                  const RISCVSubtarget &Subtarget) {
5464   SelectionDAG &DAG = DCI.DAG;
5465 
5466   // fold (and (select_cc lhs, rhs, cc, -1, y), x) ->
5467   //      (select lhs, rhs, cc, x, (and x, y))
5468   return combineSelectCCAndUseCommutative(N, DAG, true);
5469 }
5470 
5471 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5472                                 const RISCVSubtarget &Subtarget) {
5473   SelectionDAG &DAG = DCI.DAG;
5474   if (Subtarget.hasStdExtZbp()) {
5475     if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
5476       return GREV;
5477     if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
5478       return GORC;
5479     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
5480       return SHFL;
5481   }
5482 
5483   // fold (or (select_cc lhs, rhs, cc, 0, y), x) ->
5484   //      (select lhs, rhs, cc, x, (or x, y))
5485   return combineSelectCCAndUseCommutative(N, DAG, false);
5486 }
5487 
5488 static SDValue performXORCombine(SDNode *N,
5489                                  TargetLowering::DAGCombinerInfo &DCI,
5490                                  const RISCVSubtarget &Subtarget) {
5491   SelectionDAG &DAG = DCI.DAG;
5492 
5493   // fold (xor (select_cc lhs, rhs, cc, 0, y), x) ->
5494   //      (select lhs, rhs, cc, x, (xor x, y))
5495   return combineSelectCCAndUseCommutative(N, DAG, false);
5496 }
5497 
5498 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
5499                                                DAGCombinerInfo &DCI) const {
5500   SelectionDAG &DAG = DCI.DAG;
5501 
5502   switch (N->getOpcode()) {
5503   default:
5504     break;
5505   case RISCVISD::SplitF64: {
5506     SDValue Op0 = N->getOperand(0);
5507     // If the input to SplitF64 is just BuildPairF64 then the operation is
5508     // redundant. Instead, use BuildPairF64's operands directly.
5509     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
5510       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
5511 
5512     SDLoc DL(N);
5513 
5514     // It's cheaper to materialise two 32-bit integers than to load a double
5515     // from the constant pool and transfer it to integer registers through the
5516     // stack.
5517     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
5518       APInt V = C->getValueAPF().bitcastToAPInt();
5519       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
5520       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
5521       return DCI.CombineTo(N, Lo, Hi);
5522     }
5523 
5524     // This is a target-specific version of a DAGCombine performed in
5525     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5526     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5527     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5528     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5529         !Op0.getNode()->hasOneUse())
5530       break;
5531     SDValue NewSplitF64 =
5532         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
5533                     Op0.getOperand(0));
5534     SDValue Lo = NewSplitF64.getValue(0);
5535     SDValue Hi = NewSplitF64.getValue(1);
5536     APInt SignBit = APInt::getSignMask(32);
5537     if (Op0.getOpcode() == ISD::FNEG) {
5538       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
5539                                   DAG.getConstant(SignBit, DL, MVT::i32));
5540       return DCI.CombineTo(N, Lo, NewHi);
5541     }
5542     assert(Op0.getOpcode() == ISD::FABS);
5543     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
5544                                 DAG.getConstant(~SignBit, DL, MVT::i32));
5545     return DCI.CombineTo(N, Lo, NewHi);
5546   }
5547   case RISCVISD::SLLW:
5548   case RISCVISD::SRAW:
5549   case RISCVISD::SRLW:
5550   case RISCVISD::ROLW:
5551   case RISCVISD::RORW: {
5552     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5553     SDValue LHS = N->getOperand(0);
5554     SDValue RHS = N->getOperand(1);
5555     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5556     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5557     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
5558         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
5559       if (N->getOpcode() != ISD::DELETED_NODE)
5560         DCI.AddToWorklist(N);
5561       return SDValue(N, 0);
5562     }
5563     break;
5564   }
5565   case RISCVISD::CLZW:
5566   case RISCVISD::CTZW: {
5567     // Only the lower 32 bits of the first operand are read
5568     SDValue Op0 = N->getOperand(0);
5569     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5570     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
5571       if (N->getOpcode() != ISD::DELETED_NODE)
5572         DCI.AddToWorklist(N);
5573       return SDValue(N, 0);
5574     }
5575     break;
5576   }
5577   case RISCVISD::FSL:
5578   case RISCVISD::FSR: {
    // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
5580     SDValue ShAmt = N->getOperand(2);
5581     unsigned BitWidth = ShAmt.getValueSizeInBits();
5582     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
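    // For example, with XLEN == 32 the mask is 63 (6 bits); with XLEN == 64 it
    // is 127 (7 bits).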
5583     APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
5584     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5585       if (N->getOpcode() != ISD::DELETED_NODE)
5586         DCI.AddToWorklist(N);
5587       return SDValue(N, 0);
5588     }
5589     break;
5590   }
5591   case RISCVISD::FSLW:
5592   case RISCVISD::FSRW: {
5593     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
5594     // read.
5595     SDValue Op0 = N->getOperand(0);
5596     SDValue Op1 = N->getOperand(1);
5597     SDValue ShAmt = N->getOperand(2);
5598     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5599     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
5600     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
5601         SimplifyDemandedBits(Op1, OpMask, DCI) ||
5602         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5603       if (N->getOpcode() != ISD::DELETED_NODE)
5604         DCI.AddToWorklist(N);
5605       return SDValue(N, 0);
5606     }
5607     break;
5608   }
5609   case RISCVISD::GREV:
5610   case RISCVISD::GORC: {
    // Only the lower log2(Bitwidth) bits of the shift amount are read.
5612     SDValue ShAmt = N->getOperand(1);
5613     unsigned BitWidth = ShAmt.getValueSizeInBits();
5614     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5615     APInt ShAmtMask(BitWidth, BitWidth - 1);
5616     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5617       if (N->getOpcode() != ISD::DELETED_NODE)
5618         DCI.AddToWorklist(N);
5619       return SDValue(N, 0);
5620     }
5621 
5622     return combineGREVI_GORCI(N, DCI.DAG);
5623   }
5624   case RISCVISD::GREVW:
5625   case RISCVISD::GORCW: {
5626     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5627     SDValue LHS = N->getOperand(0);
5628     SDValue RHS = N->getOperand(1);
5629     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5630     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5631     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5632         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5633       if (N->getOpcode() != ISD::DELETED_NODE)
5634         DCI.AddToWorklist(N);
5635       return SDValue(N, 0);
5636     }
5637 
5638     return combineGREVI_GORCI(N, DCI.DAG);
5639   }
5640   case RISCVISD::SHFL:
5641   case RISCVISD::UNSHFL: {
    // Only the lower log2(Bitwidth)-1 bits of the shift amount are read.
5643     SDValue ShAmt = N->getOperand(1);
5644     unsigned BitWidth = ShAmt.getValueSizeInBits();
5645     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5646     APInt ShAmtMask(BitWidth, (BitWidth / 2) - 1);
5647     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5648       if (N->getOpcode() != ISD::DELETED_NODE)
5649         DCI.AddToWorklist(N);
5650       return SDValue(N, 0);
5651     }
5652 
5653     break;
5654   }
5655   case RISCVISD::SHFLW:
5656   case RISCVISD::UNSHFLW: {
    // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
5658     SDValue LHS = N->getOperand(0);
5659     SDValue RHS = N->getOperand(1);
5660     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5661     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
5662     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5663         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5664       if (N->getOpcode() != ISD::DELETED_NODE)
5665         DCI.AddToWorklist(N);
5666       return SDValue(N, 0);
5667     }
5668 
5669     break;
5670   }
5671   case RISCVISD::BCOMPRESSW:
5672   case RISCVISD::BDECOMPRESSW: {
5673     // Only the lower 32 bits of LHS and RHS are read.
5674     SDValue LHS = N->getOperand(0);
5675     SDValue RHS = N->getOperand(1);
5676     APInt Mask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5677     if (SimplifyDemandedBits(LHS, Mask, DCI) ||
5678         SimplifyDemandedBits(RHS, Mask, DCI)) {
5679       if (N->getOpcode() != ISD::DELETED_NODE)
5680         DCI.AddToWorklist(N);
5681       return SDValue(N, 0);
5682     }
5683 
5684     break;
5685   }
5686   case RISCVISD::FMV_X_ANYEXTW_RV64: {
5687     SDLoc DL(N);
5688     SDValue Op0 = N->getOperand(0);
5689     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
5690     // conversion is unnecessary and can be replaced with an ANY_EXTEND
5691     // of the FMV_W_X_RV64 operand.
5692     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
5693       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
5694              "Unexpected value type!");
5695       return Op0.getOperand(0);
5696     }
5697 
5698     // This is a target-specific version of a DAGCombine performed in
5699     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5700     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5701     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5702     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5703         !Op0.getNode()->hasOneUse())
5704       break;
5705     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
5706                                  Op0.getOperand(0));
5707     APInt SignBit = APInt::getSignMask(32).sext(64);
5708     if (Op0.getOpcode() == ISD::FNEG)
5709       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
5710                          DAG.getConstant(SignBit, DL, MVT::i64));
5711 
5712     assert(Op0.getOpcode() == ISD::FABS);
5713     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
5714                        DAG.getConstant(~SignBit, DL, MVT::i64));
5715   }
5716   case ISD::AND:
5717     return performANDCombine(N, DCI, Subtarget);
5718   case ISD::OR:
5719     return performORCombine(N, DCI, Subtarget);
5720   case ISD::XOR:
5721     return performXORCombine(N, DCI, Subtarget);
5722   case RISCVISD::SELECT_CC: {
    // Try to fold or simplify the comparison feeding this SELECT_CC.
5724     SDValue LHS = N->getOperand(0);
5725     SDValue RHS = N->getOperand(1);
5726     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
5727     if (!ISD::isIntEqualitySetCC(CCVal))
5728       break;
5729 
5730     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
5731     //      (select_cc X, Y, lt, trueV, falseV)
5732     // Sometimes the setcc is introduced after select_cc has been formed.
5733     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5734         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5735       // If we're looking for eq 0 instead of ne 0, we need to invert the
5736       // condition.
5737       bool Invert = CCVal == ISD::SETEQ;
5738       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5739       if (Invert)
5740         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5741 
5742       SDLoc DL(N);
5743       RHS = LHS.getOperand(1);
5744       LHS = LHS.getOperand(0);
5745       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5746 
5747       SDValue TargetCC =
5748           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5749       return DAG.getNode(
5750           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5751           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5752     }
5753 
5754     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
5755     //      (select_cc X, Y, eq/ne, trueV, falseV)
5756     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5757       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
5758                          {LHS.getOperand(0), LHS.getOperand(1),
5759                           N->getOperand(2), N->getOperand(3),
5760                           N->getOperand(4)});
5761     // (select_cc X, 1, setne, trueV, falseV) ->
5762     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
5763     // This can occur when legalizing some floating point comparisons.
5764     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5765     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5766       SDLoc DL(N);
5767       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5768       SDValue TargetCC =
5769           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5770       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5771       return DAG.getNode(
5772           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5773           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5774     }
5775 
5776     break;
5777   }
5778   case RISCVISD::BR_CC: {
5779     SDValue LHS = N->getOperand(1);
5780     SDValue RHS = N->getOperand(2);
5781     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
5782     if (!ISD::isIntEqualitySetCC(CCVal))
5783       break;
5784 
5785     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
5786     //      (br_cc X, Y, lt, dest)
5787     // Sometimes the setcc is introduced after br_cc has been formed.
5788     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5789         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5790       // If we're looking for eq 0 instead of ne 0, we need to invert the
5791       // condition.
5792       bool Invert = CCVal == ISD::SETEQ;
5793       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5794       if (Invert)
5795         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5796 
5797       SDLoc DL(N);
5798       RHS = LHS.getOperand(1);
5799       LHS = LHS.getOperand(0);
5800       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5801 
5802       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5803                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
5804                          N->getOperand(4));
5805     }
5806 
5807     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
    //      (br_cc X, Y, eq/ne, dest)
5809     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5810       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
5811                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
5812                          N->getOperand(3), N->getOperand(4));
5813 
    // (br_cc X, 1, setne, dest) ->
    // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
5816     // This can occur when legalizing some floating point comparisons.
5817     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5818     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5819       SDLoc DL(N);
5820       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5821       SDValue TargetCC = DAG.getCondCode(CCVal);
5822       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5823       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5824                          N->getOperand(0), LHS, RHS, TargetCC,
5825                          N->getOperand(4));
5826     }
5827     break;
5828   }
5829   case ISD::FCOPYSIGN: {
5830     EVT VT = N->getValueType(0);
5831     if (!VT.isVector())
5832       break;
    // There is a form of VFSGNJ which injects the negated sign of its second
    // operand. Try and bubble any FNEG up after the extend/round to produce
    // this optimized pattern. Avoid modifying cases where the FP_ROUND has
    // TRUNC=1.
5837     SDValue In2 = N->getOperand(1);
5838     // Avoid cases where the extend/round has multiple uses, as duplicating
5839     // those is typically more expensive than removing a fneg.
5840     if (!In2.hasOneUse())
5841       break;
5842     if (In2.getOpcode() != ISD::FP_EXTEND &&
5843         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
5844       break;
5845     In2 = In2.getOperand(0);
5846     if (In2.getOpcode() != ISD::FNEG)
5847       break;
5848     SDLoc DL(N);
5849     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
5850     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
5851                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
5852   }
5853   case ISD::MGATHER:
5854   case ISD::MSCATTER: {
5855     if (!DCI.isBeforeLegalize())
5856       break;
5857     MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
5858     SDValue Index = MGSN->getIndex();
5859     EVT IndexVT = Index.getValueType();
5860     MVT XLenVT = Subtarget.getXLenVT();
5861     // RISCV indexed loads only support the "unsigned unscaled" addressing
5862     // mode, so anything else must be manually legalized.
5863     bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
5864                                 (MGSN->isIndexSigned() &&
5865                                  IndexVT.getVectorElementType().bitsLT(XLenVT));
5866     if (!NeedsIdxLegalization)
5867       break;
5868 
5869     SDLoc DL(N);
5870 
5871     // Any index legalization should first promote to XLenVT, so we don't lose
5872     // bits when scaling. This may create an illegal index type so we let
5873     // LLVM's legalization take care of the splitting.
5874     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
5875       IndexVT = IndexVT.changeVectorElementType(XLenVT);
5876       Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
5877                                                 : ISD::ZERO_EXTEND,
5878                           DL, IndexVT, Index);
5879     }
5880 
5881     unsigned Scale = N->getConstantOperandVal(5);
5882     if (MGSN->isIndexScaled() && Scale != 1) {
5883       // Manually scale the indices by the element size.
5884       // TODO: Sanitize the scale operand here?
5885       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
5886       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
5887       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
5888     }
5889 
5890     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
5891     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
5892       return DAG.getMaskedGather(
5893           N->getVTList(), MGSN->getMemoryVT(), DL,
5894           {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
5895            MGSN->getBasePtr(), Index, MGN->getScale()},
5896           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
5897     }
5898     const auto *MSN = cast<MaskedScatterSDNode>(N);
5899     return DAG.getMaskedScatter(
5900         N->getVTList(), MGSN->getMemoryVT(), DL,
5901         {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
5902          Index, MGSN->getScale()},
5903         MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
5904   }
5905   case RISCVISD::SRA_VL:
5906   case RISCVISD::SRL_VL:
5907   case RISCVISD::SHL_VL: {
5908     SDValue ShAmt = N->getOperand(1);
5909     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
5910       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
5911       SDLoc DL(N);
5912       SDValue VL = N->getOperand(3);
5913       EVT VT = N->getValueType(0);
5914       ShAmt =
5915           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
5916       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
5917                          N->getOperand(2), N->getOperand(3));
5918     }
5919     break;
5920   }
5921   case ISD::SRA:
5922   case ISD::SRL:
5923   case ISD::SHL: {
5924     SDValue ShAmt = N->getOperand(1);
5925     if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
5926       // We don't need the upper 32 bits of a 64-bit element for a shift amount.
5927       SDLoc DL(N);
5928       EVT VT = N->getValueType(0);
5929       ShAmt =
5930           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
5931       return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
5932     }
5933     break;
5934   }
5935   }
5936 
5937   return SDValue();
5938 }
5939 
5940 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
5941     const SDNode *N, CombineLevel Level) const {
5942   // The following folds are only desirable if `(OP _, c1 << c2)` can be
5943   // materialised in fewer instructions than `(OP _, c1)`:
5944   //
5945   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
5946   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
5947   SDValue N0 = N->getOperand(0);
5948   EVT Ty = N0.getValueType();
5949   if (Ty.isScalarInteger() &&
5950       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
5951     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
5952     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
5953     if (C1 && C2) {
5954       const APInt &C1Int = C1->getAPIntValue();
5955       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
5956 
5957       // We can materialise `c1 << c2` into an add immediate, so it's "free",
5958       // and the combine should happen, to potentially allow further combines
5959       // later.
5960       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
5961           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
5962         return true;
5963 
5964       // We can materialise `c1` in an add immediate, so it's "free", and the
5965       // combine should be prevented.
5966       if (C1Int.getMinSignedBits() <= 64 &&
5967           isLegalAddImmediate(C1Int.getSExtValue()))
5968         return false;
5969 
5970       // Neither constant will fit into an immediate, so find materialisation
5971       // costs.
5972       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
5973                                               Subtarget.is64Bit());
5974       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
5975           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
5976 
5977       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
5978       // combine should be prevented.
5979       if (C1Cost < ShiftedC1Cost)
5980         return false;
5981     }
5982   }
5983   return true;
5984 }
5985 
5986 bool RISCVTargetLowering::targetShrinkDemandedConstant(
5987     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
5988     TargetLoweringOpt &TLO) const {
5989   // Delay this optimization as late as possible.
5990   if (!TLO.LegalOps)
5991     return false;
5992 
5993   EVT VT = Op.getValueType();
5994   if (VT.isVector())
5995     return false;
5996 
5997   // Only handle AND for now.
5998   if (Op.getOpcode() != ISD::AND)
5999     return false;
6000 
6001   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
6002   if (!C)
6003     return false;
6004 
6005   const APInt &Mask = C->getAPIntValue();
6006 
6007   // Clear all non-demanded bits initially.
6008   APInt ShrunkMask = Mask & DemandedBits;
6009 
6010   // Try to make a smaller immediate by setting undemanded bits.
6011 
6012   APInt ExpandedMask = Mask | ~DemandedBits;
6013 
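  // A candidate mask is legal if it covers every demanded bit of the shrunk
  // mask and sets no bits outside the expanded mask. UseMask rewrites the AND
  // to use the new constant, or simply succeeds if nothing changed.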
6014   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
6015     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
6016   };
6017   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
6018     if (NewMask == Mask)
6019       return true;
6020     SDLoc DL(Op);
6021     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
6022     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
6023     return TLO.CombineTo(Op, NewOp);
6024   };
6025 
6026   // If the shrunk mask fits in sign extended 12 bits, let the target
6027   // independent code apply it.
6028   if (ShrunkMask.isSignedIntN(12))
6029     return false;
6030 
6031   // Preserve (and X, 0xffff) when zext.h is supported.
6032   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
6033     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
6034     if (IsLegalMask(NewMask))
6035       return UseMask(NewMask);
6036   }
6037 
6038   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
6039   if (VT == MVT::i64) {
6040     APInt NewMask = APInt(64, 0xffffffff);
6041     if (IsLegalMask(NewMask))
6042       return UseMask(NewMask);
6043   }
6044 
6045   // For the remaining optimizations, we need to be able to make a negative
6046   // number through a combination of mask and undemanded bits.
6047   if (!ExpandedMask.isNegative())
6048     return false;
6049 
  // Compute the fewest number of bits needed to represent the negative number.
6051   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
6052 
6053   // Try to make a 12 bit negative immediate. If that fails try to make a 32
6054   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
6055   APInt NewMask = ShrunkMask;
6056   if (MinSignedBits <= 12)
6057     NewMask.setBitsFrom(11);
6058   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
6059     NewMask.setBitsFrom(31);
6060   else
6061     return false;
6062 
6063   // Sanity check that our new mask is a subset of the demanded mask.
6064   assert(IsLegalMask(NewMask));
6065   return UseMask(NewMask);
6066 }
6067 
6068 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
6069                                                         KnownBits &Known,
6070                                                         const APInt &DemandedElts,
6071                                                         const SelectionDAG &DAG,
6072                                                         unsigned Depth) const {
6073   unsigned BitWidth = Known.getBitWidth();
6074   unsigned Opc = Op.getOpcode();
6075   assert((Opc >= ISD::BUILTIN_OP_END ||
6076           Opc == ISD::INTRINSIC_WO_CHAIN ||
6077           Opc == ISD::INTRINSIC_W_CHAIN ||
6078           Opc == ISD::INTRINSIC_VOID) &&
6079          "Should use MaskedValueIsZero if you don't know whether Op"
6080          " is a target node!");
6081 
6082   Known.resetAll();
6083   switch (Opc) {
6084   default: break;
6085   case RISCVISD::SELECT_CC: {
6086     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
6087     // If we don't know any bits, early out.
6088     if (Known.isUnknown())
6089       break;
6090     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
6091 
6092     // Only known if known in both the LHS and RHS.
6093     Known = KnownBits::commonBits(Known, Known2);
6094     break;
6095   }
6096   case RISCVISD::REMUW: {
6097     KnownBits Known2;
6098     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6099     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6100     // We only care about the lower 32 bits.
6101     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
6102     // Restore the original width by sign extending.
6103     Known = Known.sext(BitWidth);
6104     break;
6105   }
6106   case RISCVISD::DIVUW: {
6107     KnownBits Known2;
6108     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
6109     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
6110     // We only care about the lower 32 bits.
6111     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
6112     // Restore the original width by sign extending.
6113     Known = Known.sext(BitWidth);
6114     break;
6115   }
6116   case RISCVISD::CTZW: {
6117     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6118     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
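    // The result is at most PossibleTZ, so it fits in Log2_32(PossibleTZ) + 1
    // bits; all bits above that are known to be zero.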
6119     unsigned LowBits = Log2_32(PossibleTZ) + 1;
6120     Known.Zero.setBitsFrom(LowBits);
6121     break;
6122   }
6123   case RISCVISD::CLZW: {
6124     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
6125     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
6126     unsigned LowBits = Log2_32(PossibleLZ) + 1;
6127     Known.Zero.setBitsFrom(LowBits);
6128     break;
6129   }
6130   case RISCVISD::READ_VLENB:
    // We assume VLENB is a multiple of 16 bytes (and at least 16), so the low
    // 4 bits of the result are known to be zero.
6132     Known.Zero.setLowBits(4);
6133     break;
6134   case ISD::INTRINSIC_W_CHAIN: {
6135     unsigned IntNo = Op.getConstantOperandVal(1);
6136     switch (IntNo) {
6137     default:
6138       // We can't do anything for most intrinsics.
6139       break;
6140     case Intrinsic::riscv_vsetvli:
6141     case Intrinsic::riscv_vsetvlimax:
6142       // Assume that VL output is positive and would fit in an int32_t.
6143       // TODO: VLEN might be capped at 16 bits in a future V spec update.
6144       if (BitWidth >= 32)
6145         Known.Zero.setBitsFrom(31);
6146       break;
6147     }
6148     break;
6149   }
6150   }
6151 }
6152 
6153 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
6154     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
6155     unsigned Depth) const {
6156   switch (Op.getOpcode()) {
6157   default:
6158     break;
6159   case RISCVISD::SLLW:
6160   case RISCVISD::SRAW:
6161   case RISCVISD::SRLW:
6162   case RISCVISD::DIVW:
6163   case RISCVISD::DIVUW:
6164   case RISCVISD::REMUW:
6165   case RISCVISD::ROLW:
6166   case RISCVISD::RORW:
6167   case RISCVISD::GREVW:
6168   case RISCVISD::GORCW:
6169   case RISCVISD::FSLW:
6170   case RISCVISD::FSRW:
6171   case RISCVISD::SHFLW:
6172   case RISCVISD::UNSHFLW:
6173   case RISCVISD::BCOMPRESSW:
6174   case RISCVISD::BDECOMPRESSW:
6175     // TODO: As the result is sign-extended, this is conservatively correct. A
6176     // more precise answer could be calculated for SRAW depending on known
6177     // bits in the shift amount.
6178     return 33;
6179   case RISCVISD::SHFL:
6180   case RISCVISD::UNSHFL: {
6181     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
6182     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
6183     // will stay within the upper 32 bits. If there were more than 32 sign bits
6184     // before there will be at least 33 sign bits after.
6185     if (Op.getValueType() == MVT::i64 &&
6186         isa<ConstantSDNode>(Op.getOperand(1)) &&
6187         (Op.getConstantOperandVal(1) & 0x10) == 0) {
6188       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
6189       if (Tmp > 32)
6190         return 33;
6191     }
6192     break;
6193   }
6194   case RISCVISD::VMV_X_S:
6195     // The number of sign bits of the scalar result is computed by obtaining the
6196     // element type of the input vector operand, subtracting its width from the
6197     // XLEN, and then adding one (sign bit within the element type). If the
6198     // element type is wider than XLen, the least-significant XLEN bits are
6199     // taken.
6200     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
6201       return 1;
6202     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
6203   }
6204 
6205   return 1;
6206 }
6207 
6208 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
6209                                                   MachineBasicBlock *BB) {
6210   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
6211 
6212   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
6213   // Should the count have wrapped while it was being read, we need to try
6214   // again.
6215   // ...
6216   // read:
6217   // rdcycleh x3 # load high word of cycle
6218   // rdcycle  x2 # load low word of cycle
6219   // rdcycleh x4 # load high word of cycle
6220   // bne x3, x4, read # check if high word reads match, otherwise try again
6221   // ...
6222 
6223   MachineFunction &MF = *BB->getParent();
6224   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6225   MachineFunction::iterator It = ++BB->getIterator();
6226 
6227   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6228   MF.insert(It, LoopMBB);
6229 
6230   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
6231   MF.insert(It, DoneMBB);
6232 
6233   // Transfer the remainder of BB and its successor edges to DoneMBB.
6234   DoneMBB->splice(DoneMBB->begin(), BB,
6235                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
6236   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
6237 
6238   BB->addSuccessor(LoopMBB);
6239 
6240   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6241   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6242   Register LoReg = MI.getOperand(0).getReg();
6243   Register HiReg = MI.getOperand(1).getReg();
6244   DebugLoc DL = MI.getDebugLoc();
6245 
6246   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
6247   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
6248       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6249       .addReg(RISCV::X0);
6250   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
6251       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
6252       .addReg(RISCV::X0);
6253   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
6254       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
6255       .addReg(RISCV::X0);
6256 
6257   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
6258       .addReg(HiReg)
6259       .addReg(ReadAgainReg)
6260       .addMBB(LoopMBB);
6261 
6262   LoopMBB->addSuccessor(LoopMBB);
6263   LoopMBB->addSuccessor(DoneMBB);
6264 
6265   MI.eraseFromParent();
6266 
6267   return DoneMBB;
6268 }
6269 
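// Lower SplitF64Pseudo by spilling the FPR64 source to a stack slot and
// reloading its two 32-bit halves into GPRs. RV32 has no instruction to move
// an f64 directly between an FPR and a GPR pair, so the transfer goes through
// memory.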
6270 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
6271                                              MachineBasicBlock *BB) {
6272   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
6273 
6274   MachineFunction &MF = *BB->getParent();
6275   DebugLoc DL = MI.getDebugLoc();
6276   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6277   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6278   Register LoReg = MI.getOperand(0).getReg();
6279   Register HiReg = MI.getOperand(1).getReg();
6280   Register SrcReg = MI.getOperand(2).getReg();
6281   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
6282   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6283 
6284   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
6285                           RI);
6286   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6287   MachineMemOperand *MMOLo =
6288       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
6289   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6290       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
6291   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
6292       .addFrameIndex(FI)
6293       .addImm(0)
6294       .addMemOperand(MMOLo);
6295   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
6296       .addFrameIndex(FI)
6297       .addImm(4)
6298       .addMemOperand(MMOHi);
6299   MI.eraseFromParent(); // The pseudo instruction is gone now.
6300   return BB;
6301 }
6302 
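// Lower BuildPairF64Pseudo (the inverse of SplitF64Pseudo) by storing the two
// 32-bit GPR halves to a stack slot and reloading them as a single f64 into
// an FPR64 register.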
6303 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
6304                                                  MachineBasicBlock *BB) {
6305   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
6306          "Unexpected instruction");
6307 
6308   MachineFunction &MF = *BB->getParent();
6309   DebugLoc DL = MI.getDebugLoc();
6310   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6311   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
6312   Register DstReg = MI.getOperand(0).getReg();
6313   Register LoReg = MI.getOperand(1).getReg();
6314   Register HiReg = MI.getOperand(2).getReg();
6315   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
6316   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
6317 
6318   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
6319   MachineMemOperand *MMOLo =
6320       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
6321   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
6322       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
6323   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6324       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
6325       .addFrameIndex(FI)
6326       .addImm(0)
6327       .addMemOperand(MMOLo);
6328   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
6329       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
6330       .addFrameIndex(FI)
6331       .addImm(4)
6332       .addMemOperand(MMOHi);
6333   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
6334   MI.eraseFromParent(); // The pseudo instruction is gone now.
6335   return BB;
6336 }
6337 
6338 static bool isSelectPseudo(MachineInstr &MI) {
6339   switch (MI.getOpcode()) {
6340   default:
6341     return false;
6342   case RISCV::Select_GPR_Using_CC_GPR:
6343   case RISCV::Select_FPR16_Using_CC_GPR:
6344   case RISCV::Select_FPR32_Using_CC_GPR:
6345   case RISCV::Select_FPR64_Using_CC_GPR:
6346     return true;
6347   }
6348 }
6349 
6350 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
6351                                            MachineBasicBlock *BB) {
6352   // To "insert" Select_* instructions, we actually have to insert the triangle
6353   // control-flow pattern.  The incoming instructions know the destination vreg
6354   // to set, the condition code register to branch on, the true/false values to
6355   // select between, and the condcode to use to select the appropriate branch.
6356   //
6357   // We produce the following control flow:
6358   //     HeadMBB
6359   //     |  \
6360   //     |  IfFalseMBB
6361   //     | /
6362   //    TailMBB
6363   //
6364   // When we find a sequence of selects we attempt to optimize their emission
6365   // by sharing the control flow. Currently we only handle cases where we have
6366   // multiple selects with the exact same condition (same LHS, RHS and CC).
6367   // The selects may be interleaved with other instructions if the other
6368   // instructions meet some requirements we deem safe:
6369   // - They are debug instructions. Otherwise,
6370   // - They do not have side-effects, do not access memory and their inputs do
6371   //   not depend on the results of the select pseudo-instructions.
6372   // The TrueV/FalseV operands of the selects cannot depend on the result of
6373   // previous selects in the sequence.
6374   // These conditions could be further relaxed. See the X86 target for a
6375   // related approach and more information.
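  // Illustrative sketch (operand order as used below: dst, lhs, rhs, cc,
  // trueval, falseval): two selects on the same condition, e.g.
  //   %x = Select_GPR_Using_CC_GPR %lhs, %rhs, cc, %xt, %xf
  //   %y = Select_GPR_Using_CC_GPR %lhs, %rhs, cc, %yt, %yf
  // share a single conditional branch in HeadMBB and become two PHIs in
  // TailMBB:
  //   %x = PHI [ %xt, HeadMBB ], [ %xf, IfFalseMBB ]
  //   %y = PHI [ %yt, HeadMBB ], [ %yf, IfFalseMBB ]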
6376   Register LHS = MI.getOperand(1).getReg();
6377   Register RHS = MI.getOperand(2).getReg();
6378   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
6379 
6380   SmallVector<MachineInstr *, 4> SelectDebugValues;
6381   SmallSet<Register, 4> SelectDests;
6382   SelectDests.insert(MI.getOperand(0).getReg());
6383 
6384   MachineInstr *LastSelectPseudo = &MI;
6385 
6386   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
6387        SequenceMBBI != E; ++SequenceMBBI) {
6388     if (SequenceMBBI->isDebugInstr())
6389       continue;
6390     else if (isSelectPseudo(*SequenceMBBI)) {
6391       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
6392           SequenceMBBI->getOperand(2).getReg() != RHS ||
6393           SequenceMBBI->getOperand(3).getImm() != CC ||
6394           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
6395           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
6396         break;
6397       LastSelectPseudo = &*SequenceMBBI;
6398       SequenceMBBI->collectDebugValues(SelectDebugValues);
6399       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
6400     } else {
6401       if (SequenceMBBI->hasUnmodeledSideEffects() ||
6402           SequenceMBBI->mayLoadOrStore())
6403         break;
6404       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
6405             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
6406           }))
6407         break;
6408     }
6409   }
6410 
6411   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
6412   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6413   DebugLoc DL = MI.getDebugLoc();
6414   MachineFunction::iterator I = ++BB->getIterator();
6415 
6416   MachineBasicBlock *HeadMBB = BB;
6417   MachineFunction *F = BB->getParent();
6418   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
6419   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
6420 
6421   F->insert(I, IfFalseMBB);
6422   F->insert(I, TailMBB);
6423 
6424   // Transfer debug instructions associated with the selects to TailMBB.
6425   for (MachineInstr *DebugInstr : SelectDebugValues) {
6426     TailMBB->push_back(DebugInstr->removeFromParent());
6427   }
6428 
6429   // Move all instructions after the sequence to TailMBB.
6430   TailMBB->splice(TailMBB->end(), HeadMBB,
6431                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
6432   // Update machine-CFG edges by transferring all successors of the current
6433   // block to the new block which will contain the Phi nodes for the selects.
6434   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
6435   // Set the successors for HeadMBB.
6436   HeadMBB->addSuccessor(IfFalseMBB);
6437   HeadMBB->addSuccessor(TailMBB);
6438 
6439   // Insert appropriate branch.
6440   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
6441 
6442   BuildMI(HeadMBB, DL, TII.get(Opcode))
6443     .addReg(LHS)
6444     .addReg(RHS)
6445     .addMBB(TailMBB);
6446 
6447   // IfFalseMBB just falls through to TailMBB.
6448   IfFalseMBB->addSuccessor(TailMBB);
6449 
6450   // Create PHIs for all of the select pseudo-instructions.
6451   auto SelectMBBI = MI.getIterator();
6452   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
6453   auto InsertionPoint = TailMBB->begin();
6454   while (SelectMBBI != SelectEnd) {
6455     auto Next = std::next(SelectMBBI);
6456     if (isSelectPseudo(*SelectMBBI)) {
6457       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
6458       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
6459               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
6460           .addReg(SelectMBBI->getOperand(4).getReg())
6461           .addMBB(HeadMBB)
6462           .addReg(SelectMBBI->getOperand(5).getReg())
6463           .addMBB(IfFalseMBB);
6464       SelectMBBI->eraseFromParent();
6465     }
6466     SelectMBBI = Next;
6467   }
6468 
6469   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
6470   return TailMBB;
6471 }
6472 
6473 MachineBasicBlock *
6474 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
6475                                                  MachineBasicBlock *BB) const {
6476   switch (MI.getOpcode()) {
6477   default:
6478     llvm_unreachable("Unexpected instr type to insert");
6479   case RISCV::ReadCycleWide:
6480     assert(!Subtarget.is64Bit() &&
6481            "ReadCycleWide is only to be used on riscv32");
6482     return emitReadCycleWidePseudo(MI, BB);
6483   case RISCV::Select_GPR_Using_CC_GPR:
6484   case RISCV::Select_FPR16_Using_CC_GPR:
6485   case RISCV::Select_FPR32_Using_CC_GPR:
6486   case RISCV::Select_FPR64_Using_CC_GPR:
6487     return emitSelectPseudo(MI, BB);
6488   case RISCV::BuildPairF64Pseudo:
6489     return emitBuildPairF64Pseudo(MI, BB);
6490   case RISCV::SplitF64Pseudo:
6491     return emitSplitF64Pseudo(MI, BB);
6492   }
6493 }
6494 
6495 // Calling Convention Implementation.
6496 // The expectations for frontend ABI lowering vary from target to target.
6497 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
6498 // details, but this is a longer term goal. For now, we simply try to keep the
6499 // role of the frontend as simple and well-defined as possible. The rules can
6500 // be summarised as:
6501 // * Never split up large scalar arguments. We handle them here.
6502 // * If a hardfloat calling convention is being used, and the struct may be
6503 // passed in a pair of registers (fp+fp, int+fp), and both registers are
6504 // available, then pass as two separate arguments. If either the GPRs or FPRs
6505 // are exhausted, then pass according to the rule below.
6506 // * If a struct could never be passed in registers or directly in a stack
6507 // slot (as it is larger than 2*XLEN and the floating point rules don't
6508 // apply), then pass it using a pointer with the byval attribute.
6509 // * If a struct is less than 2*XLEN, then coerce to either a two-element
6510 // word-sized array or a 2*XLEN scalar (depending on alignment).
6511 // * The frontend can determine whether a struct is returned by reference or
6512 // not based on its size and fields. If it will be returned by reference, the
6513 // frontend must modify the prototype so a pointer with the sret annotation is
6514 // passed as the first argument. This is not necessary for large scalar
6515 // returns.
6516 // * Struct return values and varargs should be coerced to structs containing
6517 // register-size fields in the same situations they would be for fixed
6518 // arguments.
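// For example, on RV32 a small struct such as struct { int32_t a; int32_t b; }
// would typically be coerced by the frontend to [2 x i32], or to an i64 scalar
// if the struct has 8-byte alignment, while a struct too large for these rules
// is passed byval through a pointer.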
6519 
6520 static const MCPhysReg ArgGPRs[] = {
6521   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
6522   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
6523 };
6524 static const MCPhysReg ArgFPR16s[] = {
6525   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
6526   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
6527 };
6528 static const MCPhysReg ArgFPR32s[] = {
6529   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
6530   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
6531 };
6532 static const MCPhysReg ArgFPR64s[] = {
6533   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
6534   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
6535 };
6536 // This is an interim calling convention and it may be changed in the future.
6537 static const MCPhysReg ArgVRs[] = {
6538     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
6539     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
6540     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
6541 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
6542                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
6543                                      RISCV::V20M2, RISCV::V22M2};
6544 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
6545                                      RISCV::V20M4};
6546 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
6547 
6548 // Pass a 2*XLEN argument that has been split into two XLEN values through
6549 // registers or the stack as necessary.
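// For example, an i64 argument on RV32 may land entirely in a pair of GPRs, be
// split between the last remaining GPR and the stack, or be placed entirely on
// the stack, depending on how many argument GPRs are left.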
6550 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
6551                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
6552                                 MVT ValVT2, MVT LocVT2,
6553                                 ISD::ArgFlagsTy ArgFlags2) {
6554   unsigned XLenInBytes = XLen / 8;
6555   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6556     // At least one half can be passed via register.
6557     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
6558                                      VA1.getLocVT(), CCValAssign::Full));
6559   } else {
6560     // Both halves must be passed on the stack, with proper alignment.
6561     Align StackAlign =
6562         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
6563     State.addLoc(
6564         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
6565                             State.AllocateStack(XLenInBytes, StackAlign),
6566                             VA1.getLocVT(), CCValAssign::Full));
6567     State.addLoc(CCValAssign::getMem(
6568         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6569         LocVT2, CCValAssign::Full));
6570     return false;
6571   }
6572 
6573   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6574     // The second half can also be passed via register.
6575     State.addLoc(
6576         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
6577   } else {
6578     // The second half is passed via the stack, without additional alignment.
6579     State.addLoc(CCValAssign::getMem(
6580         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6581         LocVT2, CCValAssign::Full));
6582   }
6583 
6584   return false;
6585 }
6586 
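// Allocate a vector register (or register group, for LMUL > 1 types) for an
// RVV argument, reserving V0 for the first mask argument when one is present.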
6587 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
6588                                Optional<unsigned> FirstMaskArgument,
6589                                CCState &State, const RISCVTargetLowering &TLI) {
6590   const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
6591   if (RC == &RISCV::VRRegClass) {
6592     // Assign the first mask argument to V0.
6593     // This is an interim calling convention and it may be changed in the
6594     // future.
6595     if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
6596       return State.AllocateReg(RISCV::V0);
6597     return State.AllocateReg(ArgVRs);
6598   }
6599   if (RC == &RISCV::VRM2RegClass)
6600     return State.AllocateReg(ArgVRM2s);
6601   if (RC == &RISCV::VRM4RegClass)
6602     return State.AllocateReg(ArgVRM4s);
6603   if (RC == &RISCV::VRM8RegClass)
6604     return State.AllocateReg(ArgVRM8s);
6605   llvm_unreachable("Unhandled register class for ValueType");
6606 }
6607 
6608 // Implements the RISC-V calling convention. Returns true upon failure.
6609 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
6610                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
6611                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
6612                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
6613                      Optional<unsigned> FirstMaskArgument) {
6614   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
6615   assert(XLen == 32 || XLen == 64);
6616   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
6617 
6618   // Any return value split into more than two values can't be returned
6619   // directly. Vectors are returned via the available vector registers.
6620   if (!LocVT.isVector() && IsRet && ValNo > 1)
6621     return true;
6622 
6623   // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
6624   // passing a variadic argument, or if no F16/F32 argument registers remain.
6625   bool UseGPRForF16_F32 = true;
6626   // UseGPRForF64 is true if targeting soft-float ABIs or an FLEN=32 ABI, if
6627   // passing a variadic argument, or if no F64 argument registers remain.
6628   bool UseGPRForF64 = true;
6629 
6630   switch (ABI) {
6631   default:
6632     llvm_unreachable("Unexpected ABI");
6633   case RISCVABI::ABI_ILP32:
6634   case RISCVABI::ABI_LP64:
6635     break;
6636   case RISCVABI::ABI_ILP32F:
6637   case RISCVABI::ABI_LP64F:
6638     UseGPRForF16_F32 = !IsFixed;
6639     break;
6640   case RISCVABI::ABI_ILP32D:
6641   case RISCVABI::ABI_LP64D:
6642     UseGPRForF16_F32 = !IsFixed;
6643     UseGPRForF64 = !IsFixed;
6644     break;
6645   }
6646 
6647   // FPR16, FPR32, and FPR64 alias each other.
6648   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
6649     UseGPRForF16_F32 = true;
6650     UseGPRForF64 = true;
6651   }
6652 
6653   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
6654   // similar local variables rather than directly checking against the target
6655   // ABI.
6656 
6657   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
6658     LocVT = XLenVT;
6659     LocInfo = CCValAssign::BCvt;
6660   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
6661     LocVT = MVT::i64;
6662     LocInfo = CCValAssign::BCvt;
6663   }
6664 
6665   // If this is a variadic argument, the RISC-V calling convention requires
6666   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
6667   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
6668   // be used regardless of whether the original argument was split during
6669   // legalisation or not. The argument will not be passed by registers if the
6670   // original type is larger than 2*XLEN, so the register alignment rule does
6671   // not apply.
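  // For example, a variadic double passed on RV32 after a single fixed int
  // argument would skip the "odd" register a1 and be assigned to the aligned
  // pair a2+a3.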
6672   unsigned TwoXLenInBytes = (2 * XLen) / 8;
6673   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
6674       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
6675     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
6676     // Skip 'odd' register if necessary.
6677     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
6678       State.AllocateReg(ArgGPRs);
6679   }
6680 
6681   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
6682   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
6683       State.getPendingArgFlags();
6684 
6685   assert(PendingLocs.size() == PendingArgFlags.size() &&
6686          "PendingLocs and PendingArgFlags out of sync");
6687 
6688   // Handle passing f64 on RV32D with a soft float ABI or when floating point
6689   // registers are exhausted.
6690   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
6691     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
6692            "Can't lower f64 if it is split");
6693     // Depending on available argument GPRs, f64 may be passed in a pair of
6694     // GPRs, split between a GPR and the stack, or passed completely on the
6695     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
6696     // cases.
6697     Register Reg = State.AllocateReg(ArgGPRs);
6698     LocVT = MVT::i32;
6699     if (!Reg) {
6700       unsigned StackOffset = State.AllocateStack(8, Align(8));
6701       State.addLoc(
6702           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
6703       return false;
6704     }
6705     if (!State.AllocateReg(ArgGPRs))
6706       State.AllocateStack(4, Align(4));
6707     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6708     return false;
6709   }
6710 
6711   // Fixed-length vectors are located in the corresponding scalable-vector
6712   // container types.
6713   if (ValVT.isFixedLengthVector())
6714     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
6715 
6716   // Split arguments might be passed indirectly, so keep track of the pending
6717   // values. Split vectors are passed via a mix of registers and indirectly, so
6718   // treat them as we would any other argument.
6719   if (!LocVT.isVector() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
6720     LocVT = XLenVT;
6721     LocInfo = CCValAssign::Indirect;
6722     PendingLocs.push_back(
6723         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
6724     PendingArgFlags.push_back(ArgFlags);
6725     if (!ArgFlags.isSplitEnd()) {
6726       return false;
6727     }
6728   }
6729 
6730   // If the split argument only had two elements, it should be passed directly
6731   // in registers or on the stack.
6732   if (!LocVT.isVector() && ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
6733     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
6734     // Apply the normal calling convention rules to the first half of the
6735     // split argument.
6736     CCValAssign VA = PendingLocs[0];
6737     ISD::ArgFlagsTy AF = PendingArgFlags[0];
6738     PendingLocs.clear();
6739     PendingArgFlags.clear();
6740     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
6741                                ArgFlags);
6742   }
6743 
6744   // Allocate to a register if possible, or else a stack slot.
6745   Register Reg;
6746   unsigned StoreSizeBytes = XLen / 8;
6747   Align StackAlign = Align(XLen / 8);
6748 
6749   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
6750     Reg = State.AllocateReg(ArgFPR16s);
6751   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
6752     Reg = State.AllocateReg(ArgFPR32s);
6753   else if (ValVT == MVT::f64 && !UseGPRForF64)
6754     Reg = State.AllocateReg(ArgFPR64s);
6755   else if (ValVT.isVector()) {
6756     Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
6757     if (!Reg) {
6758       // For return values, the vector must be passed fully via registers or
6759       // via the stack.
6760       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
6761       // but we're using all of them.
6762       if (IsRet)
6763         return true;
6764       // Try using a GPR to pass the address.
6765       if ((Reg = State.AllocateReg(ArgGPRs))) {
6766         LocVT = XLenVT;
6767         LocInfo = CCValAssign::Indirect;
6768       } else if (ValVT.isScalableVector()) {
6769         report_fatal_error("Unable to pass scalable vector types on the stack");
6770       } else {
6771         // Pass fixed-length vectors on the stack.
6772         LocVT = ValVT;
6773         StoreSizeBytes = ValVT.getStoreSize();
6774         // Align vectors to their element sizes, being careful for vXi1
6775         // vectors.
6776         StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
6777       }
6778     }
6779   } else {
6780     Reg = State.AllocateReg(ArgGPRs);
6781   }
6782 
6783   unsigned StackOffset =
6784       Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
6785 
6786   // If we reach this point and PendingLocs is non-empty, we must be at the
6787   // end of a split argument that must be passed indirectly.
6788   if (!PendingLocs.empty()) {
6789     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
6790     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
6791 
6792     for (auto &It : PendingLocs) {
6793       if (Reg)
6794         It.convertToReg(Reg);
6795       else
6796         It.convertToMem(StackOffset);
6797       State.addLoc(It);
6798     }
6799     PendingLocs.clear();
6800     PendingArgFlags.clear();
6801     return false;
6802   }
6803 
6804   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
6805           (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
6806          "Expected an XLenVT or vector types at this stage");
6807 
6808   if (Reg) {
6809     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6810     return false;
6811   }
6812 
6813   // When a floating-point value is passed on the stack, no bit-conversion is
6814   // needed.
6815   if (ValVT.isFloatingPoint()) {
6816     LocVT = ValVT;
6817     LocInfo = CCValAssign::Full;
6818   }
6819   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
6820   return false;
6821 }
6822 
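// Return the index of the first vector-of-i1 (mask) argument, if any, so that
// the calling convention can pre-assign it to V0.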
6823 template <typename ArgTy>
6824 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
6825   for (const auto &ArgIdx : enumerate(Args)) {
6826     MVT ArgVT = ArgIdx.value().VT;
6827     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
6828       return ArgIdx.index();
6829   }
6830   return None;
6831 }
6832 
6833 void RISCVTargetLowering::analyzeInputArgs(
6834     MachineFunction &MF, CCState &CCInfo,
6835     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
6836     RISCVCCAssignFn Fn) const {
6837   unsigned NumArgs = Ins.size();
6838   FunctionType *FType = MF.getFunction().getFunctionType();
6839 
6840   Optional<unsigned> FirstMaskArgument;
6841   if (Subtarget.hasStdExtV())
6842     FirstMaskArgument = preAssignMask(Ins);
6843 
6844   for (unsigned i = 0; i != NumArgs; ++i) {
6845     MVT ArgVT = Ins[i].VT;
6846     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
6847 
6848     Type *ArgTy = nullptr;
6849     if (IsRet)
6850       ArgTy = FType->getReturnType();
6851     else if (Ins[i].isOrigArg())
6852       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
6853 
6854     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
6855     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
6856            ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
6857            FirstMaskArgument)) {
6858       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
6859                         << EVT(ArgVT).getEVTString() << '\n');
6860       llvm_unreachable(nullptr);
6861     }
6862   }
6863 }
6864 
6865 void RISCVTargetLowering::analyzeOutputArgs(
6866     MachineFunction &MF, CCState &CCInfo,
6867     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
6868     CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
6869   unsigned NumArgs = Outs.size();
6870 
6871   Optional<unsigned> FirstMaskArgument;
6872   if (Subtarget.hasStdExtV())
6873     FirstMaskArgument = preAssignMask(Outs);
6874 
6875   for (unsigned i = 0; i != NumArgs; i++) {
6876     MVT ArgVT = Outs[i].VT;
6877     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
6878     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
6879 
6880     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
6881     if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
6882            ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
6883            FirstMaskArgument)) {
6884       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
6885                         << EVT(ArgVT).getEVTString() << "\n");
6886       llvm_unreachable(nullptr);
6887     }
6888   }
6889 }
6890 
6891 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
6892 // values.
6893 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
6894                                    const CCValAssign &VA, const SDLoc &DL,
6895                                    const RISCVSubtarget &Subtarget) {
6896   switch (VA.getLocInfo()) {
6897   default:
6898     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6899   case CCValAssign::Full:
6900     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
6901       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
6902     break;
6903   case CCValAssign::BCvt:
6904     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
6905       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
6906     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
6907       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
6908     else
6909       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
6910     break;
6911   }
6912   return Val;
6913 }
6914 
6915 // The caller is responsible for loading the full value if the argument is
6916 // passed with CCValAssign::Indirect.
6917 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
6918                                 const CCValAssign &VA, const SDLoc &DL,
6919                                 const RISCVTargetLowering &TLI) {
6920   MachineFunction &MF = DAG.getMachineFunction();
6921   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6922   EVT LocVT = VA.getLocVT();
6923   SDValue Val;
6924   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
6925   Register VReg = RegInfo.createVirtualRegister(RC);
6926   RegInfo.addLiveIn(VA.getLocReg(), VReg);
6927   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
6928 
6929   if (VA.getLocInfo() == CCValAssign::Indirect)
6930     return Val;
6931 
6932   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
6933 }
6934 
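// Convert Val from its ValVT to the LocVT required by the calling convention;
// this is the inverse of convertLocVTToValVT above.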
6935 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
6936                                    const CCValAssign &VA, const SDLoc &DL,
6937                                    const RISCVSubtarget &Subtarget) {
6938   EVT LocVT = VA.getLocVT();
6939 
6940   switch (VA.getLocInfo()) {
6941   default:
6942     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6943   case CCValAssign::Full:
6944     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
6945       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
6946     break;
6947   case CCValAssign::BCvt:
6948     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
6949       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
6950     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
6951       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
6952     else
6953       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
6954     break;
6955   }
6956   return Val;
6957 }
6958 
6959 // The caller is responsible for loading the full value if the argument is
6960 // passed with CCValAssign::Indirect.
6961 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
6962                                 const CCValAssign &VA, const SDLoc &DL) {
6963   MachineFunction &MF = DAG.getMachineFunction();
6964   MachineFrameInfo &MFI = MF.getFrameInfo();
6965   EVT LocVT = VA.getLocVT();
6966   EVT ValVT = VA.getValVT();
6967   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
6968   int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
6969                                  /*Immutable=*/true);
6970   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6971   SDValue Val;
6972 
6973   ISD::LoadExtType ExtType;
6974   switch (VA.getLocInfo()) {
6975   default:
6976     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6977   case CCValAssign::Full:
6978   case CCValAssign::Indirect:
6979   case CCValAssign::BCvt:
6980     ExtType = ISD::NON_EXTLOAD;
6981     break;
6982   }
6983   Val = DAG.getExtLoad(
6984       ExtType, DL, LocVT, Chain, FIN,
6985       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
6986   return Val;
6987 }
6988 
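// Unpack an f64 argument passed under the RV32 soft-float D ABI. The value may
// arrive in a pair of GPRs, split between a GPR and the stack, or entirely on
// the stack; see the matching logic in CC_RISCV.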
6989 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
6990                                        const CCValAssign &VA, const SDLoc &DL) {
6991   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
6992          "Unexpected VA");
6993   MachineFunction &MF = DAG.getMachineFunction();
6994   MachineFrameInfo &MFI = MF.getFrameInfo();
6995   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6996 
6997   if (VA.isMemLoc()) {
6998     // f64 is passed on the stack.
6999     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
7000     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7001     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
7002                        MachinePointerInfo::getFixedStack(MF, FI));
7003   }
7004 
7005   assert(VA.isRegLoc() && "Expected register VA assignment");
7006 
7007   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7008   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
7009   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
7010   SDValue Hi;
7011   if (VA.getLocReg() == RISCV::X17) {
7012     // Second half of f64 is passed on the stack.
7013     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
7014     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
7015     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
7016                      MachinePointerInfo::getFixedStack(MF, FI));
7017   } else {
7018     // Second half of f64 is passed in another GPR.
7019     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7020     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
7021     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
7022   }
7023   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
7024 }
7025 
7026 // FastCC gives less than a 1% performance improvement on some particular
7027 // benchmarks, but it may theoretically benefit some cases.
7028 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
7029                             unsigned ValNo, MVT ValVT, MVT LocVT,
7030                             CCValAssign::LocInfo LocInfo,
7031                             ISD::ArgFlagsTy ArgFlags, CCState &State,
7032                             bool IsFixed, bool IsRet, Type *OrigTy,
7033                             const RISCVTargetLowering &TLI,
7034                             Optional<unsigned> FirstMaskArgument) {
7035 
7036   // X5 and X6 might be used for save-restore libcall.
7037   static const MCPhysReg GPRList[] = {
7038       RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
7039       RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
7040       RISCV::X29, RISCV::X30, RISCV::X31};
7041 
7042   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7043     if (unsigned Reg = State.AllocateReg(GPRList)) {
7044       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7045       return false;
7046     }
7047   }
7048 
7049   if (LocVT == MVT::f16) {
7050     static const MCPhysReg FPR16List[] = {
7051         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
7052         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
7053         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
7054         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
7055     if (unsigned Reg = State.AllocateReg(FPR16List)) {
7056       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7057       return false;
7058     }
7059   }
7060 
7061   if (LocVT == MVT::f32) {
7062     static const MCPhysReg FPR32List[] = {
7063         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
7064         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
7065         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
7066         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
7067     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7068       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7069       return false;
7070     }
7071   }
7072 
7073   if (LocVT == MVT::f64) {
7074     static const MCPhysReg FPR64List[] = {
7075         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
7076         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
7077         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
7078         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
7079     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7080       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7081       return false;
7082     }
7083   }
7084 
7085   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
7086     unsigned Offset4 = State.AllocateStack(4, Align(4));
7087     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
7088     return false;
7089   }
7090 
7091   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
7092     unsigned Offset5 = State.AllocateStack(8, Align(8));
7093     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
7094     return false;
7095   }
7096 
7097   if (LocVT.isVector()) {
7098     if (unsigned Reg =
7099             allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
7100       // Fixed-length vectors are located in the corresponding scalable-vector
7101       // container types.
7102       if (ValVT.isFixedLengthVector())
7103         LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7104       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7105     } else {
7106       // Try to pass the address via a "fast" GPR.
7107       if (unsigned GPRReg = State.AllocateReg(GPRList)) {
7108         LocInfo = CCValAssign::Indirect;
7109         LocVT = TLI.getSubtarget().getXLenVT();
7110         State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
7111       } else if (ValVT.isFixedLengthVector()) {
7112         auto StackAlign =
7113             MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
7114         unsigned StackOffset =
7115             State.AllocateStack(ValVT.getStoreSize(), StackAlign);
7116         State.addLoc(
7117             CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7118       } else {
7119         // Can't pass scalable vectors on the stack.
7120         return true;
7121       }
7122     }
7123 
7124     return false;
7125   }
7126 
7127   return true; // CC didn't match.
7128 }
7129 
7130 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
7131                          CCValAssign::LocInfo LocInfo,
7132                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
7133 
7134   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
7135     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
7136     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
7137     static const MCPhysReg GPRList[] = {
7138         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
7139         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
7140     if (unsigned Reg = State.AllocateReg(GPRList)) {
7141       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7142       return false;
7143     }
7144   }
7145 
7146   if (LocVT == MVT::f32) {
7147     // Pass in STG registers: F1, ..., F6
7148     //                        fs0 ... fs5
7149     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
7150                                           RISCV::F18_F, RISCV::F19_F,
7151                                           RISCV::F20_F, RISCV::F21_F};
7152     if (unsigned Reg = State.AllocateReg(FPR32List)) {
7153       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7154       return false;
7155     }
7156   }
7157 
7158   if (LocVT == MVT::f64) {
7159     // Pass in STG registers: D1, ..., D6
7160     //                        fs6 ... fs11
7161     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
7162                                           RISCV::F24_D, RISCV::F25_D,
7163                                           RISCV::F26_D, RISCV::F27_D};
7164     if (unsigned Reg = State.AllocateReg(FPR64List)) {
7165       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7166       return false;
7167     }
7168   }
7169 
7170   report_fatal_error("No registers left in GHC calling convention");
7171   return true;
7172 }
7173 
7174 // Transform physical registers into virtual registers.
7175 SDValue RISCVTargetLowering::LowerFormalArguments(
7176     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
7177     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
7178     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7179 
7180   MachineFunction &MF = DAG.getMachineFunction();
7181 
7182   switch (CallConv) {
7183   default:
7184     report_fatal_error("Unsupported calling convention");
7185   case CallingConv::C:
7186   case CallingConv::Fast:
7187     break;
7188   case CallingConv::GHC:
7189     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
7190         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
7191       report_fatal_error(
7192         "GHC calling convention requires the F and D instruction set extensions");
7193   }
7194 
7195   const Function &Func = MF.getFunction();
7196   if (Func.hasFnAttribute("interrupt")) {
7197     if (!Func.arg_empty())
7198       report_fatal_error(
7199         "Functions with the interrupt attribute cannot have arguments!");
7200 
7201     StringRef Kind =
7202       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7203 
7204     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
7205       report_fatal_error(
7206         "Function interrupt attribute argument not supported!");
7207   }
7208 
7209   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7210   MVT XLenVT = Subtarget.getXLenVT();
7211   unsigned XLenInBytes = Subtarget.getXLen() / 8;
7212   // Used with varargs to accumulate store chains.
7213   std::vector<SDValue> OutChains;
7214 
7215   // Assign locations to all of the incoming arguments.
7216   SmallVector<CCValAssign, 16> ArgLocs;
7217   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7218 
7219   if (CallConv == CallingConv::GHC)
7220     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
7221   else
7222     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
7223                      CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7224                                                    : CC_RISCV);
7225 
7226   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7227     CCValAssign &VA = ArgLocs[i];
7228     SDValue ArgValue;
7229     // Passing f64 on RV32D with a soft float ABI must be handled as a special
7230     // case.
7231     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
7232       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
7233     else if (VA.isRegLoc())
7234       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
7235     else
7236       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
7237 
7238     if (VA.getLocInfo() == CCValAssign::Indirect) {
7239       // If the original argument was split and passed by reference (e.g. i128
7240       // on RV32), we need to load all parts of it here (using the same
7241       // address). Vectors may be partly split to registers and partly to the
7242       // stack, in which case the base address is partly offset and subsequent
7243       // loads are relative to that.
7244       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
7245                                    MachinePointerInfo()));
7246       unsigned ArgIndex = Ins[i].OrigArgIndex;
7247       unsigned ArgPartOffset = Ins[i].PartOffset;
7248       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7249       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
7250         CCValAssign &PartVA = ArgLocs[i + 1];
7251         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
7252         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7253         if (PartVA.getValVT().isScalableVector())
7254           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7255         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
7256         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
7257                                      MachinePointerInfo()));
7258         ++i;
7259       }
7260       continue;
7261     }
7262     InVals.push_back(ArgValue);
7263   }
7264 
7265   if (IsVarArg) {
7266     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
7267     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
7268     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
7269     MachineFrameInfo &MFI = MF.getFrameInfo();
7270     MachineRegisterInfo &RegInfo = MF.getRegInfo();
7271     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
7272 
7273     // Offset of the first variable argument from stack pointer, and size of
7274     // the vararg save area. For now, the varargs save area is either zero or
7275     // large enough to hold a0-a7.
7276     int VaArgOffset, VarArgsSaveSize;
7277 
7278     // If all registers are allocated, then all varargs must be passed on the
7279     // stack and we don't need to save any argregs.
7280     if (ArgRegs.size() == Idx) {
7281       VaArgOffset = CCInfo.getNextStackOffset();
7282       VarArgsSaveSize = 0;
7283     } else {
7284       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
7285       VaArgOffset = -VarArgsSaveSize;
7286     }
7287 
7288     // Record the frame index of the first variable argument,
7289     // which is a value needed by VASTART.
7290     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7291     RVFI->setVarArgsFrameIndex(FI);
7292 
7293     // If saving an odd number of registers, create an extra stack slot to
7294     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
7295     // that offsets to even-numbered registers remain 2*XLEN-aligned.
7296     if (Idx % 2) {
7297       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
7298       VarArgsSaveSize += XLenInBytes;
7299     }
7300 
7301     // Copy the integer registers that may have been used for passing varargs
7302     // to the vararg save area.
7303     for (unsigned I = Idx; I < ArgRegs.size();
7304          ++I, VaArgOffset += XLenInBytes) {
7305       const Register Reg = RegInfo.createVirtualRegister(RC);
7306       RegInfo.addLiveIn(ArgRegs[I], Reg);
7307       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
7308       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7309       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7310       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
7311                                    MachinePointerInfo::getFixedStack(MF, FI));
7312       cast<StoreSDNode>(Store.getNode())
7313           ->getMemOperand()
7314           ->setValue((Value *)nullptr);
7315       OutChains.push_back(Store);
7316     }
7317     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
7318   }
7319 
7320   // All stores are grouped in one node to allow the matching between
7321   // the size of Ins and InVals. This only happens for vararg functions.
7322   if (!OutChains.empty()) {
7323     OutChains.push_back(Chain);
7324     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
7325   }
7326 
7327   return Chain;
7328 }
7329 
7330 /// isEligibleForTailCallOptimization - Check whether the call is eligible
7331 /// for tail call optimization.
7332 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
7333 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
7334     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
7335     const SmallVector<CCValAssign, 16> &ArgLocs) const {
7336 
7337   auto &Callee = CLI.Callee;
7338   auto CalleeCC = CLI.CallConv;
7339   auto &Outs = CLI.Outs;
7340   auto &Caller = MF.getFunction();
7341   auto CallerCC = Caller.getCallingConv();
7342 
7343   // Exception-handling functions need a special set of instructions to
7344   // indicate a return to the hardware. Tail-calling another function would
7345   // probably break this.
7346   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
7347   // should be expanded as new function attributes are introduced.
7348   if (Caller.hasFnAttribute("interrupt"))
7349     return false;
7350 
7351   // Do not tail call opt if the stack is used to pass parameters.
7352   if (CCInfo.getNextStackOffset() != 0)
7353     return false;
7354 
7355   // Do not tail call opt if any parameters need to be passed indirectly.
7356   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
7357   // passed indirectly. So the address of the value will be passed in a
7358   // register, or if not available, then the address is put on the stack. In
7359   // order to pass indirectly, space on the stack often needs to be allocated
7360   // in order to store the value. In this case the CCInfo.getNextStackOffset()
7361   // != 0 check is not enough, and we need to check whether any entry in
7362   // ArgLocs is passed CCValAssign::Indirect.
7363   for (auto &VA : ArgLocs)
7364     if (VA.getLocInfo() == CCValAssign::Indirect)
7365       return false;
7366 
7367   // Do not tail call opt if either caller or callee uses struct return
7368   // semantics.
7369   auto IsCallerStructRet = Caller.hasStructRetAttr();
7370   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
7371   if (IsCallerStructRet || IsCalleeStructRet)
7372     return false;
7373 
7374   // Externally-defined functions with weak linkage should not be
7375   // tail-called. The behaviour of branch instructions in this situation (as
7376   // used for tail calls) is implementation-defined, so we cannot rely on the
7377   // linker replacing the tail call with a return.
7378   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
7379     const GlobalValue *GV = G->getGlobal();
7380     if (GV->hasExternalWeakLinkage())
7381       return false;
7382   }
7383 
7384   // The callee has to preserve all registers the caller needs to preserve.
7385   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
7386   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
7387   if (CalleeCC != CallerCC) {
7388     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
7389     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
7390       return false;
7391   }
7392 
7393   // Byval parameters hand the function a pointer directly into the stack area
7394   // we want to reuse during a tail call. Working around this *is* possible
7395   // but less efficient and uglier in LowerCall.
7396   for (auto &Arg : Outs)
7397     if (Arg.Flags.isByVal())
7398       return false;
7399 
7400   return true;
7401 }
7402 
7403 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
7404   return DAG.getDataLayout().getPrefTypeAlign(
7405       VT.getTypeForEVT(*DAG.getContext()));
7406 }
7407 
7408 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
7409 // and output parameter nodes.
7410 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
7411                                        SmallVectorImpl<SDValue> &InVals) const {
7412   SelectionDAG &DAG = CLI.DAG;
7413   SDLoc &DL = CLI.DL;
7414   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
7415   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
7416   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
7417   SDValue Chain = CLI.Chain;
7418   SDValue Callee = CLI.Callee;
7419   bool &IsTailCall = CLI.IsTailCall;
7420   CallingConv::ID CallConv = CLI.CallConv;
7421   bool IsVarArg = CLI.IsVarArg;
7422   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7423   MVT XLenVT = Subtarget.getXLenVT();
7424 
7425   MachineFunction &MF = DAG.getMachineFunction();
7426 
7427   // Analyze the operands of the call, assigning locations to each operand.
7428   SmallVector<CCValAssign, 16> ArgLocs;
7429   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7430 
7431   if (CallConv == CallingConv::GHC)
7432     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
7433   else
7434     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
7435                       CallConv == CallingConv::Fast ? CC_RISCV_FastCC
7436                                                     : CC_RISCV);
7437 
7438   // Check if it's really possible to do a tail call.
7439   if (IsTailCall)
7440     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
7441 
7442   if (IsTailCall)
7443     ++NumTailCalls;
7444   else if (CLI.CB && CLI.CB->isMustTailCall())
7445     report_fatal_error("failed to perform tail call elimination on a call "
7446                        "site marked musttail");
7447 
7448   // Get a count of how many bytes are to be pushed on the stack.
7449   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
7450 
7451   // Create local copies for byval args
7452   SmallVector<SDValue, 8> ByValArgs;
7453   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7454     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7455     if (!Flags.isByVal())
7456       continue;
7457 
7458     SDValue Arg = OutVals[i];
7459     unsigned Size = Flags.getByValSize();
7460     Align Alignment = Flags.getNonZeroByValAlign();
7461 
7462     int FI =
7463         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
7464     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7465     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
7466 
7467     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
7468                           /*IsVolatile=*/false,
7469                           /*AlwaysInline=*/false, IsTailCall,
7470                           MachinePointerInfo(), MachinePointerInfo());
7471     ByValArgs.push_back(FIPtr);
7472   }
7473 
7474   if (!IsTailCall)
7475     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
7476 
7477   // Copy argument values to their designated locations.
7478   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
7479   SmallVector<SDValue, 8> MemOpChains;
7480   SDValue StackPtr;
7481   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
7482     CCValAssign &VA = ArgLocs[i];
7483     SDValue ArgValue = OutVals[i];
7484     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7485 
7486     // Handle passing f64 on RV32D with a soft float ABI as a special case.
7487     bool IsF64OnRV32DSoftABI =
7488         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
7489     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
7490       SDValue SplitF64 = DAG.getNode(
7491           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
7492       SDValue Lo = SplitF64.getValue(0);
7493       SDValue Hi = SplitF64.getValue(1);
7494 
7495       Register RegLo = VA.getLocReg();
7496       RegsToPass.push_back(std::make_pair(RegLo, Lo));
7497 
7498       if (RegLo == RISCV::X17) {
7499         // Second half of f64 is passed on the stack.
7500         // Work out the address of the stack slot.
7501         if (!StackPtr.getNode())
7502           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7503         // Emit the store.
7504         MemOpChains.push_back(
7505             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
7506       } else {
7507         // Second half of f64 is passed in another GPR.
7508         assert(RegLo < RISCV::X31 && "Invalid register pair");
7509         Register RegHigh = RegLo + 1;
7510         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
7511       }
7512       continue;
7513     }
7514 
7515     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
7516     // as any other MemLoc.
7517 
7518     // Promote the value if needed.
7519     // For now, only handle fully promoted and indirect arguments.
7520     if (VA.getLocInfo() == CCValAssign::Indirect) {
7521       // Store the argument in a stack slot and pass its address.
7522       Align StackAlign =
7523           std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
7524                    getPrefTypeAlign(ArgValue.getValueType(), DAG));
7525       TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
7526       // If the original argument was split (e.g. i128), we need
7527       // to store the required parts of it here (and pass just one address).
7528       // Vectors may be partly split to registers and partly to the stack, in
7529       // which case the base address is partly offset and subsequent stores are
7530       // relative to that.
7531       unsigned ArgIndex = Outs[i].OrigArgIndex;
7532       unsigned ArgPartOffset = Outs[i].PartOffset;
7533       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7534       // Calculate the total size to store. We only learn what we are actually
7535       // storing by walking over the remaining parts in the loop below and
7536       // collecting that info as we go.
7537       SmallVector<std::pair<SDValue, SDValue>> Parts;
7538       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
7539         SDValue PartValue = OutVals[i + 1];
7540         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
7541         SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
7542         EVT PartVT = PartValue.getValueType();
7543         if (PartVT.isScalableVector())
7544           Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
7545         StoredSize += PartVT.getStoreSize();
7546         StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
7547         Parts.push_back(std::make_pair(PartValue, Offset));
7548         ++i;
7549       }
7550       SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
7551       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
7552       MemOpChains.push_back(
7553           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
7554                        MachinePointerInfo::getFixedStack(MF, FI)));
7555       for (const auto &Part : Parts) {
7556         SDValue PartValue = Part.first;
7557         SDValue PartOffset = Part.second;
7558         SDValue Address =
7559             DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
7560         MemOpChains.push_back(
7561             DAG.getStore(Chain, DL, PartValue, Address,
7562                          MachinePointerInfo::getFixedStack(MF, FI)));
7563       }
7564       ArgValue = SpillSlot;
7565     } else {
7566       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
7567     }
7568 
7569     // Use local copy if it is a byval arg.
7570     if (Flags.isByVal())
7571       ArgValue = ByValArgs[j++];
7572 
7573     if (VA.isRegLoc()) {
7574       // Queue up the argument copies and emit them at the end.
7575       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
7576     } else {
7577       assert(VA.isMemLoc() && "Argument not register or memory");
7578       assert(!IsTailCall && "Tail call not allowed if stack is used "
7579                             "for passing parameters");
7580 
7581       // Work out the address of the stack slot.
7582       if (!StackPtr.getNode())
7583         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7584       SDValue Address =
7585           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
7586                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
7587 
7588       // Emit the store.
7589       MemOpChains.push_back(
7590           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
7591     }
7592   }
7593 
7594   // Join the stores, which are independent of one another.
7595   if (!MemOpChains.empty())
7596     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
7597 
7598   SDValue Glue;
7599 
7600   // Build a sequence of copy-to-reg nodes, chained and glued together.
7601   for (auto &Reg : RegsToPass) {
7602     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
7603     Glue = Chain.getValue(1);
7604   }
7605 
7606   // Validate that none of the argument registers have been marked as
7607   // reserved; if so, report an error. Do the same for the return address if
7608   // this is not a tail call.
7609   validateCCReservedRegs(RegsToPass, MF);
7610   if (!IsTailCall &&
7611       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
7612     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7613         MF.getFunction(),
7614         "Return address register required, but has been reserved."});
7615 
7616   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
7617   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
7618   // split it and then direct call can be matched by PseudoCALL.
7619   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
7620     const GlobalValue *GV = S->getGlobal();
7621 
7622     unsigned OpFlags = RISCVII::MO_CALL;
7623     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
7624       OpFlags = RISCVII::MO_PLT;
7625 
7626     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
7627   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
7628     unsigned OpFlags = RISCVII::MO_CALL;
7629 
7630     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
7631                                                  nullptr))
7632       OpFlags = RISCVII::MO_PLT;
7633 
7634     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
7635   }
7636 
7637   // The first call operand is the chain and the second is the target address.
7638   SmallVector<SDValue, 8> Ops;
7639   Ops.push_back(Chain);
7640   Ops.push_back(Callee);
7641 
7642   // Add argument registers to the end of the list so that they are
7643   // known live into the call.
7644   for (auto &Reg : RegsToPass)
7645     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
7646 
7647   if (!IsTailCall) {
7648     // Add a register mask operand representing the call-preserved registers.
7649     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
7650     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
7651     assert(Mask && "Missing call preserved mask for calling convention");
7652     Ops.push_back(DAG.getRegisterMask(Mask));
7653   }
7654 
7655   // Glue the call to the argument copies, if any.
7656   if (Glue.getNode())
7657     Ops.push_back(Glue);
7658 
7659   // Emit the call.
7660   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7661 
7662   if (IsTailCall) {
7663     MF.getFrameInfo().setHasTailCall();
7664     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
7665   }
7666 
7667   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
7668   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
7669   Glue = Chain.getValue(1);
7670 
7671   // Mark the end of the call, which is glued to the call itself.
7672   Chain = DAG.getCALLSEQ_END(Chain,
7673                              DAG.getConstant(NumBytes, DL, PtrVT, true),
7674                              DAG.getConstant(0, DL, PtrVT, true),
7675                              Glue, DL);
7676   Glue = Chain.getValue(1);
7677 
7678   // Assign locations to each value returned by this call.
7679   SmallVector<CCValAssign, 16> RVLocs;
7680   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
7681   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
7682 
7683   // Copy all of the result registers out of their specified physreg.
7684   for (auto &VA : RVLocs) {
7685     // Copy the value out
7686     SDValue RetValue =
7687         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
7688     // Glue the RetValue to the end of the call sequence
7689     Chain = RetValue.getValue(1);
7690     Glue = RetValue.getValue(2);
7691 
7692     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
7693       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
7694       SDValue RetValue2 =
7695           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
7696       Chain = RetValue2.getValue(1);
7697       Glue = RetValue2.getValue(2);
7698       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
7699                              RetValue2);
7700     }
7701 
7702     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
7703 
7704     InVals.push_back(RetValue);
7705   }
7706 
7707   return Chain;
7708 }
7709 
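// Return true if the return values described by Outs can all be assigned a
// location by CC_RISCV, i.e. they fit in the available return registers.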
7710 bool RISCVTargetLowering::CanLowerReturn(
7711     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
7712     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
7713   SmallVector<CCValAssign, 16> RVLocs;
7714   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
7715 
7716   Optional<unsigned> FirstMaskArgument;
7717   if (Subtarget.hasStdExtV())
7718     FirstMaskArgument = preAssignMask(Outs);
7719 
7720   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7721     MVT VT = Outs[i].VT;
7722     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
7723     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7724     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
7725                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
7726                  *this, FirstMaskArgument))
7727       return false;
7728   }
7729   return true;
7730 }
7731 
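// Lower outgoing return values: assign each value a location with CC_RISCV,
// copy the values into their return registers, and emit the appropriate
// return node, handling the f64-on-RV32 register pair and interrupt-handler
// returns as special cases.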
7732 SDValue
7733 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7734                                  bool IsVarArg,
7735                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
7736                                  const SmallVectorImpl<SDValue> &OutVals,
7737                                  const SDLoc &DL, SelectionDAG &DAG) const {
7738   const MachineFunction &MF = DAG.getMachineFunction();
7739   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
7740 
7741   // Stores the assignment of the return value to a location.
7742   SmallVector<CCValAssign, 16> RVLocs;
7743 
7744   // Info about the registers and stack slot.
7745   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
7746                  *DAG.getContext());
7747 
7748   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
7749                     nullptr, CC_RISCV);
7750 
7751   if (CallConv == CallingConv::GHC && !RVLocs.empty())
7752     report_fatal_error("GHC functions return void only");
7753 
7754   SDValue Glue;
7755   SmallVector<SDValue, 4> RetOps(1, Chain);
7756 
7757   // Copy the result values into the output registers.
7758   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
7759     SDValue Val = OutVals[i];
7760     CCValAssign &VA = RVLocs[i];
7761     assert(VA.isRegLoc() && "Can only return in registers!");
7762 
7763     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
7764       // Handle returning f64 on RV32D with a soft float ABI.
7765       assert(VA.isRegLoc() && "Expected return via registers");
7766       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
7767                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
7768       SDValue Lo = SplitF64.getValue(0);
7769       SDValue Hi = SplitF64.getValue(1);
7770       Register RegLo = VA.getLocReg();
7771       assert(RegLo < RISCV::X31 && "Invalid register pair");
7772       Register RegHi = RegLo + 1;
7773 
7774       if (STI.isRegisterReservedByUser(RegLo) ||
7775           STI.isRegisterReservedByUser(RegHi))
7776         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7777             MF.getFunction(),
7778             "Return value register required, but has been reserved."});
7779 
7780       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
7781       Glue = Chain.getValue(1);
7782       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
7783       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
7784       Glue = Chain.getValue(1);
7785       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
7786     } else {
7787       // Handle a 'normal' return.
7788       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
7789       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
7790 
7791       if (STI.isRegisterReservedByUser(VA.getLocReg()))
7792         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7793             MF.getFunction(),
7794             "Return value register required, but has been reserved."});
7795 
7796       // Guarantee that all emitted copies are stuck together.
7797       Glue = Chain.getValue(1);
7798       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7799     }
7800   }
7801 
7802   RetOps[0] = Chain; // Update chain.
7803 
7804   // Add the glue node if we have it.
7805   if (Glue.getNode()) {
7806     RetOps.push_back(Glue);
7807   }
7808 
7809   // Interrupt service routines use different return instructions.
7810   const Function &Func = DAG.getMachineFunction().getFunction();
7811   if (Func.hasFnAttribute("interrupt")) {
7812     if (!Func.getReturnType()->isVoidTy())
7813       report_fatal_error(
7814           "Functions with the interrupt attribute must have void return type!");
7815 
7816     MachineFunction &MF = DAG.getMachineFunction();
7817     StringRef Kind =
7818       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7819 
7820     unsigned RetOpc;
7821     if (Kind == "user")
7822       RetOpc = RISCVISD::URET_FLAG;
7823     else if (Kind == "supervisor")
7824       RetOpc = RISCVISD::SRET_FLAG;
7825     else
7826       RetOpc = RISCVISD::MRET_FLAG;
7827 
7828     return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
7829   }
7830 
7831   return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
7832 }
7833 
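// Report a diagnostic if any register chosen for passing call arguments has
// been reserved by the user.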
7834 void RISCVTargetLowering::validateCCReservedRegs(
7835     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
7836     MachineFunction &MF) const {
7837   const Function &F = MF.getFunction();
7838   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
7839 
7840   if (llvm::any_of(Regs, [&STI](auto Reg) {
7841         return STI.isRegisterReservedByUser(Reg.first);
7842       }))
7843     F.getContext().diagnose(DiagnosticInfoUnsupported{
7844         F, "Argument register required, but has been reserved."});
7845 }
7846 
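// Any call the frontend marked 'tail' is a candidate for tail-call emission.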
7847 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
7848   return CI->isTailCall();
7849 }
7850 
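// Return the textual name of a RISCVISD node, for debug output.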
7851 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
7852 #define NODE_NAME_CASE(NODE)                                                   \
7853   case RISCVISD::NODE:                                                         \
7854     return "RISCVISD::" #NODE;
7855   // clang-format off
7856   switch ((RISCVISD::NodeType)Opcode) {
7857   case RISCVISD::FIRST_NUMBER:
7858     break;
7859   NODE_NAME_CASE(RET_FLAG)
7860   NODE_NAME_CASE(URET_FLAG)
7861   NODE_NAME_CASE(SRET_FLAG)
7862   NODE_NAME_CASE(MRET_FLAG)
7863   NODE_NAME_CASE(CALL)
7864   NODE_NAME_CASE(SELECT_CC)
7865   NODE_NAME_CASE(BR_CC)
7866   NODE_NAME_CASE(BuildPairF64)
7867   NODE_NAME_CASE(SplitF64)
7868   NODE_NAME_CASE(TAIL)
7869   NODE_NAME_CASE(MULHSU)
7870   NODE_NAME_CASE(SLLW)
7871   NODE_NAME_CASE(SRAW)
7872   NODE_NAME_CASE(SRLW)
7873   NODE_NAME_CASE(DIVW)
7874   NODE_NAME_CASE(DIVUW)
7875   NODE_NAME_CASE(REMUW)
7876   NODE_NAME_CASE(ROLW)
7877   NODE_NAME_CASE(RORW)
7878   NODE_NAME_CASE(CLZW)
7879   NODE_NAME_CASE(CTZW)
7880   NODE_NAME_CASE(FSLW)
7881   NODE_NAME_CASE(FSRW)
7882   NODE_NAME_CASE(FSL)
7883   NODE_NAME_CASE(FSR)
7884   NODE_NAME_CASE(FMV_H_X)
7885   NODE_NAME_CASE(FMV_X_ANYEXTH)
7886   NODE_NAME_CASE(FMV_W_X_RV64)
7887   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
7888   NODE_NAME_CASE(READ_CYCLE_WIDE)
7889   NODE_NAME_CASE(GREV)
7890   NODE_NAME_CASE(GREVW)
7891   NODE_NAME_CASE(GORC)
7892   NODE_NAME_CASE(GORCW)
7893   NODE_NAME_CASE(SHFL)
7894   NODE_NAME_CASE(SHFLW)
7895   NODE_NAME_CASE(UNSHFL)
7896   NODE_NAME_CASE(UNSHFLW)
7897   NODE_NAME_CASE(BCOMPRESS)
7898   NODE_NAME_CASE(BCOMPRESSW)
7899   NODE_NAME_CASE(BDECOMPRESS)
7900   NODE_NAME_CASE(BDECOMPRESSW)
7901   NODE_NAME_CASE(VMV_V_X_VL)
7902   NODE_NAME_CASE(VFMV_V_F_VL)
7903   NODE_NAME_CASE(VMV_X_S)
7904   NODE_NAME_CASE(VMV_S_X_VL)
7905   NODE_NAME_CASE(VFMV_S_F_VL)
7906   NODE_NAME_CASE(SPLAT_VECTOR_I64)
7907   NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
7908   NODE_NAME_CASE(READ_VLENB)
7909   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
7910   NODE_NAME_CASE(VSLIDEUP_VL)
7911   NODE_NAME_CASE(VSLIDE1UP_VL)
7912   NODE_NAME_CASE(VSLIDEDOWN_VL)
7913   NODE_NAME_CASE(VSLIDE1DOWN_VL)
7914   NODE_NAME_CASE(VID_VL)
7915   NODE_NAME_CASE(VFNCVT_ROD_VL)
7916   NODE_NAME_CASE(VECREDUCE_ADD_VL)
7917   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
7918   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
7919   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
7920   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
7921   NODE_NAME_CASE(VECREDUCE_AND_VL)
7922   NODE_NAME_CASE(VECREDUCE_OR_VL)
7923   NODE_NAME_CASE(VECREDUCE_XOR_VL)
7924   NODE_NAME_CASE(VECREDUCE_FADD_VL)
7925   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
7926   NODE_NAME_CASE(VECREDUCE_FMIN_VL)
7927   NODE_NAME_CASE(VECREDUCE_FMAX_VL)
7928   NODE_NAME_CASE(ADD_VL)
7929   NODE_NAME_CASE(AND_VL)
7930   NODE_NAME_CASE(MUL_VL)
7931   NODE_NAME_CASE(OR_VL)
7932   NODE_NAME_CASE(SDIV_VL)
7933   NODE_NAME_CASE(SHL_VL)
7934   NODE_NAME_CASE(SREM_VL)
7935   NODE_NAME_CASE(SRA_VL)
7936   NODE_NAME_CASE(SRL_VL)
7937   NODE_NAME_CASE(SUB_VL)
7938   NODE_NAME_CASE(UDIV_VL)
7939   NODE_NAME_CASE(UREM_VL)
7940   NODE_NAME_CASE(XOR_VL)
7941   NODE_NAME_CASE(FADD_VL)
7942   NODE_NAME_CASE(FSUB_VL)
7943   NODE_NAME_CASE(FMUL_VL)
7944   NODE_NAME_CASE(FDIV_VL)
7945   NODE_NAME_CASE(FNEG_VL)
7946   NODE_NAME_CASE(FABS_VL)
7947   NODE_NAME_CASE(FSQRT_VL)
7948   NODE_NAME_CASE(FMA_VL)
7949   NODE_NAME_CASE(FCOPYSIGN_VL)
7950   NODE_NAME_CASE(SMIN_VL)
7951   NODE_NAME_CASE(SMAX_VL)
7952   NODE_NAME_CASE(UMIN_VL)
7953   NODE_NAME_CASE(UMAX_VL)
7954   NODE_NAME_CASE(FMINNUM_VL)
7955   NODE_NAME_CASE(FMAXNUM_VL)
7956   NODE_NAME_CASE(MULHS_VL)
7957   NODE_NAME_CASE(MULHU_VL)
7958   NODE_NAME_CASE(FP_TO_SINT_VL)
7959   NODE_NAME_CASE(FP_TO_UINT_VL)
7960   NODE_NAME_CASE(SINT_TO_FP_VL)
7961   NODE_NAME_CASE(UINT_TO_FP_VL)
7962   NODE_NAME_CASE(FP_EXTEND_VL)
7963   NODE_NAME_CASE(FP_ROUND_VL)
7964   NODE_NAME_CASE(SETCC_VL)
7965   NODE_NAME_CASE(VSELECT_VL)
7966   NODE_NAME_CASE(VMAND_VL)
7967   NODE_NAME_CASE(VMOR_VL)
7968   NODE_NAME_CASE(VMXOR_VL)
7969   NODE_NAME_CASE(VMCLR_VL)
7970   NODE_NAME_CASE(VMSET_VL)
7971   NODE_NAME_CASE(VRGATHER_VX_VL)
7972   NODE_NAME_CASE(VRGATHER_VV_VL)
7973   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
7974   NODE_NAME_CASE(VSEXT_VL)
7975   NODE_NAME_CASE(VZEXT_VL)
7976   NODE_NAME_CASE(VPOPC_VL)
7977   NODE_NAME_CASE(VLE_VL)
7978   NODE_NAME_CASE(VSE_VL)
7979   NODE_NAME_CASE(READ_CSR)
7980   NODE_NAME_CASE(WRITE_CSR)
7981   NODE_NAME_CASE(SWAP_CSR)
7982   }
7983   // clang-format on
7984   return nullptr;
7985 #undef NODE_NAME_CASE
7986 }
7987 
7988 /// getConstraintType - Given a constraint letter, return the type of
7989 /// constraint it is for this target.
7990 RISCVTargetLowering::ConstraintType
7991 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
7992   if (Constraint.size() == 1) {
7993     switch (Constraint[0]) {
7994     default:
7995       break;
7996     case 'f':
7997     case 'v':
7998       return C_RegisterClass;
7999     case 'I':
8000     case 'J':
8001     case 'K':
8002       return C_Immediate;
8003     case 'A':
8004       return C_Memory;
8005     }
8006   }
8007   return TargetLowering::getConstraintType(Constraint);
8008 }
8009 
8010 std::pair<unsigned, const TargetRegisterClass *>
8011 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
8012                                                   StringRef Constraint,
8013                                                   MVT VT) const {
8014   // First, see if this is a constraint that directly corresponds to a
8015   // RISCV register class.
8016   if (Constraint.size() == 1) {
8017     switch (Constraint[0]) {
8018     case 'r':
8019       return std::make_pair(0U, &RISCV::GPRRegClass);
8020     case 'f':
8021       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
8022         return std::make_pair(0U, &RISCV::FPR16RegClass);
8023       if (Subtarget.hasStdExtF() && VT == MVT::f32)
8024         return std::make_pair(0U, &RISCV::FPR32RegClass);
8025       if (Subtarget.hasStdExtD() && VT == MVT::f64)
8026         return std::make_pair(0U, &RISCV::FPR64RegClass);
8027       break;
8028     case 'v':
8029       for (const auto *RC :
8030            {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
8031             &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8032         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
8033           return std::make_pair(0U, RC);
8034       }
8035       break;
8036     default:
8037       break;
8038     }
8039   }
8040 
8041   // Clang will correctly decode the usage of register name aliases into their
8042   // official names. However, other frontends like `rustc` do not. This allows
8043   // users of these frontends to use the ABI names for registers in LLVM-style
8044   // register constraints.
8045   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
8046                                .Case("{zero}", RISCV::X0)
8047                                .Case("{ra}", RISCV::X1)
8048                                .Case("{sp}", RISCV::X2)
8049                                .Case("{gp}", RISCV::X3)
8050                                .Case("{tp}", RISCV::X4)
8051                                .Case("{t0}", RISCV::X5)
8052                                .Case("{t1}", RISCV::X6)
8053                                .Case("{t2}", RISCV::X7)
8054                                .Cases("{s0}", "{fp}", RISCV::X8)
8055                                .Case("{s1}", RISCV::X9)
8056                                .Case("{a0}", RISCV::X10)
8057                                .Case("{a1}", RISCV::X11)
8058                                .Case("{a2}", RISCV::X12)
8059                                .Case("{a3}", RISCV::X13)
8060                                .Case("{a4}", RISCV::X14)
8061                                .Case("{a5}", RISCV::X15)
8062                                .Case("{a6}", RISCV::X16)
8063                                .Case("{a7}", RISCV::X17)
8064                                .Case("{s2}", RISCV::X18)
8065                                .Case("{s3}", RISCV::X19)
8066                                .Case("{s4}", RISCV::X20)
8067                                .Case("{s5}", RISCV::X21)
8068                                .Case("{s6}", RISCV::X22)
8069                                .Case("{s7}", RISCV::X23)
8070                                .Case("{s8}", RISCV::X24)
8071                                .Case("{s9}", RISCV::X25)
8072                                .Case("{s10}", RISCV::X26)
8073                                .Case("{s11}", RISCV::X27)
8074                                .Case("{t3}", RISCV::X28)
8075                                .Case("{t4}", RISCV::X29)
8076                                .Case("{t5}", RISCV::X30)
8077                                .Case("{t6}", RISCV::X31)
8078                                .Default(RISCV::NoRegister);
8079   if (XRegFromAlias != RISCV::NoRegister)
8080     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
8081 
8082   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
8083   // TableGen record rather than the AsmName to choose registers for InlineAsm
8084   // constraints, and since we want to match those names to the widest floating point
8085   // register type available, manually select floating point registers here.
8086   //
8087   // The second case is the ABI name of the register, so that frontends can also
8088   // use the ABI names in register constraint lists.
8089   if (Subtarget.hasStdExtF()) {
8090     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
8091                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
8092                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
8093                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
8094                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
8095                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
8096                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
8097                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
8098                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
8099                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
8100                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
8101                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
8102                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
8103                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
8104                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
8105                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
8106                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
8107                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
8108                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
8109                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
8110                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
8111                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
8112                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
8113                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
8114                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
8115                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
8116                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
8117                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
8118                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
8119                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
8120                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
8121                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
8122                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
8123                         .Default(RISCV::NoRegister);
8124     if (FReg != RISCV::NoRegister) {
8125       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
8126       if (Subtarget.hasStdExtD()) {
8127         unsigned RegNo = FReg - RISCV::F0_F;
8128         unsigned DReg = RISCV::F0_D + RegNo;
8129         return std::make_pair(DReg, &RISCV::FPR64RegClass);
8130       }
8131       return std::make_pair(FReg, &RISCV::FPR32RegClass);
8132     }
8133   }
8134 
8135   if (Subtarget.hasStdExtV()) {
8136     Register VReg = StringSwitch<Register>(Constraint.lower())
8137                         .Case("{v0}", RISCV::V0)
8138                         .Case("{v1}", RISCV::V1)
8139                         .Case("{v2}", RISCV::V2)
8140                         .Case("{v3}", RISCV::V3)
8141                         .Case("{v4}", RISCV::V4)
8142                         .Case("{v5}", RISCV::V5)
8143                         .Case("{v6}", RISCV::V6)
8144                         .Case("{v7}", RISCV::V7)
8145                         .Case("{v8}", RISCV::V8)
8146                         .Case("{v9}", RISCV::V9)
8147                         .Case("{v10}", RISCV::V10)
8148                         .Case("{v11}", RISCV::V11)
8149                         .Case("{v12}", RISCV::V12)
8150                         .Case("{v13}", RISCV::V13)
8151                         .Case("{v14}", RISCV::V14)
8152                         .Case("{v15}", RISCV::V15)
8153                         .Case("{v16}", RISCV::V16)
8154                         .Case("{v17}", RISCV::V17)
8155                         .Case("{v18}", RISCV::V18)
8156                         .Case("{v19}", RISCV::V19)
8157                         .Case("{v20}", RISCV::V20)
8158                         .Case("{v21}", RISCV::V21)
8159                         .Case("{v22}", RISCV::V22)
8160                         .Case("{v23}", RISCV::V23)
8161                         .Case("{v24}", RISCV::V24)
8162                         .Case("{v25}", RISCV::V25)
8163                         .Case("{v26}", RISCV::V26)
8164                         .Case("{v27}", RISCV::V27)
8165                         .Case("{v28}", RISCV::V28)
8166                         .Case("{v29}", RISCV::V29)
8167                         .Case("{v30}", RISCV::V30)
8168                         .Case("{v31}", RISCV::V31)
8169                         .Default(RISCV::NoRegister);
8170     if (VReg != RISCV::NoRegister) {
8171       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
8172         return std::make_pair(VReg, &RISCV::VMRegClass);
8173       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
8174         return std::make_pair(VReg, &RISCV::VRRegClass);
8175       for (const auto *RC :
8176            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
8177         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
8178           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
8179           return std::make_pair(VReg, RC);
8180         }
8181       }
8182     }
8183   }
8184 
8185   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
8186 }
8187 
8188 unsigned
8189 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
8190   // Currently only support length 1 constraints.
8191   if (ConstraintCode.size() == 1) {
8192     switch (ConstraintCode[0]) {
8193     case 'A':
8194       return InlineAsm::Constraint_A;
8195     default:
8196       break;
8197     }
8198   }
8199 
8200   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
8201 }
8202 
8203 void RISCVTargetLowering::LowerAsmOperandForConstraint(
8204     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
8205     SelectionDAG &DAG) const {
8206   // Currently only support length 1 constraints.
8207   if (Constraint.length() == 1) {
8208     switch (Constraint[0]) {
8209     case 'I':
8210       // Validate & create a 12-bit signed immediate operand.
8211       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8212         uint64_t CVal = C->getSExtValue();
8213         if (isInt<12>(CVal))
8214           Ops.push_back(
8215               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8216       }
8217       return;
8218     case 'J':
8219       // Validate & create an integer zero operand.
8220       if (auto *C = dyn_cast<ConstantSDNode>(Op))
8221         if (C->getZExtValue() == 0)
8222           Ops.push_back(
8223               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
8224       return;
8225     case 'K':
8226       // Validate & create a 5-bit unsigned immediate operand.
8227       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
8228         uint64_t CVal = C->getZExtValue();
8229         if (isUInt<5>(CVal))
8230           Ops.push_back(
8231               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
8232       }
8233       return;
8234     default:
8235       break;
8236     }
8237   }
8238   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
8239 }
8240 
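// Emit the leading fence for an atomic access: sequentially consistent loads
// take a seq_cst fence and release-or-stronger stores take a release fence;
// other cases need no leading fence.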
8241 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
8242                                                    Instruction *Inst,
8243                                                    AtomicOrdering Ord) const {
8244   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
8245     return Builder.CreateFence(Ord);
8246   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
8247     return Builder.CreateFence(AtomicOrdering::Release);
8248   return nullptr;
8249 }
8250 
8251 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
8252                                                     Instruction *Inst,
8253                                                     AtomicOrdering Ord) const {
8254   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
8255     return Builder.CreateFence(AtomicOrdering::Acquire);
8256   return nullptr;
8257 }
8258 
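// Decide how an atomicrmw should be expanded: floating-point operations use a
// cmpxchg loop, 8- and 16-bit integer operations use the masked intrinsics,
// and everything else needs no expansion.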
8259 TargetLowering::AtomicExpansionKind
8260 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
8261   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
8262   // point operations can't be used in an lr/sc sequence without breaking the
8263   // forward-progress guarantee.
8264   if (AI->isFloatingPointOperation())
8265     return AtomicExpansionKind::CmpXChg;
8266 
8267   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
8268   if (Size == 8 || Size == 16)
8269     return AtomicExpansionKind::MaskedIntrinsic;
8270   return AtomicExpansionKind::None;
8271 }
8272 
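// Map an atomicrmw binary operation to the corresponding
// riscv_masked_atomicrmw_* intrinsic for the given XLEN.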
8273 static Intrinsic::ID
8274 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
8275   if (XLen == 32) {
8276     switch (BinOp) {
8277     default:
8278       llvm_unreachable("Unexpected AtomicRMW BinOp");
8279     case AtomicRMWInst::Xchg:
8280       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
8281     case AtomicRMWInst::Add:
8282       return Intrinsic::riscv_masked_atomicrmw_add_i32;
8283     case AtomicRMWInst::Sub:
8284       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
8285     case AtomicRMWInst::Nand:
8286       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
8287     case AtomicRMWInst::Max:
8288       return Intrinsic::riscv_masked_atomicrmw_max_i32;
8289     case AtomicRMWInst::Min:
8290       return Intrinsic::riscv_masked_atomicrmw_min_i32;
8291     case AtomicRMWInst::UMax:
8292       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
8293     case AtomicRMWInst::UMin:
8294       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
8295     }
8296   }
8297 
8298   if (XLen == 64) {
8299     switch (BinOp) {
8300     default:
8301       llvm_unreachable("Unexpected AtomicRMW BinOp");
8302     case AtomicRMWInst::Xchg:
8303       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
8304     case AtomicRMWInst::Add:
8305       return Intrinsic::riscv_masked_atomicrmw_add_i64;
8306     case AtomicRMWInst::Sub:
8307       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
8308     case AtomicRMWInst::Nand:
8309       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
8310     case AtomicRMWInst::Max:
8311       return Intrinsic::riscv_masked_atomicrmw_max_i64;
8312     case AtomicRMWInst::Min:
8313       return Intrinsic::riscv_masked_atomicrmw_min_i64;
8314     case AtomicRMWInst::UMax:
8315       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
8316     case AtomicRMWInst::UMin:
8317       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
8318     }
8319   }
8320 
8321   llvm_unreachable("Unexpected XLen\n");
8322 }
8323 
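// Emit a call to the masked atomicrmw intrinsic selected above; on RV64 the
// 32-bit operands are sign-extended to i64 and the result is truncated back.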
8324 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
8325     IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
8326     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
8327   unsigned XLen = Subtarget.getXLen();
8328   Value *Ordering =
8329       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
8330   Type *Tys[] = {AlignedAddr->getType()};
8331   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
8332       AI->getModule(),
8333       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
8334 
8335   if (XLen == 64) {
8336     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
8337     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8338     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
8339   }
8340 
8341   Value *Result;
8342 
8343   // Must pass the shift amount needed to sign extend the loaded value prior
8344   // to performing a signed comparison for min/max. ShiftAmt is the number of
8345   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
8346   // is the number of bits to left+right shift the value in order to
8347   // sign-extend.
8348   if (AI->getOperation() == AtomicRMWInst::Min ||
8349       AI->getOperation() == AtomicRMWInst::Max) {
8350     const DataLayout &DL = AI->getModule()->getDataLayout();
8351     unsigned ValWidth =
8352         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
8353     Value *SextShamt =
8354         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
8355     Result = Builder.CreateCall(LrwOpScwLoop,
8356                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
8357   } else {
8358     Result =
8359         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
8360   }
8361 
8362   if (XLen == 64)
8363     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8364   return Result;
8365 }
8366 
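// 8- and 16-bit cmpxchg operations must also be expanded via the masked
// intrinsic.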
8367 TargetLowering::AtomicExpansionKind
8368 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
8369     AtomicCmpXchgInst *CI) const {
8370   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
8371   if (Size == 8 || Size == 16)
8372     return AtomicExpansionKind::MaskedIntrinsic;
8373   return AtomicExpansionKind::None;
8374 }
8375 
8376 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
8377     IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
8378     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
8379   unsigned XLen = Subtarget.getXLen();
8380   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
8381   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
8382   if (XLen == 64) {
8383     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
8384     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
8385     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8386     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
8387   }
8388   Type *Tys[] = {AlignedAddr->getType()};
8389   Function *MaskedCmpXchg =
8390       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
8391   Value *Result = Builder.CreateCall(
8392       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
8393   if (XLen == 64)
8394     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8395   return Result;
8396 }
8397 
8398 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
8399   return false;
8400 }
8401 
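// Fused multiply-add is considered faster than separate FMUL/FADD whenever
// the scalar type is natively supported: f16 with Zfh, f32 with F and f64
// with D.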
8402 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
8403                                                      EVT VT) const {
8404   VT = VT.getScalarType();
8405 
8406   if (!VT.isSimple())
8407     return false;
8408 
8409   switch (VT.getSimpleVT().SimpleTy) {
8410   case MVT::f16:
8411     return Subtarget.hasStdExtZfh();
8412   case MVT::f32:
8413     return Subtarget.hasStdExtF();
8414   case MVT::f64:
8415     return Subtarget.hasStdExtD();
8416   default:
8417     break;
8418   }
8419 
8420   return false;
8421 }
8422 
8423 Register RISCVTargetLowering::getExceptionPointerRegister(
8424     const Constant *PersonalityFn) const {
8425   return RISCV::X10;
8426 }
8427 
8428 Register RISCVTargetLowering::getExceptionSelectorRegister(
8429     const Constant *PersonalityFn) const {
8430   return RISCV::X11;
8431 }
8432 
8433 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
8434   // Return false to suppress unnecessary extensions if the LibCall argument
8435   // or return value is an f32 under the LP64 ABI.
8436   RISCVABI::ABI ABI = Subtarget.getTargetABI();
8437   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
8438     return false;
8439 
8440   return true;
8441 }
8442 
8443 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
8444   if (Subtarget.is64Bit() && Type == MVT::i32)
8445     return true;
8446 
8447   return IsSigned;
8448 }
8449 
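// Decide whether a multiplication by the constant C is better decomposed into
// shifts plus an add/sub than lowered as a MUL instruction.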
8450 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
8451                                                  SDValue C) const {
8452   // Check integral scalar types.
8453   if (VT.isScalarInteger()) {
8454     // Omit the optimization if the subtarget has the M extension and the data
8455     // size exceeds XLen.
8456     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
8457       return false;
8458     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
8459       // Break the MUL to a SLLI and an ADD/SUB.
8460       const APInt &Imm = ConstNode->getAPIntValue();
8461       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
8462           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
8463         return true;
8464       // Omit the following optimization if the subtarget has the M extension
8465       // and the data size >= XLen.
8466       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
8467         return false;
8468       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
8469       // a pair of LUI/ADDI.
8470       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
8471         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
8472         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
8473             (1 - ImmS).isPowerOf2())
8474           return true;
8475       }
8476     }
8477   }
8478 
8479   return false;
8480 }
8481 
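// Misaligned scalar accesses are not allowed; vector accesses are treated as
// fast when the alignment covers at least one element.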
8482 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
8483     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
8484     bool *Fast) const {
8485   if (!VT.isVector())
8486     return false;
8487 
8488   EVT ElemVT = VT.getVectorElementType();
8489   if (Alignment >= ElemVT.getStoreSize()) {
8490     if (Fast)
8491       *Fast = true;
8492     return true;
8493   }
8494 
8495   return false;
8496 }
8497 
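// Target-specific splitting of a value into ABI register parts: f16 values
// passed in an f32 register are NaN-boxed, and scalable vectors are widened
// by inserting them into an undef vector of the part type.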
8498 bool RISCVTargetLowering::splitValueIntoRegisterParts(
8499     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
8500     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
8501   bool IsABIRegCopy = CC.hasValue();
8502   EVT ValueVT = Val.getValueType();
8503   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8504     // Cast the f16 to i16, extend to i32, pad with ones to make a float NaN,
8505     // and cast to f32.
8506     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
8507     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
8508     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
8509                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
8510     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
8511     Parts[0] = Val;
8512     return true;
8513   }
8514 
8515   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8516     LLVMContext &Context = *DAG.getContext();
8517     EVT ValueEltVT = ValueVT.getVectorElementType();
8518     EVT PartEltVT = PartVT.getVectorElementType();
8519     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8520     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8521     if (PartVTBitSize % ValueVTBitSize == 0) {
8522       // If the element types are different, bitcast to the same element type of
8523       // PartVT first.
8524       if (ValueEltVT != PartEltVT) {
8525         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
8526         assert(Count != 0 && "The number of elements should not be zero.");
8527         EVT SameEltTypeVT =
8528             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8529         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
8530       }
8531       Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
8532                         Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8533       Parts[0] = Val;
8534       return true;
8535     }
8536   }
8537   return false;
8538 }
8539 
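// Inverse of splitValueIntoRegisterParts: recover an f16 value from its
// NaN-boxed f32 part and extract a scalable-vector value from the (possibly
// wider) part type.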
8540 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
8541     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
8542     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
8543   bool IsABIRegCopy = CC.hasValue();
8544   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8545     SDValue Val = Parts[0];
8546 
8547     // Cast the f32 to i32, truncate to i16, and cast back to f16.
8548     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
8549     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
8550     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
8551     return Val;
8552   }
8553 
8554   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8555     LLVMContext &Context = *DAG.getContext();
8556     SDValue Val = Parts[0];
8557     EVT ValueEltVT = ValueVT.getVectorElementType();
8558     EVT PartEltVT = PartVT.getVectorElementType();
8559     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8560     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8561     if (PartVTBitSize % ValueVTBitSize == 0) {
8562       EVT SameEltTypeVT = ValueVT;
8563       // If the element types are different, convert it to the same element type
8564       // of PartVT.
8565       if (ValueEltVT != PartEltVT) {
8566         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
8567         assert(Count != 0 && "The number of elements should not be zero.");
8568         SameEltTypeVT =
8569             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8570       }
8571       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
8572                         DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8573       if (ValueEltVT != PartEltVT)
8574         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
8575       return Val;
8576     }
8577   }
8578   return SDValue();
8579 }
8580 
8581 #define GET_REGISTER_MATCHER
8582 #include "RISCVGenAsmMatcher.inc"
8583 
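// Resolve a register name (architectural name or ABI alias) to a physical
// register. Only registers that are reserved, either globally or by the user,
// may be obtained this way; anything else is a fatal error.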
8584 Register
8585 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
8586                                        const MachineFunction &MF) const {
8587   Register Reg = MatchRegisterAltName(RegName);
8588   if (Reg == RISCV::NoRegister)
8589     Reg = MatchRegisterName(RegName);
8590   if (Reg == RISCV::NoRegister)
8591     report_fatal_error(
8592         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
8593   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
8594   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
8595     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
8596                              StringRef(RegName) + "\"."));
8597   return Reg;
8598 }
8599 
8600 namespace llvm {
8601 namespace RISCVVIntrinsicsTable {
8602 
8603 #define GET_RISCVVIntrinsicsTable_IMPL
8604 #include "RISCVGenSearchableTables.inc"
8605 
8606 } // namespace RISCVVIntrinsicsTable
8607 
8608 } // namespace llvm
8609