1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/CodeGen/ValueTypes.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "llvm/IR/DiagnosticPrinter.h"
32 #include "llvm/IR/IntrinsicsRISCV.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/KnownBits.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "riscv-lower"
42 
43 STATISTIC(NumTailCalls, "Number of tail calls");
44 
45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
46                                          const RISCVSubtarget &STI)
47     : TargetLowering(TM), Subtarget(STI) {
48 
49   if (Subtarget.isRV32E())
50     report_fatal_error("Codegen not yet implemented for RV32E");
51 
52   RISCVABI::ABI ABI = Subtarget.getTargetABI();
53   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
54 
55   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
56       !Subtarget.hasStdExtF()) {
57     errs() << "Hard-float 'f' ABI can't be used for a target that "
58                 "doesn't support the F instruction set extension (ignoring "
59                           "target-abi)\n";
60     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
61   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
62              !Subtarget.hasStdExtD()) {
63     errs() << "Hard-float 'd' ABI can't be used for a target that "
64               "doesn't support the D instruction set extension (ignoring "
65               "target-abi)\n";
66     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
67   }
68 
69   switch (ABI) {
70   default:
71     report_fatal_error("Don't know how to lower this ABI");
72   case RISCVABI::ABI_ILP32:
73   case RISCVABI::ABI_ILP32F:
74   case RISCVABI::ABI_ILP32D:
75   case RISCVABI::ABI_LP64:
76   case RISCVABI::ABI_LP64F:
77   case RISCVABI::ABI_LP64D:
78     break;
79   }
80 
81   MVT XLenVT = Subtarget.getXLenVT();
82 
83   // Set up the register classes.
84   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
85 
86   if (Subtarget.hasStdExtZfh())
87     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
88   if (Subtarget.hasStdExtF())
89     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
90   if (Subtarget.hasStdExtD())
91     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
92 
93   static const MVT::SimpleValueType BoolVecVTs[] = {
94       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
95       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
96   static const MVT::SimpleValueType IntVecVTs[] = {
97       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
98       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
99       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
100       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
101       MVT::nxv4i64, MVT::nxv8i64};
102   static const MVT::SimpleValueType F16VecVTs[] = {
103       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
104       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
105   static const MVT::SimpleValueType F32VecVTs[] = {
106       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
107   static const MVT::SimpleValueType F64VecVTs[] = {
108       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
109 
110   if (Subtarget.hasStdExtV()) {
111     auto addRegClassForRVV = [this](MVT VT) {
112       unsigned Size = VT.getSizeInBits().getKnownMinValue();
113       assert(Size <= 512 && isPowerOf2_32(Size));
114       const TargetRegisterClass *RC;
115       if (Size <= 64)
116         RC = &RISCV::VRRegClass;
117       else if (Size == 128)
118         RC = &RISCV::VRM2RegClass;
119       else if (Size == 256)
120         RC = &RISCV::VRM4RegClass;
121       else
122         RC = &RISCV::VRM8RegClass;
123 
124       addRegisterClass(VT, RC);
125     };
126 
127     for (MVT VT : BoolVecVTs)
128       addRegClassForRVV(VT);
129     for (MVT VT : IntVecVTs)
130       addRegClassForRVV(VT);
131 
132     if (Subtarget.hasStdExtZfh())
133       for (MVT VT : F16VecVTs)
134         addRegClassForRVV(VT);
135 
136     if (Subtarget.hasStdExtF())
137       for (MVT VT : F32VecVTs)
138         addRegClassForRVV(VT);
139 
140     if (Subtarget.hasStdExtD())
141       for (MVT VT : F64VecVTs)
142         addRegClassForRVV(VT);
143 
144     if (Subtarget.useRVVForFixedLengthVectors()) {
145       auto addRegClassForFixedVectors = [this](MVT VT) {
146         MVT ContainerVT = getContainerForFixedLengthVector(VT);
147         unsigned RCID = getRegClassIDForVecVT(ContainerVT);
148         const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
149         addRegisterClass(VT, TRI.getRegClass(RCID));
150       };
151       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
152         if (useRVVForFixedLengthVectorVT(VT))
153           addRegClassForFixedVectors(VT);
154 
155       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
156         if (useRVVForFixedLengthVectorVT(VT))
157           addRegClassForFixedVectors(VT);
158     }
159   }
160 
161   // Compute derived properties from the register classes.
162   computeRegisterProperties(STI.getRegisterInfo());
163 
164   setStackPointerRegisterToSaveRestore(RISCV::X2);
165 
166   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
167     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
168 
169   // TODO: add all necessary setOperationAction calls.
170   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
171 
172   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
173   setOperationAction(ISD::BR_CC, XLenVT, Expand);
174   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
175   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
176 
177   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
178   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
179 
180   setOperationAction(ISD::VASTART, MVT::Other, Custom);
181   setOperationAction(ISD::VAARG, MVT::Other, Expand);
182   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
183   setOperationAction(ISD::VAEND, MVT::Other, Expand);
184 
185   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
186   if (!Subtarget.hasStdExtZbb()) {
187     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
188     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
189   }
190 
191   if (Subtarget.is64Bit()) {
192     setOperationAction(ISD::ADD, MVT::i32, Custom);
193     setOperationAction(ISD::SUB, MVT::i32, Custom);
194     setOperationAction(ISD::SHL, MVT::i32, Custom);
195     setOperationAction(ISD::SRA, MVT::i32, Custom);
196     setOperationAction(ISD::SRL, MVT::i32, Custom);
197 
198     setOperationAction(ISD::UADDO, MVT::i32, Custom);
199     setOperationAction(ISD::USUBO, MVT::i32, Custom);
200     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
201     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
202   }
203 
204   if (!Subtarget.hasStdExtM()) {
205     setOperationAction(ISD::MUL, XLenVT, Expand);
206     setOperationAction(ISD::MULHS, XLenVT, Expand);
207     setOperationAction(ISD::MULHU, XLenVT, Expand);
208     setOperationAction(ISD::SDIV, XLenVT, Expand);
209     setOperationAction(ISD::UDIV, XLenVT, Expand);
210     setOperationAction(ISD::SREM, XLenVT, Expand);
211     setOperationAction(ISD::UREM, XLenVT, Expand);
212   } else {
213     if (Subtarget.is64Bit()) {
214       setOperationAction(ISD::MUL, MVT::i32, Custom);
215       setOperationAction(ISD::MUL, MVT::i128, Custom);
216 
217       setOperationAction(ISD::SDIV, MVT::i8, Custom);
218       setOperationAction(ISD::UDIV, MVT::i8, Custom);
219       setOperationAction(ISD::UREM, MVT::i8, Custom);
220       setOperationAction(ISD::SDIV, MVT::i16, Custom);
221       setOperationAction(ISD::UDIV, MVT::i16, Custom);
222       setOperationAction(ISD::UREM, MVT::i16, Custom);
223       setOperationAction(ISD::SDIV, MVT::i32, Custom);
224       setOperationAction(ISD::UDIV, MVT::i32, Custom);
225       setOperationAction(ISD::UREM, MVT::i32, Custom);
226     } else {
227       setOperationAction(ISD::MUL, MVT::i64, Custom);
228     }
229   }
230 
231   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
232   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
233   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
234   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
235 
236   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
237   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
238   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
239 
240   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
241     if (Subtarget.is64Bit()) {
242       setOperationAction(ISD::ROTL, MVT::i32, Custom);
243       setOperationAction(ISD::ROTR, MVT::i32, Custom);
244     }
245   } else {
246     setOperationAction(ISD::ROTL, XLenVT, Expand);
247     setOperationAction(ISD::ROTR, XLenVT, Expand);
248   }
249 
250   if (Subtarget.hasStdExtZbp()) {
251     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
252     // more combining.
253     setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
254     setOperationAction(ISD::BSWAP, XLenVT, Custom);
255 
256     if (Subtarget.is64Bit()) {
257       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
258       setOperationAction(ISD::BSWAP, MVT::i32, Custom);
259     }
260   } else {
261     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
262     // pattern match it directly in isel.
263     setOperationAction(ISD::BSWAP, XLenVT,
264                        Subtarget.hasStdExtZbb() ? Legal : Expand);
265   }
266 
267   if (Subtarget.hasStdExtZbb()) {
268     setOperationAction(ISD::SMIN, XLenVT, Legal);
269     setOperationAction(ISD::SMAX, XLenVT, Legal);
270     setOperationAction(ISD::UMIN, XLenVT, Legal);
271     setOperationAction(ISD::UMAX, XLenVT, Legal);
272 
273     if (Subtarget.is64Bit()) {
274       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
275       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
276       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
277       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
278     }
279   } else {
280     setOperationAction(ISD::CTTZ, XLenVT, Expand);
281     setOperationAction(ISD::CTLZ, XLenVT, Expand);
282     setOperationAction(ISD::CTPOP, XLenVT, Expand);
283   }
284 
285   if (Subtarget.hasStdExtZbt()) {
286     setOperationAction(ISD::FSHL, XLenVT, Custom);
287     setOperationAction(ISD::FSHR, XLenVT, Custom);
288     setOperationAction(ISD::SELECT, XLenVT, Legal);
289 
290     if (Subtarget.is64Bit()) {
291       setOperationAction(ISD::FSHL, MVT::i32, Custom);
292       setOperationAction(ISD::FSHR, MVT::i32, Custom);
293     }
294   } else {
295     setOperationAction(ISD::SELECT, XLenVT, Custom);
296   }
297 
298   ISD::CondCode FPCCToExpand[] = {
299       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
300       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
301       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
302 
303   ISD::NodeType FPOpToExpand[] = {
304       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
305       ISD::FP_TO_FP16};
306 
307   if (Subtarget.hasStdExtZfh())
308     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
309 
310   if (Subtarget.hasStdExtZfh()) {
311     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
312     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
313     for (auto CC : FPCCToExpand)
314       setCondCodeAction(CC, MVT::f16, Expand);
315     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
316     setOperationAction(ISD::SELECT, MVT::f16, Custom);
317     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
318     for (auto Op : FPOpToExpand)
319       setOperationAction(Op, MVT::f16, Expand);
320   }
321 
322   if (Subtarget.hasStdExtF()) {
323     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
324     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
325     for (auto CC : FPCCToExpand)
326       setCondCodeAction(CC, MVT::f32, Expand);
327     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
328     setOperationAction(ISD::SELECT, MVT::f32, Custom);
329     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
330     for (auto Op : FPOpToExpand)
331       setOperationAction(Op, MVT::f32, Expand);
332     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
333     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
334   }
335 
336   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
337     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
338 
339   if (Subtarget.hasStdExtD()) {
340     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
341     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
342     for (auto CC : FPCCToExpand)
343       setCondCodeAction(CC, MVT::f64, Expand);
344     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
345     setOperationAction(ISD::SELECT, MVT::f64, Custom);
346     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
347     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
348     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
349     for (auto Op : FPOpToExpand)
350       setOperationAction(Op, MVT::f64, Expand);
351     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
352     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
353   }
354 
355   if (Subtarget.is64Bit()) {
356     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
357     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
358     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
359     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
360   }
361 
362   if (Subtarget.hasStdExtF()) {
363     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
364     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
365   }
366 
367   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
368   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
369   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
370   setOperationAction(ISD::JumpTable, XLenVT, Custom);
371 
372   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
373 
374   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
375   // Unfortunately this can't be determined just from the ISA naming string.
376   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
377                      Subtarget.is64Bit() ? Legal : Custom);
378 
379   setOperationAction(ISD::TRAP, MVT::Other, Legal);
380   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
381   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
382   if (Subtarget.is64Bit())
383     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
384 
385   if (Subtarget.hasStdExtA()) {
386     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
387     setMinCmpXchgSizeInBits(32);
388   } else {
389     setMaxAtomicSizeInBitsSupported(0);
390   }
391 
392   setBooleanContents(ZeroOrOneBooleanContent);
393 
394   if (Subtarget.hasStdExtV()) {
395     setBooleanVectorContents(ZeroOrOneBooleanContent);
396 
397     setOperationAction(ISD::VSCALE, XLenVT, Custom);
398 
399     // RVV intrinsics may have illegal operands.
400     // We also need to custom legalize vmv.x.s.
401     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
402     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
403     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
404     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
405     if (Subtarget.is64Bit()) {
406       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
407     } else {
408       setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
409       setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
410     }
411 
412     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
413 
414     if (!Subtarget.is64Bit()) {
415       // We must custom-lower certain vXi64 operations on RV32 due to the vector
416       // element type being illegal.
417       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
418       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
419 
420       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
421       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
422       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
423       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
424       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
425       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
426       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
427       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
428     }
429 
430     for (MVT VT : BoolVecVTs) {
431       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
432 
433       // Mask VTs are custom-expanded into a series of standard nodes
434       setOperationAction(ISD::TRUNCATE, VT, Custom);
435       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
436       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
437 
438       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
439 
440       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
441       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
442       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
443 
444       // Expand all extending loads to types larger than this, and truncating
445       // stores from types larger than this.
446       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
447         setTruncStoreAction(OtherVT, VT, Expand);
448         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
449         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
450         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
451       }
452     }
453 
454     for (MVT VT : IntVecVTs) {
455       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
456       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
457 
458       setOperationAction(ISD::SMIN, VT, Legal);
459       setOperationAction(ISD::SMAX, VT, Legal);
460       setOperationAction(ISD::UMIN, VT, Legal);
461       setOperationAction(ISD::UMAX, VT, Legal);
462 
463       setOperationAction(ISD::ROTL, VT, Expand);
464       setOperationAction(ISD::ROTR, VT, Expand);
465 
466       // Custom-lower extensions and truncations from/to mask types.
467       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
468       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
469       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
470 
471       // RVV has native int->float & float->int conversions where the
472       // element type sizes are within one power-of-two of each other. Any
473       // wider distances between type sizes have to be lowered as sequences
474       // which progressively narrow the gap in stages.
475       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
476       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
477       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
478       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
479 
480       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
481       // nodes which truncate by one power of two at a time.
482       setOperationAction(ISD::TRUNCATE, VT, Custom);
483 
484       // Custom-lower insert/extract operations to simplify patterns.
485       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
486       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
487 
488       // Custom-lower reduction operations to set up the corresponding custom
489       // nodes' operands.
490       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
491       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
492       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
493       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
494       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
495       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
496       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
497       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
498 
499       setOperationAction(ISD::MLOAD, VT, Custom);
500       setOperationAction(ISD::MSTORE, VT, Custom);
501       setOperationAction(ISD::MGATHER, VT, Custom);
502       setOperationAction(ISD::MSCATTER, VT, Custom);
503 
504       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
505       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
506       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
507 
508       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
509       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
510 
511       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
512         setTruncStoreAction(VT, OtherVT, Expand);
513         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
514         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
515         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
516       }
517     }
518 
519     // Expand various CCs to best match the RVV ISA, which natively supports UNE
520     // but no other unordered comparisons, and supports all ordered comparisons
521     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
522     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
523     // and we pattern-match those back to the "original", swapping operands once
524     // more. This way we catch both operations and both "vf" and "fv" forms with
525     // fewer patterns.
526     ISD::CondCode VFPCCToExpand[] = {
527         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
528         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
529         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
530     };
531 
532     // Sets common operation actions on RVV floating-point vector types.
533     const auto SetCommonVFPActions = [&](MVT VT) {
534       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
535       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
536       // sizes are within one power-of-two of each other. Therefore conversions
537       // between vXf16 and vXf64 must be lowered as sequences which convert via
538       // vXf32.
539       setOperationAction(ISD::FP_ROUND, VT, Custom);
540       setOperationAction(ISD::FP_EXTEND, VT, Custom);
541       // Custom-lower insert/extract operations to simplify patterns.
542       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
543       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
544       // Expand various condition codes (explained above).
545       for (auto CC : VFPCCToExpand)
546         setCondCodeAction(CC, VT, Expand);
547 
548       setOperationAction(ISD::FMINNUM, VT, Legal);
549       setOperationAction(ISD::FMAXNUM, VT, Legal);
550 
551       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
552       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
553       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
554 
555       setOperationAction(ISD::MLOAD, VT, Custom);
556       setOperationAction(ISD::MSTORE, VT, Custom);
557       setOperationAction(ISD::MGATHER, VT, Custom);
558       setOperationAction(ISD::MSCATTER, VT, Custom);
559 
560       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
561       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
562       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
563 
564       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
565     };
566 
567     // Sets common extload/truncstore actions on RVV floating-point vector
568     // types.
569     const auto SetCommonVFPExtLoadTruncStoreActions =
570         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
571           for (auto SmallVT : SmallerVTs) {
572             setTruncStoreAction(VT, SmallVT, Expand);
573             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
574           }
575         };
576 
577     if (Subtarget.hasStdExtZfh())
578       for (MVT VT : F16VecVTs)
579         SetCommonVFPActions(VT);
580 
581     for (MVT VT : F32VecVTs) {
582       if (Subtarget.hasStdExtF())
583         SetCommonVFPActions(VT);
584       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
585     }
586 
587     for (MVT VT : F64VecVTs) {
588       if (Subtarget.hasStdExtD())
589         SetCommonVFPActions(VT);
590       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
591       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
592     }
593 
594     if (Subtarget.useRVVForFixedLengthVectors()) {
595       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
596         if (!useRVVForFixedLengthVectorVT(VT))
597           continue;
598 
599         // By default everything must be expanded.
600         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
601           setOperationAction(Op, VT, Expand);
602         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
603           setTruncStoreAction(VT, OtherVT, Expand);
604           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
605           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
606           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
607         }
608 
609         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
610         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
611         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
612 
613         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
614         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
615 
616         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
617 
618         setOperationAction(ISD::LOAD, VT, Custom);
619         setOperationAction(ISD::STORE, VT, Custom);
620 
621         setOperationAction(ISD::SETCC, VT, Custom);
622 
623         setOperationAction(ISD::TRUNCATE, VT, Custom);
624 
625         setOperationAction(ISD::BITCAST, VT, Custom);
626 
627         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
628         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
629         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
630 
631         // Operations below are different for between masks and other vectors.
632         if (VT.getVectorElementType() == MVT::i1) {
633           setOperationAction(ISD::AND, VT, Custom);
634           setOperationAction(ISD::OR, VT, Custom);
635           setOperationAction(ISD::XOR, VT, Custom);
636           continue;
637         }
638 
639         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
640         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
641 
642         setOperationAction(ISD::MLOAD, VT, Custom);
643         setOperationAction(ISD::MSTORE, VT, Custom);
644         setOperationAction(ISD::MGATHER, VT, Custom);
645         setOperationAction(ISD::MSCATTER, VT, Custom);
646         setOperationAction(ISD::ADD, VT, Custom);
647         setOperationAction(ISD::MUL, VT, Custom);
648         setOperationAction(ISD::SUB, VT, Custom);
649         setOperationAction(ISD::AND, VT, Custom);
650         setOperationAction(ISD::OR, VT, Custom);
651         setOperationAction(ISD::XOR, VT, Custom);
652         setOperationAction(ISD::SDIV, VT, Custom);
653         setOperationAction(ISD::SREM, VT, Custom);
654         setOperationAction(ISD::UDIV, VT, Custom);
655         setOperationAction(ISD::UREM, VT, Custom);
656         setOperationAction(ISD::SHL, VT, Custom);
657         setOperationAction(ISD::SRA, VT, Custom);
658         setOperationAction(ISD::SRL, VT, Custom);
659 
660         setOperationAction(ISD::SMIN, VT, Custom);
661         setOperationAction(ISD::SMAX, VT, Custom);
662         setOperationAction(ISD::UMIN, VT, Custom);
663         setOperationAction(ISD::UMAX, VT, Custom);
664         setOperationAction(ISD::ABS,  VT, Custom);
665 
666         setOperationAction(ISD::MULHS, VT, Custom);
667         setOperationAction(ISD::MULHU, VT, Custom);
668 
669         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
670         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
671         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
672         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
673 
674         setOperationAction(ISD::VSELECT, VT, Custom);
675 
676         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
677         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
678         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
679 
680         // Custom-lower reduction operations to set up the corresponding custom
681         // nodes' operands.
682         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
683         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
684         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
685         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
686         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
687       }
688 
689       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
690         if (!useRVVForFixedLengthVectorVT(VT))
691           continue;
692 
693         // By default everything must be expanded.
694         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
695           setOperationAction(Op, VT, Expand);
696         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
697           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
698           setTruncStoreAction(VT, OtherVT, Expand);
699         }
700 
701         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
702         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
703         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
704 
705         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
706         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
707         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
708         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
709 
710         setOperationAction(ISD::LOAD, VT, Custom);
711         setOperationAction(ISD::STORE, VT, Custom);
712         setOperationAction(ISD::MLOAD, VT, Custom);
713         setOperationAction(ISD::MSTORE, VT, Custom);
714         setOperationAction(ISD::MGATHER, VT, Custom);
715         setOperationAction(ISD::MSCATTER, VT, Custom);
716         setOperationAction(ISD::FADD, VT, Custom);
717         setOperationAction(ISD::FSUB, VT, Custom);
718         setOperationAction(ISD::FMUL, VT, Custom);
719         setOperationAction(ISD::FDIV, VT, Custom);
720         setOperationAction(ISD::FNEG, VT, Custom);
721         setOperationAction(ISD::FABS, VT, Custom);
722         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
723         setOperationAction(ISD::FSQRT, VT, Custom);
724         setOperationAction(ISD::FMA, VT, Custom);
725         setOperationAction(ISD::FMINNUM, VT, Custom);
726         setOperationAction(ISD::FMAXNUM, VT, Custom);
727 
728         setOperationAction(ISD::FP_ROUND, VT, Custom);
729         setOperationAction(ISD::FP_EXTEND, VT, Custom);
730 
731         for (auto CC : VFPCCToExpand)
732           setCondCodeAction(CC, VT, Expand);
733 
734         setOperationAction(ISD::VSELECT, VT, Custom);
735 
736         setOperationAction(ISD::BITCAST, VT, Custom);
737 
738         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
739         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
740       }
741 
742       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
743       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
744       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
745       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
746       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
747       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
748       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
749       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
750     }
751   }
752 
753   // Function alignments.
754   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
755   setMinFunctionAlignment(FunctionAlignment);
756   setPrefFunctionAlignment(FunctionAlignment);
757 
758   setMinimumJumpTableEntries(5);
759 
760   // Jumps are expensive, compared to logic
761   setJumpIsExpensive();
762 
763   // We can use any register for comparisons
764   setHasMultipleConditionRegisters();
765 
766   if (Subtarget.hasStdExtZbp()) {
767     setTargetDAGCombine(ISD::OR);
768   }
769   if (Subtarget.hasStdExtV()) {
770     setTargetDAGCombine(ISD::FCOPYSIGN);
771     setTargetDAGCombine(ISD::MGATHER);
772     setTargetDAGCombine(ISD::MSCATTER);
773   }
774 }
775 
776 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
777                                             LLVMContext &Context,
778                                             EVT VT) const {
779   if (!VT.isVector())
780     return getPointerTy(DL);
781   if (Subtarget.hasStdExtV() &&
782       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
783     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
784   return VT.changeVectorElementTypeToInteger();
785 }
786 
787 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
788                                              const CallInst &I,
789                                              MachineFunction &MF,
790                                              unsigned Intrinsic) const {
791   switch (Intrinsic) {
792   default:
793     return false;
794   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
795   case Intrinsic::riscv_masked_atomicrmw_add_i32:
796   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
797   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
798   case Intrinsic::riscv_masked_atomicrmw_max_i32:
799   case Intrinsic::riscv_masked_atomicrmw_min_i32:
800   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
801   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
802   case Intrinsic::riscv_masked_cmpxchg_i32:
803     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
804     Info.opc = ISD::INTRINSIC_W_CHAIN;
805     Info.memVT = MVT::getVT(PtrTy->getElementType());
806     Info.ptrVal = I.getArgOperand(0);
807     Info.offset = 0;
808     Info.align = Align(4);
809     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
810                  MachineMemOperand::MOVolatile;
811     return true;
812   }
813 }
814 
815 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
816                                                 const AddrMode &AM, Type *Ty,
817                                                 unsigned AS,
818                                                 Instruction *I) const {
819   // No global is ever allowed as a base.
820   if (AM.BaseGV)
821     return false;
822 
823   // Require a 12-bit signed offset.
824   if (!isInt<12>(AM.BaseOffs))
825     return false;
826 
827   switch (AM.Scale) {
828   case 0: // "r+i" or just "i", depending on HasBaseReg.
829     break;
830   case 1:
831     if (!AM.HasBaseReg) // allow "r+i".
832       break;
833     return false; // disallow "r+r" or "r+r+i".
834   default:
835     return false;
836   }
837 
838   return true;
839 }
840 
841 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
842   return isInt<12>(Imm);
843 }
844 
845 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
846   return isInt<12>(Imm);
847 }
848 
849 // On RV32, 64-bit integers are split into their high and low parts and held
850 // in two different registers, so the trunc is free since the low register can
851 // just be used.
852 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
853   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
854     return false;
855   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
856   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
857   return (SrcBits == 64 && DestBits == 32);
858 }
859 
860 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
861   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
862       !SrcVT.isInteger() || !DstVT.isInteger())
863     return false;
864   unsigned SrcBits = SrcVT.getSizeInBits();
865   unsigned DestBits = DstVT.getSizeInBits();
866   return (SrcBits == 64 && DestBits == 32);
867 }
868 
869 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
870   // Zexts are free if they can be combined with a load.
871   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
872     EVT MemVT = LD->getMemoryVT();
873     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
874          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
875         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
876          LD->getExtensionType() == ISD::ZEXTLOAD))
877       return true;
878   }
879 
880   return TargetLowering::isZExtFree(Val, VT2);
881 }
882 
883 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
884   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
885 }
886 
887 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
888   return Subtarget.hasStdExtZbb();
889 }
890 
891 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
892   return Subtarget.hasStdExtZbb();
893 }
894 
895 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
896                                        bool ForCodeSize) const {
897   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
898     return false;
899   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
900     return false;
901   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
902     return false;
903   if (Imm.isNegZero())
904     return false;
905   return Imm.isZero();
906 }
907 
908 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
909   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
910          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
911          (VT == MVT::f64 && Subtarget.hasStdExtD());
912 }
913 
914 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
915                                                       CallingConv::ID CC,
916                                                       EVT VT) const {
917   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
918   // end up using a GPR but that will be decided based on ABI.
919   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
920     return MVT::f32;
921 
922   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
923 }
924 
925 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
926                                                            CallingConv::ID CC,
927                                                            EVT VT) const {
928   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
929   // end up using a GPR but that will be decided based on ABI.
930   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
931     return 1;
932 
933   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
934 }
935 
936 // Changes the condition code and swaps operands if necessary, so the SetCC
937 // operation matches one of the comparisons supported directly by branches
938 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
939 // with 1/-1.
940 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
941                                     ISD::CondCode &CC, SelectionDAG &DAG) {
942   // Convert X > -1 to X >= 0.
943   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
944     RHS = DAG.getConstant(0, DL, RHS.getValueType());
945     CC = ISD::SETGE;
946     return;
947   }
948   // Convert X < 1 to 0 >= X.
949   if (CC == ISD::SETLT && isOneConstant(RHS)) {
950     RHS = LHS;
951     LHS = DAG.getConstant(0, DL, RHS.getValueType());
952     CC = ISD::SETGE;
953     return;
954   }
955 
956   switch (CC) {
957   default:
958     break;
959   case ISD::SETGT:
960   case ISD::SETLE:
961   case ISD::SETUGT:
962   case ISD::SETULE:
963     CC = ISD::getSetCCSwappedOperands(CC);
964     std::swap(LHS, RHS);
965     break;
966   }
967 }
968 
969 // Return the RISC-V branch opcode that matches the given DAG integer
970 // condition code. The CondCode must be one of those supported by the RISC-V
971 // ISA (see translateSetCCForBranch).
972 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
973   switch (CC) {
974   default:
975     llvm_unreachable("Unsupported CondCode");
976   case ISD::SETEQ:
977     return RISCV::BEQ;
978   case ISD::SETNE:
979     return RISCV::BNE;
980   case ISD::SETLT:
981     return RISCV::BLT;
982   case ISD::SETGE:
983     return RISCV::BGE;
984   case ISD::SETULT:
985     return RISCV::BLTU;
986   case ISD::SETUGE:
987     return RISCV::BGEU;
988   }
989 }
990 
991 RISCVVLMUL RISCVTargetLowering::getLMUL(MVT VT) {
992   assert(VT.isScalableVector() && "Expecting a scalable vector type");
993   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
994   if (VT.getVectorElementType() == MVT::i1)
995     KnownSize *= 8;
996 
997   switch (KnownSize) {
998   default:
999     llvm_unreachable("Invalid LMUL.");
1000   case 8:
1001     return RISCVVLMUL::LMUL_F8;
1002   case 16:
1003     return RISCVVLMUL::LMUL_F4;
1004   case 32:
1005     return RISCVVLMUL::LMUL_F2;
1006   case 64:
1007     return RISCVVLMUL::LMUL_1;
1008   case 128:
1009     return RISCVVLMUL::LMUL_2;
1010   case 256:
1011     return RISCVVLMUL::LMUL_4;
1012   case 512:
1013     return RISCVVLMUL::LMUL_8;
1014   }
1015 }
1016 
1017 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVVLMUL LMul) {
1018   switch (LMul) {
1019   default:
1020     llvm_unreachable("Invalid LMUL.");
1021   case RISCVVLMUL::LMUL_F8:
1022   case RISCVVLMUL::LMUL_F4:
1023   case RISCVVLMUL::LMUL_F2:
1024   case RISCVVLMUL::LMUL_1:
1025     return RISCV::VRRegClassID;
1026   case RISCVVLMUL::LMUL_2:
1027     return RISCV::VRM2RegClassID;
1028   case RISCVVLMUL::LMUL_4:
1029     return RISCV::VRM4RegClassID;
1030   case RISCVVLMUL::LMUL_8:
1031     return RISCV::VRM8RegClassID;
1032   }
1033 }
1034 
1035 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1036   RISCVVLMUL LMUL = getLMUL(VT);
1037   if (LMUL == RISCVVLMUL::LMUL_F8 || LMUL == RISCVVLMUL::LMUL_F4 ||
1038       LMUL == RISCVVLMUL::LMUL_F2 || LMUL == RISCVVLMUL::LMUL_1) {
1039     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1040                   "Unexpected subreg numbering");
1041     return RISCV::sub_vrm1_0 + Index;
1042   }
1043   if (LMUL == RISCVVLMUL::LMUL_2) {
1044     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1045                   "Unexpected subreg numbering");
1046     return RISCV::sub_vrm2_0 + Index;
1047   }
1048   if (LMUL == RISCVVLMUL::LMUL_4) {
1049     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1050                   "Unexpected subreg numbering");
1051     return RISCV::sub_vrm4_0 + Index;
1052   }
1053   llvm_unreachable("Invalid vector type.");
1054 }
1055 
1056 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1057   if (VT.getVectorElementType() == MVT::i1)
1058     return RISCV::VRRegClassID;
1059   return getRegClassIDForLMUL(getLMUL(VT));
1060 }
1061 
1062 // Attempt to decompose a subvector insert/extract between VecVT and
1063 // SubVecVT via subregister indices. Returns the subregister index that
1064 // can perform the subvector insert/extract with the given element index, as
1065 // well as the index corresponding to any leftover subvectors that must be
1066 // further inserted/extracted within the register class for SubVecVT.
1067 std::pair<unsigned, unsigned>
1068 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1069     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1070     const RISCVRegisterInfo *TRI) {
1071   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1072                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1073                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1074                 "Register classes not ordered");
1075   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1076   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1077   // Try to compose a subregister index that takes us from the incoming
1078   // LMUL>1 register class down to the outgoing one. At each step we half
1079   // the LMUL:
1080   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1081   // Note that this is not guaranteed to find a subregister index, such as
1082   // when we are extracting from one VR type to another.
1083   unsigned SubRegIdx = RISCV::NoSubRegister;
1084   for (const unsigned RCID :
1085        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1086     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1087       VecVT = VecVT.getHalfNumVectorElementsVT();
1088       bool IsHi =
1089           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1090       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1091                                             getSubregIndexByMVT(VecVT, IsHi));
1092       if (IsHi)
1093         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1094     }
1095   return {SubRegIdx, InsertExtractIdx};
1096 }
1097 
1098 static bool useRVVForFixedLengthVectorVT(MVT VT,
1099                                          const RISCVSubtarget &Subtarget) {
1100   assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1101   if (!Subtarget.useRVVForFixedLengthVectors())
1102     return false;
1103 
1104   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1105 
1106   // Don't use RVV for vectors we cannot scalarize if required.
1107   switch (VT.getVectorElementType().SimpleTy) {
1108   // i1 is supported but has different rules.
1109   default:
1110     return false;
1111   case MVT::i1:
1112     // Masks can only use a single register.
1113     if (VT.getVectorNumElements() > MinVLen)
1114       return false;
1115     MinVLen /= 8;
1116     break;
1117   case MVT::i8:
1118   case MVT::i16:
1119   case MVT::i32:
1120   case MVT::i64:
1121     break;
1122   case MVT::f16:
1123     if (!Subtarget.hasStdExtZfh())
1124       return false;
1125     break;
1126   case MVT::f32:
1127     if (!Subtarget.hasStdExtF())
1128       return false;
1129     break;
1130   case MVT::f64:
1131     if (!Subtarget.hasStdExtD())
1132       return false;
1133     break;
1134   }
1135 
1136   unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1137   // Don't use RVV for types that don't fit.
1138   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
1139     return false;
1140 
1141   // TODO: Perhaps an artificial restriction, but worth having whilst getting
1142   // the base fixed length RVV support in place.
1143   if (!VT.isPow2VectorType())
1144     return false;
1145 
1146   return true;
1147 }
1148 
1149 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
1150   return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
1151 }
1152 
1153 // Return the largest legal scalable vector type that matches VT's element type.
1154 static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
1155                                             const RISCVSubtarget &Subtarget) {
1156   // This may be called before legal types are setup.
1157   assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
1158           useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
1159          "Expected legal fixed length vector!");
1160 
1161   unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1162 
1163   MVT EltVT = VT.getVectorElementType();
1164   switch (EltVT.SimpleTy) {
1165   default:
1166     llvm_unreachable("unexpected element type for RVV container");
1167   case MVT::i1: {
1168     // Masks are calculated assuming 8-bit elements since that's when we need
1169     // the most elements.
1170     MinVLen /= 8;
1171     unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1172     unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / 8;
1173     return MVT::getScalableVectorVT(MVT::i1, LMul * EltsPerBlock);
1174   }
1175   case MVT::i8:
1176   case MVT::i16:
1177   case MVT::i32:
1178   case MVT::i64:
1179   case MVT::f16:
1180   case MVT::f32:
1181   case MVT::f64: {
1182     unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
1183     unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / EltVT.getSizeInBits();
1184     return MVT::getScalableVectorVT(EltVT, LMul * EltsPerBlock);
1185   }
1186   }
1187 }
1188 
1189 static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
1190                                             const RISCVSubtarget &Subtarget) {
1191   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1192                                           Subtarget);
1193 }
1194 
1195 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1196   return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
1197 }
1198 
1199 // Grow V to consume an entire RVV register.
1200 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1201                                        const RISCVSubtarget &Subtarget) {
1202   assert(VT.isScalableVector() &&
1203          "Expected to convert into a scalable vector!");
1204   assert(V.getValueType().isFixedLengthVector() &&
1205          "Expected a fixed length vector operand!");
1206   SDLoc DL(V);
1207   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1208   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1209 }
1210 
1211 // Shrink V so it's just big enough to maintain a VT's worth of data.
1212 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1213                                          const RISCVSubtarget &Subtarget) {
1214   assert(VT.isFixedLengthVector() &&
1215          "Expected to convert into a fixed length vector!");
1216   assert(V.getValueType().isScalableVector() &&
1217          "Expected a scalable vector operand!");
1218   SDLoc DL(V);
1219   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1220   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1221 }
1222 
1223 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1224 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1225 // the vector type that it is contained in.
1226 static std::pair<SDValue, SDValue>
1227 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1228                 const RISCVSubtarget &Subtarget) {
1229   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1230   MVT XLenVT = Subtarget.getXLenVT();
1231   SDValue VL = VecVT.isFixedLengthVector()
1232                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1233                    : DAG.getRegister(RISCV::X0, XLenVT);
1234   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1235   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1236   return {Mask, VL};
1237 }
1238 
1239 // As above but assuming the given type is a scalable vector type.
1240 static std::pair<SDValue, SDValue>
1241 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1242                         const RISCVSubtarget &Subtarget) {
1243   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1244   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1245 }
1246 
1247 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
1248 // of either is (currently) supported. This can get us into an infinite loop
1249 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1250 // as a ..., etc.
1251 // Until either (or both) of these can reliably lower any node, reporting that
1252 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1253 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1254 // which is not desirable.
1255 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1256     EVT VT, unsigned DefinedValues) const {
1257   return false;
1258 }
1259 
1260 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1261   // Only splats are currently supported.
1262   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1263     return true;
1264 
1265   return false;
1266 }
1267 
1268 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1269                                  const RISCVSubtarget &Subtarget) {
1270   MVT VT = Op.getSimpleValueType();
1271   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1272 
1273   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1274 
1275   SDLoc DL(Op);
1276   SDValue Mask, VL;
1277   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1278 
1279   MVT XLenVT = Subtarget.getXLenVT();
1280   unsigned NumElts = Op.getNumOperands();
1281 
1282   if (VT.getVectorElementType() == MVT::i1) {
1283     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1284       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1285       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1286     }
1287 
1288     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1289       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1290       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1291     }
1292 
1293     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1294     // scalar integer chunks whose bit-width depends on the number of mask
1295     // bits and XLEN.
1296     // First, determine the most appropriate scalar integer type to use. This
1297     // is at most XLenVT, but may be shrunk to a smaller vector element type
1298     // according to the size of the final vector - use i8 chunks rather than
1299     // XLenVT if we're producing a v8i1. This results in more consistent
1300     // codegen across RV32 and RV64.
1301     // If we have to use more than one INSERT_VECTOR_ELT then this optimization
1302     // is likely to increase code size; avoid peforming it in such a case.
1303     unsigned NumViaIntegerBits =
1304         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1305     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1306         (!DAG.shouldOptForSize() || NumElts <= NumViaIntegerBits)) {
1307       // Now we can create our integer vector type. Note that it may be larger
1308       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1309       MVT IntegerViaVecVT =
1310           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1311                            divideCeil(NumElts, NumViaIntegerBits));
1312 
1313       uint64_t Bits = 0;
1314       unsigned BitPos = 0, IntegerEltIdx = 0;
1315       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1316 
1317       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1318         // Once we accumulate enough bits to fill our scalar type, insert into
1319         // our vector and clear our accumulated data.
1320         if (I != 0 && I % NumViaIntegerBits == 0) {
1321           if (NumViaIntegerBits <= 32)
1322             Bits = SignExtend64(Bits, 32);
1323           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1324           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1325                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1326           Bits = 0;
1327           BitPos = 0;
1328           IntegerEltIdx++;
1329         }
1330         SDValue V = Op.getOperand(I);
1331         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1332         Bits |= ((uint64_t)BitValue << BitPos);
1333       }
1334 
1335       // Insert the (remaining) scalar value into position in our integer
1336       // vector type.
1337       if (NumViaIntegerBits <= 32)
1338         Bits = SignExtend64(Bits, 32);
1339       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1340       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1341                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1342 
1343       if (NumElts < NumViaIntegerBits) {
1344         // If we're producing a smaller vector than our minimum legal integer
1345         // type, bitcast to the equivalent (known-legal) mask type, and extract
1346         // our final mask.
1347         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1348         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1349         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1350                           DAG.getConstant(0, DL, XLenVT));
1351       } else {
1352         // Else we must have produced an integer type with the same size as the
1353         // mask type; bitcast for the final result.
1354         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1355         Vec = DAG.getBitcast(VT, Vec);
1356       }
1357 
1358       return Vec;
1359     }
1360 
1361     return SDValue();
1362   }
1363 
1364   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1365     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1366                                         : RISCVISD::VMV_V_X_VL;
1367     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1368     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1369   }
1370 
1371   // Try and match an index sequence, which we can lower directly to the vid
1372   // instruction. An all-undef vector is matched by getSplatValue, above.
1373   if (VT.isInteger()) {
1374     bool IsVID = true;
1375     for (unsigned I = 0; I < NumElts && IsVID; I++)
1376       IsVID &= Op.getOperand(I).isUndef() ||
1377                (isa<ConstantSDNode>(Op.getOperand(I)) &&
1378                 Op.getConstantOperandVal(I) == I);
1379 
1380     if (IsVID) {
1381       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1382       return convertFromScalableVector(VT, VID, DAG, Subtarget);
1383     }
1384   }
1385 
1386   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1387   // when re-interpreted as a vector with a larger element type. For example,
1388   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
1389   // could be instead splat as
1390   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1391   // TODO: This optimization could also work on non-constant splats, but it
1392   // would require bit-manipulation instructions to construct the splat value.
1393   SmallVector<SDValue> Sequence;
1394   unsigned EltBitSize = VT.getScalarSizeInBits();
1395   const auto *BV = cast<BuildVectorSDNode>(Op);
1396   if (VT.isInteger() && EltBitSize < 64 &&
1397       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1398       BV->getRepeatedSequence(Sequence) &&
1399       (Sequence.size() * EltBitSize) <= 64) {
1400     unsigned SeqLen = Sequence.size();
1401     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1402     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1403     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1404             ViaIntVT == MVT::i64) &&
1405            "Unexpected sequence type");
1406 
1407     unsigned EltIdx = 0;
1408     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1409     uint64_t SplatValue = 0;
1410     // Construct the amalgamated value which can be splatted as this larger
1411     // vector type.
1412     for (const auto &SeqV : Sequence) {
1413       if (!SeqV.isUndef())
1414         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1415                        << (EltIdx * EltBitSize));
1416       EltIdx++;
1417     }
1418 
1419     // On RV64, sign-extend from 32 to 64 bits where possible in order to
1420     // achieve better constant materializion.
1421     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1422       SplatValue = SignExtend64(SplatValue, 32);
1423 
1424     // Since we can't introduce illegal i64 types at this stage, we can only
1425     // perform an i64 splat on RV32 if it is its own sign-extended value. That
1426     // way we can use RVV instructions to splat.
1427     assert((ViaIntVT.bitsLE(XLenVT) ||
1428             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1429            "Unexpected bitcast sequence");
1430     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1431       SDValue ViaVL =
1432           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1433       MVT ViaContainerVT =
1434           getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
1435       SDValue Splat =
1436           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1437                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1438       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1439       return DAG.getBitcast(VT, Splat);
1440     }
1441   }
1442 
1443   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1444   // which constitute a large proportion of the elements. In such cases we can
1445   // splat a vector with the dominant element and make up the shortfall with
1446   // INSERT_VECTOR_ELTs.
1447   // Note that this includes vectors of 2 elements by association. The
1448   // upper-most element is the "dominant" one, allowing us to use a splat to
1449   // "insert" the upper element, and an insert of the lower element at position
1450   // 0, which improves codegen.
1451   SDValue DominantValue;
1452   unsigned MostCommonCount = 0;
1453   DenseMap<SDValue, unsigned> ValueCounts;
1454   unsigned NumUndefElts =
1455       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1456 
1457   for (SDValue V : Op->op_values()) {
1458     if (V.isUndef())
1459       continue;
1460 
1461     ValueCounts.insert(std::make_pair(V, 0));
1462     unsigned &Count = ValueCounts[V];
1463 
1464     // Is this value dominant? In case of a tie, prefer the highest element as
1465     // it's cheaper to insert near the beginning of a vector than it is at the
1466     // end.
1467     if (++Count >= MostCommonCount) {
1468       DominantValue = V;
1469       MostCommonCount = Count;
1470     }
1471   }
1472 
1473   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
1474   unsigned NumDefElts = NumElts - NumUndefElts;
1475   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1476 
1477   // Don't perform this optimization when optimizing for size, since
1478   // materializing elements and inserting them tends to cause code bloat.
1479   if (!DAG.shouldOptForSize() &&
1480       ((MostCommonCount > DominantValueCountThreshold) ||
1481        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
1482     // Start by splatting the most common element.
1483     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1484 
1485     DenseSet<SDValue> Processed{DominantValue};
1486     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
1487     for (const auto &OpIdx : enumerate(Op->ops())) {
1488       const SDValue &V = OpIdx.value();
1489       if (V.isUndef() || !Processed.insert(V).second)
1490         continue;
1491       if (ValueCounts[V] == 1) {
1492         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
1493                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
1494       } else {
1495         // Blend in all instances of this value using a VSELECT, using a
1496         // mask where each bit signals whether that element is the one
1497         // we're after.
1498         SmallVector<SDValue> Ops;
1499         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
1500           return DAG.getConstant(V == V1, DL, XLenVT);
1501         });
1502         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
1503                           DAG.getBuildVector(SelMaskTy, DL, Ops),
1504                           DAG.getSplatBuildVector(VT, DL, V), Vec);
1505       }
1506     }
1507 
1508     return Vec;
1509   }
1510 
1511   return SDValue();
1512 }
1513 
1514 // Called by type legalization to handle splat of i64 on RV32.
1515 // FIXME: We can optimize this when the type has sign or zero bits in one
1516 // of the halves.
1517 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
1518                                    SDValue VL, SelectionDAG &DAG,
1519                                    const RISCVSubtarget &Subtarget) {
1520   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1521                            DAG.getConstant(0, DL, MVT::i32));
1522   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1523                            DAG.getConstant(1, DL, MVT::i32));
1524 
1525   // Fall back to a stack store and stride x0 vector load.
1526   MachineFunction &MF = DAG.getMachineFunction();
1527   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
1528 
1529   // We use the same frame index we use for moving two i32s into 64-bit FPR.
1530   // This is an analogous operation.
1531   int FI = FuncInfo->getMoveF64FrameIndex(MF);
1532   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
1533   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1534   SDValue StackSlot =
1535       DAG.getFrameIndex(FI, TLI.getPointerTy(DAG.getDataLayout()));
1536 
1537   SDValue Chain = DAG.getEntryNode();
1538   Lo = DAG.getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
1539 
1540   SDValue OffsetSlot =
1541       DAG.getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
1542   Hi = DAG.getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4), Align(8));
1543 
1544   Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
1545 
1546   MVT XLenVT = Subtarget.getXLenVT();
1547   SDVTList VTs = DAG.getVTList({VT, MVT::Other});
1548   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1549   SDValue Ops[] = {Chain, IntID, StackSlot, DAG.getRegister(RISCV::X0, XLenVT),
1550                    VL};
1551 
1552   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64,
1553                                  MPI, Align(8), MachineMemOperand::MOLoad);
1554 }
1555 
1556 // This function lowers a splat of a scalar operand Splat with the vector
1557 // length VL. It ensures the final sequence is type legal, which is useful when
1558 // lowering a splat after type legalization.
1559 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
1560                                 SelectionDAG &DAG,
1561                                 const RISCVSubtarget &Subtarget) {
1562   if (VT.isFloatingPoint())
1563     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
1564 
1565   MVT XLenVT = Subtarget.getXLenVT();
1566 
1567   // Simplest case is that the operand needs to be promoted to XLenVT.
1568   if (Scalar.getValueType().bitsLE(XLenVT)) {
1569     // If the operand is a constant, sign extend to increase our chances
1570     // of being able to use a .vi instruction. ANY_EXTEND would become a
1571     // a zero extend and the simm5 check in isel would fail.
1572     // FIXME: Should we ignore the upper bits in isel instead?
1573     unsigned ExtOpc =
1574         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
1575     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
1576     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
1577   }
1578 
1579   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
1580          "Unexpected scalar for splat lowering!");
1581 
1582   // If this is a sign-extended 32-bit constant, we can truncate it and rely
1583   // on the instruction to sign-extend since SEW>XLEN.
1584   if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar)) {
1585     if (isInt<32>(CVal->getSExtValue()))
1586       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
1587                          DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32),
1588                          VL);
1589   }
1590 
1591   // Otherwise use the more complicated splatting algorithm.
1592   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG, Subtarget);
1593 }
1594 
1595 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1596                                    const RISCVSubtarget &Subtarget) {
1597   SDValue V1 = Op.getOperand(0);
1598   SDValue V2 = Op.getOperand(1);
1599   SDLoc DL(Op);
1600   MVT XLenVT = Subtarget.getXLenVT();
1601   MVT VT = Op.getSimpleValueType();
1602   unsigned NumElts = VT.getVectorNumElements();
1603   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1604 
1605   MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
1606 
1607   SDValue TrueMask, VL;
1608   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1609 
1610   if (SVN->isSplat()) {
1611     const int Lane = SVN->getSplatIndex();
1612     if (Lane >= 0) {
1613       MVT SVT = VT.getVectorElementType();
1614 
1615       // Turn splatted vector load into a strided load with an X0 stride.
1616       SDValue V = V1;
1617       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
1618       // with undef.
1619       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
1620       int Offset = Lane;
1621       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
1622         int OpElements =
1623             V.getOperand(0).getSimpleValueType().getVectorNumElements();
1624         V = V.getOperand(Offset / OpElements);
1625         Offset %= OpElements;
1626       }
1627 
1628       // We need to ensure the load isn't atomic or volatile.
1629       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
1630         auto *Ld = cast<LoadSDNode>(V);
1631         Offset *= SVT.getStoreSize();
1632         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
1633                                                    TypeSize::Fixed(Offset), DL);
1634 
1635         // If this is SEW=64 on RV32, use a strided load with a stride of x0.
1636         if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
1637           SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
1638           SDValue IntID =
1639               DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1640           SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
1641                            DAG.getRegister(RISCV::X0, XLenVT), VL};
1642           SDValue NewLoad = DAG.getMemIntrinsicNode(
1643               ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
1644               DAG.getMachineFunction().getMachineMemOperand(
1645                   Ld->getMemOperand(), Offset, SVT.getStoreSize()));
1646           DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
1647           return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
1648         }
1649 
1650         // Otherwise use a scalar load and splat. This will give the best
1651         // opportunity to fold a splat into the operation. ISel can turn it into
1652         // the x0 strided load if we aren't able to fold away the select.
1653         if (SVT.isFloatingPoint())
1654           V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
1655                           Ld->getPointerInfo().getWithOffset(Offset),
1656                           Ld->getOriginalAlign(),
1657                           Ld->getMemOperand()->getFlags());
1658         else
1659           V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
1660                              Ld->getPointerInfo().getWithOffset(Offset), SVT,
1661                              Ld->getOriginalAlign(),
1662                              Ld->getMemOperand()->getFlags());
1663         DAG.makeEquivalentMemoryOrdering(Ld, V);
1664 
1665         unsigned Opc =
1666             VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
1667         SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
1668         return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1669       }
1670 
1671       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1672       assert(Lane < (int)NumElts && "Unexpected lane!");
1673       SDValue Gather =
1674           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1675                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
1676       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1677     }
1678   }
1679 
1680   // Detect shuffles which can be re-expressed as vector selects; these are
1681   // shuffles in which each element in the destination is taken from an element
1682   // at the corresponding index in either source vectors.
1683   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
1684     int MaskIndex = MaskIdx.value();
1685     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
1686   });
1687 
1688   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
1689 
1690   SmallVector<SDValue> MaskVals;
1691   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
1692   // merged with a second vrgather.
1693   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
1694 
1695   // By default we preserve the original operand order, and use a mask to
1696   // select LHS as true and RHS as false. However, since RVV vector selects may
1697   // feature splats but only on the LHS, we may choose to invert our mask and
1698   // instead select between RHS and LHS.
1699   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
1700   bool InvertMask = IsSelect == SwapOps;
1701 
1702   // Now construct the mask that will be used by the vselect or blended
1703   // vrgather operation. For vrgathers, construct the appropriate indices into
1704   // each vector.
1705   for (int MaskIndex : SVN->getMask()) {
1706     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
1707     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
1708     if (!IsSelect) {
1709       bool IsLHS = MaskIndex < (int)NumElts;
1710       // For "undef" elements of -1, shuffle in element 0 instead.
1711       GatherIndicesLHS.push_back(
1712           DAG.getConstant(IsLHS ? std::max(MaskIndex, 0) : 0, DL, XLenVT));
1713       // TODO: If we're masking out unused elements anyway, it might produce
1714       // better code if we use the most-common element index instead of 0.
1715       GatherIndicesRHS.push_back(
1716           DAG.getConstant(IsLHS ? 0 : MaskIndex - NumElts, DL, XLenVT));
1717     }
1718   }
1719 
1720   if (SwapOps) {
1721     std::swap(V1, V2);
1722     std::swap(GatherIndicesLHS, GatherIndicesRHS);
1723   }
1724 
1725   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
1726   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
1727   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
1728 
1729   if (IsSelect)
1730     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
1731 
1732   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
1733     // On such a large vector we're unable to use i8 as the index type.
1734     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
1735     // may involve vector splitting if we're already at LMUL=8, or our
1736     // user-supplied maximum fixed-length LMUL.
1737     return SDValue();
1738   }
1739 
1740   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
1741   MVT IndexVT = VT.changeTypeToInteger();
1742   // Since we can't introduce illegal index types at this stage, use i16 and
1743   // vrgatherei16 if the corresponding index type for plain vrgather is greater
1744   // than XLenVT.
1745   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
1746     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
1747     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
1748   }
1749 
1750   MVT IndexContainerVT =
1751       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
1752 
1753   SDValue Gather;
1754   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
1755   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
1756   if (SDValue SplatValue = DAG.getSplatValue(V1)) {
1757     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
1758   } else {
1759     SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
1760     LHSIndices =
1761         convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
1762 
1763     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1764     Gather =
1765         DAG.getNode(GatherOpc, DL, ContainerVT, V1, LHSIndices, TrueMask, VL);
1766   }
1767 
1768   // If a second vector operand is used by this shuffle, blend it in with an
1769   // additional vrgather.
1770   if (!V2.isUndef()) {
1771     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
1772     SelectMask =
1773         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
1774 
1775     SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
1776     RHSIndices =
1777         convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
1778 
1779     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
1780     V2 = DAG.getNode(GatherOpc, DL, ContainerVT, V2, RHSIndices, TrueMask, VL);
1781     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
1782                          Gather, VL);
1783   }
1784 
1785   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1786 }
1787 
1788 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
1789                                      SDLoc DL, SelectionDAG &DAG,
1790                                      const RISCVSubtarget &Subtarget) {
1791   if (VT.isScalableVector())
1792     return DAG.getFPExtendOrRound(Op, DL, VT);
1793   assert(VT.isFixedLengthVector() &&
1794          "Unexpected value type for RVV FP extend/round lowering");
1795   SDValue Mask, VL;
1796   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1797   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
1798                         ? RISCVISD::FP_EXTEND_VL
1799                         : RISCVISD::FP_ROUND_VL;
1800   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
1801 }
1802 
1803 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
1804                                             SelectionDAG &DAG) const {
1805   switch (Op.getOpcode()) {
1806   default:
1807     report_fatal_error("unimplemented operand");
1808   case ISD::GlobalAddress:
1809     return lowerGlobalAddress(Op, DAG);
1810   case ISD::BlockAddress:
1811     return lowerBlockAddress(Op, DAG);
1812   case ISD::ConstantPool:
1813     return lowerConstantPool(Op, DAG);
1814   case ISD::JumpTable:
1815     return lowerJumpTable(Op, DAG);
1816   case ISD::GlobalTLSAddress:
1817     return lowerGlobalTLSAddress(Op, DAG);
1818   case ISD::SELECT:
1819     return lowerSELECT(Op, DAG);
1820   case ISD::BRCOND:
1821     return lowerBRCOND(Op, DAG);
1822   case ISD::VASTART:
1823     return lowerVASTART(Op, DAG);
1824   case ISD::FRAMEADDR:
1825     return lowerFRAMEADDR(Op, DAG);
1826   case ISD::RETURNADDR:
1827     return lowerRETURNADDR(Op, DAG);
1828   case ISD::SHL_PARTS:
1829     return lowerShiftLeftParts(Op, DAG);
1830   case ISD::SRA_PARTS:
1831     return lowerShiftRightParts(Op, DAG, true);
1832   case ISD::SRL_PARTS:
1833     return lowerShiftRightParts(Op, DAG, false);
1834   case ISD::BITCAST: {
1835     SDLoc DL(Op);
1836     EVT VT = Op.getValueType();
1837     SDValue Op0 = Op.getOperand(0);
1838     EVT Op0VT = Op0.getValueType();
1839     MVT XLenVT = Subtarget.getXLenVT();
1840     if (VT.isFixedLengthVector()) {
1841       // We can handle fixed length vector bitcasts with a simple replacement
1842       // in isel.
1843       if (Op0VT.isFixedLengthVector())
1844         return Op;
1845       // When bitcasting from scalar to fixed-length vector, insert the scalar
1846       // into a one-element vector of the result type, and perform a vector
1847       // bitcast.
1848       if (!Op0VT.isVector()) {
1849         auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
1850         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
1851                                               DAG.getUNDEF(BVT), Op0,
1852                                               DAG.getConstant(0, DL, XLenVT)));
1853       }
1854       return SDValue();
1855     }
1856     // Custom-legalize bitcasts from fixed-length vector types to scalar types
1857     // thus: bitcast the vector to a one-element vector type whose element type
1858     // is the same as the result type, and extract the first element.
1859     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
1860       LLVMContext &Context = *DAG.getContext();
1861       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
1862       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
1863                          DAG.getConstant(0, DL, XLenVT));
1864     }
1865     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
1866       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
1867       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
1868       return FPConv;
1869     }
1870     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
1871         Subtarget.hasStdExtF()) {
1872       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
1873       SDValue FPConv =
1874           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
1875       return FPConv;
1876     }
1877     return SDValue();
1878   }
1879   case ISD::INTRINSIC_WO_CHAIN:
1880     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
1881   case ISD::INTRINSIC_W_CHAIN:
1882     return LowerINTRINSIC_W_CHAIN(Op, DAG);
1883   case ISD::BSWAP:
1884   case ISD::BITREVERSE: {
1885     // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining.
1886     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
1887     MVT VT = Op.getSimpleValueType();
1888     SDLoc DL(Op);
1889     // Start with the maximum immediate value which is the bitwidth - 1.
1890     unsigned Imm = VT.getSizeInBits() - 1;
1891     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
1892     if (Op.getOpcode() == ISD::BSWAP)
1893       Imm &= ~0x7U;
1894     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
1895                        DAG.getConstant(Imm, DL, VT));
1896   }
1897   case ISD::FSHL:
1898   case ISD::FSHR: {
1899     MVT VT = Op.getSimpleValueType();
1900     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
1901     SDLoc DL(Op);
1902     if (Op.getOperand(2).getOpcode() == ISD::Constant)
1903       return Op;
1904     // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
1905     // use log(XLen) bits. Mask the shift amount accordingly.
1906     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
1907     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
1908                                 DAG.getConstant(ShAmtWidth, DL, VT));
1909     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
1910     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
1911   }
1912   case ISD::TRUNCATE: {
1913     SDLoc DL(Op);
1914     MVT VT = Op.getSimpleValueType();
1915     // Only custom-lower vector truncates
1916     if (!VT.isVector())
1917       return Op;
1918 
1919     // Truncates to mask types are handled differently
1920     if (VT.getVectorElementType() == MVT::i1)
1921       return lowerVectorMaskTrunc(Op, DAG);
1922 
1923     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
1924     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
1925     // truncate by one power of two at a time.
1926     MVT DstEltVT = VT.getVectorElementType();
1927 
1928     SDValue Src = Op.getOperand(0);
1929     MVT SrcVT = Src.getSimpleValueType();
1930     MVT SrcEltVT = SrcVT.getVectorElementType();
1931 
1932     assert(DstEltVT.bitsLT(SrcEltVT) &&
1933            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
1934            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
1935            "Unexpected vector truncate lowering");
1936 
1937     MVT ContainerVT = SrcVT;
1938     if (SrcVT.isFixedLengthVector()) {
1939       ContainerVT = getContainerForFixedLengthVector(SrcVT);
1940       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
1941     }
1942 
1943     SDValue Result = Src;
1944     SDValue Mask, VL;
1945     std::tie(Mask, VL) =
1946         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
1947     LLVMContext &Context = *DAG.getContext();
1948     const ElementCount Count = ContainerVT.getVectorElementCount();
1949     do {
1950       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
1951       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
1952       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
1953                            Mask, VL);
1954     } while (SrcEltVT != DstEltVT);
1955 
1956     if (SrcVT.isFixedLengthVector())
1957       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
1958 
1959     return Result;
1960   }
1961   case ISD::ANY_EXTEND:
1962   case ISD::ZERO_EXTEND:
1963     if (Op.getOperand(0).getValueType().isVector() &&
1964         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
1965       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
1966     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
1967   case ISD::SIGN_EXTEND:
1968     if (Op.getOperand(0).getValueType().isVector() &&
1969         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
1970       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
1971     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
1972   case ISD::SPLAT_VECTOR_PARTS:
1973     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
1974   case ISD::INSERT_VECTOR_ELT:
1975     return lowerINSERT_VECTOR_ELT(Op, DAG);
1976   case ISD::EXTRACT_VECTOR_ELT:
1977     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
1978   case ISD::VSCALE: {
1979     MVT VT = Op.getSimpleValueType();
1980     SDLoc DL(Op);
1981     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
1982     // We define our scalable vector types for lmul=1 to use a 64 bit known
1983     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
1984     // vscale as VLENB / 8.
1985     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
1986     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
1987                                  DAG.getConstant(3, DL, VT));
1988     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
1989   }
1990   case ISD::FP_EXTEND: {
1991     // RVV can only do fp_extend to types double the size as the source. We
1992     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
1993     // via f32.
1994     SDLoc DL(Op);
1995     MVT VT = Op.getSimpleValueType();
1996     SDValue Src = Op.getOperand(0);
1997     MVT SrcVT = Src.getSimpleValueType();
1998 
1999     // Prepare any fixed-length vector operands.
2000     MVT ContainerVT = VT;
2001     if (SrcVT.isFixedLengthVector()) {
2002       ContainerVT = getContainerForFixedLengthVector(VT);
2003       MVT SrcContainerVT =
2004           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
2005       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2006     }
2007 
2008     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
2009         SrcVT.getVectorElementType() != MVT::f16) {
2010       // For scalable vectors, we only need to close the gap between
2011       // vXf16->vXf64.
2012       if (!VT.isFixedLengthVector())
2013         return Op;
2014       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
2015       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2016       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2017     }
2018 
2019     MVT InterVT = VT.changeVectorElementType(MVT::f32);
2020     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
2021     SDValue IntermediateExtend = getRVVFPExtendOrRound(
2022         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
2023 
2024     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
2025                                            DL, DAG, Subtarget);
2026     if (VT.isFixedLengthVector())
2027       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
2028     return Extend;
2029   }
2030   case ISD::FP_ROUND: {
2031     // RVV can only do fp_round to types half the size as the source. We
2032     // custom-lower f64->f16 rounds via RVV's round-to-odd float
2033     // conversion instruction.
2034     SDLoc DL(Op);
2035     MVT VT = Op.getSimpleValueType();
2036     SDValue Src = Op.getOperand(0);
2037     MVT SrcVT = Src.getSimpleValueType();
2038 
2039     // Prepare any fixed-length vector operands.
2040     MVT ContainerVT = VT;
2041     if (VT.isFixedLengthVector()) {
2042       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2043       ContainerVT =
2044           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2045       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2046     }
2047 
2048     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
2049         SrcVT.getVectorElementType() != MVT::f64) {
2050       // For scalable vectors, we only need to close the gap between
2051       // vXf64<->vXf16.
2052       if (!VT.isFixedLengthVector())
2053         return Op;
2054       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
2055       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
2056       return convertFromScalableVector(VT, Src, DAG, Subtarget);
2057     }
2058 
2059     SDValue Mask, VL;
2060     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2061 
2062     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
2063     SDValue IntermediateRound =
2064         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
2065     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
2066                                           DL, DAG, Subtarget);
2067 
2068     if (VT.isFixedLengthVector())
2069       return convertFromScalableVector(VT, Round, DAG, Subtarget);
2070     return Round;
2071   }
2072   case ISD::FP_TO_SINT:
2073   case ISD::FP_TO_UINT:
2074   case ISD::SINT_TO_FP:
2075   case ISD::UINT_TO_FP: {
2076     // RVV can only do fp<->int conversions to types half/double the size as
2077     // the source. We custom-lower any conversions that do two hops into
2078     // sequences.
2079     MVT VT = Op.getSimpleValueType();
2080     if (!VT.isVector())
2081       return Op;
2082     SDLoc DL(Op);
2083     SDValue Src = Op.getOperand(0);
2084     MVT EltVT = VT.getVectorElementType();
2085     MVT SrcVT = Src.getSimpleValueType();
2086     MVT SrcEltVT = SrcVT.getVectorElementType();
2087     unsigned EltSize = EltVT.getSizeInBits();
2088     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2089     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2090            "Unexpected vector element types");
2091 
2092     bool IsInt2FP = SrcEltVT.isInteger();
2093     // Widening conversions
2094     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2095       if (IsInt2FP) {
2096         // Do a regular integer sign/zero extension then convert to float.
2097         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2098                                       VT.getVectorElementCount());
2099         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2100                                  ? ISD::ZERO_EXTEND
2101                                  : ISD::SIGN_EXTEND;
2102         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2103         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2104       }
2105       // FP2Int
2106       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2107       // Do one doubling fp_extend then complete the operation by converting
2108       // to int.
2109       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2110       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2111       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2112     }
2113 
2114     // Narrowing conversions
2115     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2116       if (IsInt2FP) {
2117         // One narrowing int_to_fp, then an fp_round.
2118         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2119         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2120         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2121         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2122       }
2123       // FP2Int
2124       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2125       // representable by the integer, the result is poison.
2126       MVT IVecVT =
2127           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2128                            VT.getVectorElementCount());
2129       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2130       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2131     }
2132 
2133     // Scalable vectors can exit here. Patterns will handle equally-sized
2134     // conversions halving/doubling ones.
2135     if (!VT.isFixedLengthVector())
2136       return Op;
2137 
2138     // For fixed-length vectors we lower to a custom "VL" node.
2139     unsigned RVVOpc = 0;
2140     switch (Op.getOpcode()) {
2141     default:
2142       llvm_unreachable("Impossible opcode");
2143     case ISD::FP_TO_SINT:
2144       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2145       break;
2146     case ISD::FP_TO_UINT:
2147       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2148       break;
2149     case ISD::SINT_TO_FP:
2150       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2151       break;
2152     case ISD::UINT_TO_FP:
2153       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2154       break;
2155     }
2156 
2157     MVT ContainerVT, SrcContainerVT;
2158     // Derive the reference container type from the larger vector type.
2159     if (SrcEltSize > EltSize) {
2160       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2161       ContainerVT =
2162           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2163     } else {
2164       ContainerVT = getContainerForFixedLengthVector(VT);
2165       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2166     }
2167 
2168     SDValue Mask, VL;
2169     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2170 
2171     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2172     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2173     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2174   }
2175   case ISD::VECREDUCE_ADD:
2176   case ISD::VECREDUCE_UMAX:
2177   case ISD::VECREDUCE_SMAX:
2178   case ISD::VECREDUCE_UMIN:
2179   case ISD::VECREDUCE_SMIN:
2180     return lowerVECREDUCE(Op, DAG);
2181   case ISD::VECREDUCE_AND:
2182   case ISD::VECREDUCE_OR:
2183   case ISD::VECREDUCE_XOR:
2184     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2185       return lowerVectorMaskVECREDUCE(Op, DAG);
2186     return lowerVECREDUCE(Op, DAG);
2187   case ISD::VECREDUCE_FADD:
2188   case ISD::VECREDUCE_SEQ_FADD:
2189     return lowerFPVECREDUCE(Op, DAG);
2190   case ISD::INSERT_SUBVECTOR:
2191     return lowerINSERT_SUBVECTOR(Op, DAG);
2192   case ISD::EXTRACT_SUBVECTOR:
2193     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2194   case ISD::STEP_VECTOR:
2195     return lowerSTEP_VECTOR(Op, DAG);
2196   case ISD::VECTOR_REVERSE:
2197     return lowerVECTOR_REVERSE(Op, DAG);
2198   case ISD::BUILD_VECTOR:
2199     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2200   case ISD::VECTOR_SHUFFLE:
2201     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2202   case ISD::CONCAT_VECTORS: {
2203     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2204     // better than going through the stack, as the default expansion does.
2205     SDLoc DL(Op);
2206     MVT VT = Op.getSimpleValueType();
2207     unsigned NumOpElts =
2208         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2209     SDValue Vec = DAG.getUNDEF(VT);
2210     for (const auto &OpIdx : enumerate(Op->ops()))
2211       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2212                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2213     return Vec;
2214   }
2215   case ISD::LOAD:
2216     return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2217   case ISD::STORE:
2218     return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2219   case ISD::MLOAD:
2220     return lowerMLOAD(Op, DAG);
2221   case ISD::MSTORE:
2222     return lowerMSTORE(Op, DAG);
2223   case ISD::SETCC:
2224     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2225   case ISD::ADD:
2226     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2227   case ISD::SUB:
2228     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2229   case ISD::MUL:
2230     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2231   case ISD::MULHS:
2232     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2233   case ISD::MULHU:
2234     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2235   case ISD::AND:
2236     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2237                                               RISCVISD::AND_VL);
2238   case ISD::OR:
2239     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2240                                               RISCVISD::OR_VL);
2241   case ISD::XOR:
2242     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2243                                               RISCVISD::XOR_VL);
2244   case ISD::SDIV:
2245     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2246   case ISD::SREM:
2247     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2248   case ISD::UDIV:
2249     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2250   case ISD::UREM:
2251     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2252   case ISD::SHL:
2253     return lowerToScalableOp(Op, DAG, RISCVISD::SHL_VL);
2254   case ISD::SRA:
2255     return lowerToScalableOp(Op, DAG, RISCVISD::SRA_VL);
2256   case ISD::SRL:
2257     return lowerToScalableOp(Op, DAG, RISCVISD::SRL_VL);
2258   case ISD::FADD:
2259     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2260   case ISD::FSUB:
2261     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2262   case ISD::FMUL:
2263     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2264   case ISD::FDIV:
2265     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2266   case ISD::FNEG:
2267     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2268   case ISD::FABS:
2269     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2270   case ISD::FSQRT:
2271     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2272   case ISD::FMA:
2273     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2274   case ISD::SMIN:
2275     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2276   case ISD::SMAX:
2277     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2278   case ISD::UMIN:
2279     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2280   case ISD::UMAX:
2281     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2282   case ISD::FMINNUM:
2283     return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
2284   case ISD::FMAXNUM:
2285     return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
2286   case ISD::ABS:
2287     return lowerABS(Op, DAG);
2288   case ISD::VSELECT:
2289     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2290   case ISD::FCOPYSIGN:
2291     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2292   case ISD::MGATHER:
2293     return lowerMGATHER(Op, DAG);
2294   case ISD::MSCATTER:
2295     return lowerMSCATTER(Op, DAG);
2296   case ISD::FLT_ROUNDS_:
2297     return lowerGET_ROUNDING(Op, DAG);
2298   case ISD::SET_ROUNDING:
2299     return lowerSET_ROUNDING(Op, DAG);
2300   }
2301 }
2302 
2303 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2304                              SelectionDAG &DAG, unsigned Flags) {
2305   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2306 }
2307 
2308 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2309                              SelectionDAG &DAG, unsigned Flags) {
2310   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2311                                    Flags);
2312 }
2313 
2314 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2315                              SelectionDAG &DAG, unsigned Flags) {
2316   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2317                                    N->getOffset(), Flags);
2318 }
2319 
2320 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2321                              SelectionDAG &DAG, unsigned Flags) {
2322   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2323 }
2324 
2325 template <class NodeTy>
2326 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2327                                      bool IsLocal) const {
2328   SDLoc DL(N);
2329   EVT Ty = getPointerTy(DAG.getDataLayout());
2330 
2331   if (isPositionIndependent()) {
2332     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2333     if (IsLocal)
2334       // Use PC-relative addressing to access the symbol. This generates the
2335       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
2336       // %pcrel_lo(auipc)).
2337       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2338 
2339     // Use PC-relative addressing to access the GOT for this symbol, then load
2340     // the address from the GOT. This generates the pattern (PseudoLA sym),
2341     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
2342     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
2343   }
2344 
2345   switch (getTargetMachine().getCodeModel()) {
2346   default:
2347     report_fatal_error("Unsupported code model for lowering");
2348   case CodeModel::Small: {
2349     // Generate a sequence for accessing addresses within the first 2 GiB of
2350     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
2351     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
2352     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
2353     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2354     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
2355   }
2356   case CodeModel::Medium: {
2357     // Generate a sequence for accessing addresses within any 2GiB range within
2358     // the address space. This generates the pattern (PseudoLLA sym), which
2359     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
2360     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2361     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2362   }
2363   }
2364 }
2365 
2366 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
2367                                                 SelectionDAG &DAG) const {
2368   SDLoc DL(Op);
2369   EVT Ty = Op.getValueType();
2370   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2371   int64_t Offset = N->getOffset();
2372   MVT XLenVT = Subtarget.getXLenVT();
2373 
2374   const GlobalValue *GV = N->getGlobal();
2375   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
2376   SDValue Addr = getAddr(N, DAG, IsLocal);
2377 
2378   // In order to maximise the opportunity for common subexpression elimination,
2379   // emit a separate ADD node for the global address offset instead of folding
2380   // it in the global address node. Later peephole optimisations may choose to
2381   // fold it back in when profitable.
2382   if (Offset != 0)
2383     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2384                        DAG.getConstant(Offset, DL, XLenVT));
2385   return Addr;
2386 }
2387 
2388 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
2389                                                SelectionDAG &DAG) const {
2390   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2391 
2392   return getAddr(N, DAG);
2393 }
2394 
2395 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
2396                                                SelectionDAG &DAG) const {
2397   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2398 
2399   return getAddr(N, DAG);
2400 }
2401 
2402 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
2403                                             SelectionDAG &DAG) const {
2404   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2405 
2406   return getAddr(N, DAG);
2407 }
2408 
2409 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
2410                                               SelectionDAG &DAG,
2411                                               bool UseGOT) const {
2412   SDLoc DL(N);
2413   EVT Ty = getPointerTy(DAG.getDataLayout());
2414   const GlobalValue *GV = N->getGlobal();
2415   MVT XLenVT = Subtarget.getXLenVT();
2416 
2417   if (UseGOT) {
2418     // Use PC-relative addressing to access the GOT for this TLS symbol, then
2419     // load the address from the GOT and add the thread pointer. This generates
2420     // the pattern (PseudoLA_TLS_IE sym), which expands to
2421     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
2422     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2423     SDValue Load =
2424         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
2425 
2426     // Add the thread pointer.
2427     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2428     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
2429   }
2430 
2431   // Generate a sequence for accessing the address relative to the thread
2432   // pointer, with the appropriate adjustment for the thread pointer offset.
2433   // This generates the pattern
2434   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
2435   SDValue AddrHi =
2436       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
2437   SDValue AddrAdd =
2438       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
2439   SDValue AddrLo =
2440       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
2441 
2442   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2443   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2444   SDValue MNAdd = SDValue(
2445       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
2446       0);
2447   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
2448 }
2449 
2450 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
2451                                                SelectionDAG &DAG) const {
2452   SDLoc DL(N);
2453   EVT Ty = getPointerTy(DAG.getDataLayout());
2454   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
2455   const GlobalValue *GV = N->getGlobal();
2456 
2457   // Use a PC-relative addressing mode to access the global dynamic GOT address.
2458   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
2459   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
2460   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2461   SDValue Load =
2462       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
2463 
2464   // Prepare argument list to generate call.
2465   ArgListTy Args;
2466   ArgListEntry Entry;
2467   Entry.Node = Load;
2468   Entry.Ty = CallTy;
2469   Args.push_back(Entry);
2470 
2471   // Setup call to __tls_get_addr.
2472   TargetLowering::CallLoweringInfo CLI(DAG);
2473   CLI.setDebugLoc(DL)
2474       .setChain(DAG.getEntryNode())
2475       .setLibCallee(CallingConv::C, CallTy,
2476                     DAG.getExternalSymbol("__tls_get_addr", Ty),
2477                     std::move(Args));
2478 
2479   return LowerCallTo(CLI).first;
2480 }
2481 
2482 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2483                                                    SelectionDAG &DAG) const {
2484   SDLoc DL(Op);
2485   EVT Ty = Op.getValueType();
2486   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2487   int64_t Offset = N->getOffset();
2488   MVT XLenVT = Subtarget.getXLenVT();
2489 
2490   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2491 
2492   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2493       CallingConv::GHC)
2494     report_fatal_error("In GHC calling convention TLS is not supported");
2495 
2496   SDValue Addr;
2497   switch (Model) {
2498   case TLSModel::LocalExec:
2499     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2500     break;
2501   case TLSModel::InitialExec:
2502     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2503     break;
2504   case TLSModel::LocalDynamic:
2505   case TLSModel::GeneralDynamic:
2506     Addr = getDynamicTLSAddr(N, DAG);
2507     break;
2508   }
2509 
2510   // In order to maximise the opportunity for common subexpression elimination,
2511   // emit a separate ADD node for the global address offset instead of folding
2512   // it in the global address node. Later peephole optimisations may choose to
2513   // fold it back in when profitable.
2514   if (Offset != 0)
2515     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2516                        DAG.getConstant(Offset, DL, XLenVT));
2517   return Addr;
2518 }
2519 
2520 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2521   SDValue CondV = Op.getOperand(0);
2522   SDValue TrueV = Op.getOperand(1);
2523   SDValue FalseV = Op.getOperand(2);
2524   SDLoc DL(Op);
2525   MVT XLenVT = Subtarget.getXLenVT();
2526 
2527   // If the result type is XLenVT and CondV is the output of a SETCC node
2528   // which also operated on XLenVT inputs, then merge the SETCC node into the
2529   // lowered RISCVISD::SELECT_CC to take advantage of the integer
2530   // compare+branch instructions. i.e.:
2531   // (select (setcc lhs, rhs, cc), truev, falsev)
2532   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
2533   if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
2534       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
2535     SDValue LHS = CondV.getOperand(0);
2536     SDValue RHS = CondV.getOperand(1);
2537     auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
2538     ISD::CondCode CCVal = CC->get();
2539 
2540     // Special case for a select of 2 constants that have a diffence of 1.
2541     // Normally this is done by DAGCombine, but if the select is introduced by
2542     // type legalization or op legalization, we miss it. Restricting to SETLT
2543     // case for now because that is what signed saturating add/sub need.
2544     // FIXME: We don't need the condition to be SETLT or even a SETCC,
2545     // but we would probably want to swap the true/false values if the condition
2546     // is SETGE/SETLE to avoid an XORI.
2547     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
2548         CCVal == ISD::SETLT) {
2549       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
2550       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
2551       if (TrueVal - 1 == FalseVal)
2552         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
2553       if (TrueVal + 1 == FalseVal)
2554         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
2555     }
2556 
2557     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2558 
2559     SDValue TargetCC = DAG.getTargetConstant(CCVal, DL, XLenVT);
2560     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
2561     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2562   }
2563 
2564   // Otherwise:
2565   // (select condv, truev, falsev)
2566   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
2567   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
2568   SDValue SetNE = DAG.getTargetConstant(ISD::SETNE, DL, XLenVT);
2569 
2570   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
2571 
2572   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2573 }
2574 
2575 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2576   SDValue CondV = Op.getOperand(1);
2577   SDLoc DL(Op);
2578   MVT XLenVT = Subtarget.getXLenVT();
2579 
2580   if (CondV.getOpcode() == ISD::SETCC &&
2581       CondV.getOperand(0).getValueType() == XLenVT) {
2582     SDValue LHS = CondV.getOperand(0);
2583     SDValue RHS = CondV.getOperand(1);
2584     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
2585 
2586     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2587 
2588     SDValue TargetCC = DAG.getCondCode(CCVal);
2589     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2590                        LHS, RHS, TargetCC, Op.getOperand(2));
2591   }
2592 
2593   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2594                      CondV, DAG.getConstant(0, DL, XLenVT),
2595                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
2596 }
2597 
2598 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2599   MachineFunction &MF = DAG.getMachineFunction();
2600   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
2601 
2602   SDLoc DL(Op);
2603   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2604                                  getPointerTy(MF.getDataLayout()));
2605 
2606   // vastart just stores the address of the VarArgsFrameIndex slot into the
2607   // memory location argument.
2608   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2609   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2610                       MachinePointerInfo(SV));
2611 }
2612 
2613 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
2614                                             SelectionDAG &DAG) const {
2615   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2616   MachineFunction &MF = DAG.getMachineFunction();
2617   MachineFrameInfo &MFI = MF.getFrameInfo();
2618   MFI.setFrameAddressIsTaken(true);
2619   Register FrameReg = RI.getFrameRegister(MF);
2620   int XLenInBytes = Subtarget.getXLen() / 8;
2621 
2622   EVT VT = Op.getValueType();
2623   SDLoc DL(Op);
2624   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
2625   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2626   while (Depth--) {
2627     int Offset = -(XLenInBytes * 2);
2628     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
2629                               DAG.getIntPtrConstant(Offset, DL));
2630     FrameAddr =
2631         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2632   }
2633   return FrameAddr;
2634 }
2635 
2636 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
2637                                              SelectionDAG &DAG) const {
2638   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2639   MachineFunction &MF = DAG.getMachineFunction();
2640   MachineFrameInfo &MFI = MF.getFrameInfo();
2641   MFI.setReturnAddressIsTaken(true);
2642   MVT XLenVT = Subtarget.getXLenVT();
2643   int XLenInBytes = Subtarget.getXLen() / 8;
2644 
2645   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2646     return SDValue();
2647 
2648   EVT VT = Op.getValueType();
2649   SDLoc DL(Op);
2650   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2651   if (Depth) {
2652     int Off = -XLenInBytes;
2653     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
2654     SDValue Offset = DAG.getConstant(Off, DL, VT);
2655     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
2656                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
2657                        MachinePointerInfo());
2658   }
2659 
2660   // Return the value of the return address register, marking it an implicit
2661   // live-in.
2662   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
2663   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
2664 }
2665 
2666 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
2667                                                  SelectionDAG &DAG) const {
2668   SDLoc DL(Op);
2669   SDValue Lo = Op.getOperand(0);
2670   SDValue Hi = Op.getOperand(1);
2671   SDValue Shamt = Op.getOperand(2);
2672   EVT VT = Lo.getValueType();
2673 
2674   // if Shamt-XLEN < 0: // Shamt < XLEN
2675   //   Lo = Lo << Shamt
2676   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
2677   // else:
2678   //   Lo = 0
2679   //   Hi = Lo << (Shamt-XLEN)
2680 
2681   SDValue Zero = DAG.getConstant(0, DL, VT);
2682   SDValue One = DAG.getConstant(1, DL, VT);
2683   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2684   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2685   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2686   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2687 
2688   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2689   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
2690   SDValue ShiftRightLo =
2691       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
2692   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2693   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2694   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
2695 
2696   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2697 
2698   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
2699   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2700 
2701   SDValue Parts[2] = {Lo, Hi};
2702   return DAG.getMergeValues(Parts, DL);
2703 }
2704 
2705 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2706                                                   bool IsSRA) const {
2707   SDLoc DL(Op);
2708   SDValue Lo = Op.getOperand(0);
2709   SDValue Hi = Op.getOperand(1);
2710   SDValue Shamt = Op.getOperand(2);
2711   EVT VT = Lo.getValueType();
2712 
2713   // SRA expansion:
2714   //   if Shamt-XLEN < 0: // Shamt < XLEN
2715   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2716   //     Hi = Hi >>s Shamt
2717   //   else:
2718   //     Lo = Hi >>s (Shamt-XLEN);
2719   //     Hi = Hi >>s (XLEN-1)
2720   //
2721   // SRL expansion:
2722   //   if Shamt-XLEN < 0: // Shamt < XLEN
2723   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2724   //     Hi = Hi >>u Shamt
2725   //   else:
2726   //     Lo = Hi >>u (Shamt-XLEN);
2727   //     Hi = 0;
2728 
2729   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
2730 
2731   SDValue Zero = DAG.getConstant(0, DL, VT);
2732   SDValue One = DAG.getConstant(1, DL, VT);
2733   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2734   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2735   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2736   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2737 
2738   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2739   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
2740   SDValue ShiftLeftHi =
2741       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
2742   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
2743   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
2744   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
2745   SDValue HiFalse =
2746       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
2747 
2748   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2749 
2750   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
2751   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2752 
2753   SDValue Parts[2] = {Lo, Hi};
2754   return DAG.getMergeValues(Parts, DL);
2755 }
2756 
2757 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
2758 // illegal (currently only vXi64 RV32).
2759 // FIXME: We could also catch non-constant sign-extended i32 values and lower
2760 // them to SPLAT_VECTOR_I64
2761 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
2762                                                      SelectionDAG &DAG) const {
2763   SDLoc DL(Op);
2764   EVT VecVT = Op.getValueType();
2765   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
2766          "Unexpected SPLAT_VECTOR_PARTS lowering");
2767 
2768   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
2769   SDValue Lo = Op.getOperand(0);
2770   SDValue Hi = Op.getOperand(1);
2771 
2772   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2773     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2774     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
2775     // If Hi constant is all the same sign bit as Lo, lower this as a custom
2776     // node in order to try and match RVV vector/scalar instructions.
2777     if ((LoC >> 31) == HiC)
2778       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2779   }
2780 
2781   // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
2782   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
2783       isa<ConstantSDNode>(Hi.getOperand(1)) &&
2784       Hi.getConstantOperandVal(1) == 31)
2785     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2786 
2787   // Else, on RV32 we lower an i64-element SPLAT_VECTOR thus, being careful not
2788   // to accidentally sign-extend the 32-bit halves to the e64 SEW:
2789   // vmv.v.x vX, hi
2790   // vsll.vx vX, vX, /*32*/
2791   // vmv.v.x vY, lo
2792   // vsll.vx vY, vY, /*32*/
2793   // vsrl.vx vY, vY, /*32*/
2794   // vor.vv vX, vX, vY
2795   SDValue ThirtyTwoV = DAG.getConstant(32, DL, VecVT);
2796 
2797   Lo = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2798   Lo = DAG.getNode(ISD::SHL, DL, VecVT, Lo, ThirtyTwoV);
2799   Lo = DAG.getNode(ISD::SRL, DL, VecVT, Lo, ThirtyTwoV);
2800 
2801   if (isNullConstant(Hi))
2802     return Lo;
2803 
2804   Hi = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Hi);
2805   Hi = DAG.getNode(ISD::SHL, DL, VecVT, Hi, ThirtyTwoV);
2806 
2807   return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi);
2808 }
2809 
2810 // Custom-lower extensions from mask vectors by using a vselect either with 1
2811 // for zero/any-extension or -1 for sign-extension:
2812 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
2813 // Note that any-extension is lowered identically to zero-extension.
2814 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
2815                                                 int64_t ExtTrueVal) const {
2816   SDLoc DL(Op);
2817   MVT VecVT = Op.getSimpleValueType();
2818   SDValue Src = Op.getOperand(0);
2819   // Only custom-lower extensions from mask types
2820   assert(Src.getValueType().isVector() &&
2821          Src.getValueType().getVectorElementType() == MVT::i1);
2822 
2823   MVT XLenVT = Subtarget.getXLenVT();
2824   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
2825   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
2826 
2827   if (VecVT.isScalableVector()) {
2828     // Be careful not to introduce illegal scalar types at this stage, and be
2829     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
2830     // illegal and must be expanded. Since we know that the constants are
2831     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
2832     bool IsRV32E64 =
2833         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
2834 
2835     if (!IsRV32E64) {
2836       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
2837       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
2838     } else {
2839       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
2840       SplatTrueVal =
2841           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
2842     }
2843 
2844     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
2845   }
2846 
2847   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
2848   MVT I1ContainerVT =
2849       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
2850 
2851   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
2852 
2853   SDValue Mask, VL;
2854   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2855 
2856   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
2857   SplatTrueVal =
2858       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
2859   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
2860                                SplatTrueVal, SplatZero, VL);
2861 
2862   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
2863 }
2864 
2865 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
2866     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
2867   MVT ExtVT = Op.getSimpleValueType();
2868   // Only custom-lower extensions from fixed-length vector types.
2869   if (!ExtVT.isFixedLengthVector())
2870     return Op;
2871   MVT VT = Op.getOperand(0).getSimpleValueType();
2872   // Grab the canonical container type for the extended type. Infer the smaller
2873   // type from that to ensure the same number of vector elements, as we know
2874   // the LMUL will be sufficient to hold the smaller type.
2875   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
2876   // Get the extended container type manually to ensure the same number of
2877   // vector elements between source and dest.
2878   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
2879                                      ContainerExtVT.getVectorElementCount());
2880 
2881   SDValue Op1 =
2882       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
2883 
2884   SDLoc DL(Op);
2885   SDValue Mask, VL;
2886   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2887 
2888   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
2889 
2890   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
2891 }
2892 
2893 // Custom-lower truncations from vectors to mask vectors by using a mask and a
2894 // setcc operation:
2895 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
2896 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
2897                                                   SelectionDAG &DAG) const {
2898   SDLoc DL(Op);
2899   EVT MaskVT = Op.getValueType();
2900   // Only expect to custom-lower truncations to mask types
2901   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
2902          "Unexpected type for vector mask lowering");
2903   SDValue Src = Op.getOperand(0);
2904   MVT VecVT = Src.getSimpleValueType();
2905 
2906   // If this is a fixed vector, we need to convert it to a scalable vector.
2907   MVT ContainerVT = VecVT;
2908   if (VecVT.isFixedLengthVector()) {
2909     ContainerVT = getContainerForFixedLengthVector(VecVT);
2910     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2911   }
2912 
2913   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
2914   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
2915 
2916   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
2917   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
2918 
2919   if (VecVT.isScalableVector()) {
2920     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
2921     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
2922   }
2923 
2924   SDValue Mask, VL;
2925   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2926 
2927   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2928   SDValue Trunc =
2929       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
2930   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
2931                       DAG.getCondCode(ISD::SETNE), Mask, VL);
2932   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
2933 }
2934 
2935 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
2936 // first position of a vector, and that vector is slid up to the insert index.
2937 // By limiting the active vector length to index+1 and merging with the
2938 // original vector (with an undisturbed tail policy for elements >= VL), we
2939 // achieve the desired result of leaving all elements untouched except the one
2940 // at VL-1, which is replaced with the desired value.
2941 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
2942                                                     SelectionDAG &DAG) const {
2943   SDLoc DL(Op);
2944   MVT VecVT = Op.getSimpleValueType();
2945   SDValue Vec = Op.getOperand(0);
2946   SDValue Val = Op.getOperand(1);
2947   SDValue Idx = Op.getOperand(2);
2948 
2949   MVT ContainerVT = VecVT;
2950   // If the operand is a fixed-length vector, convert to a scalable one.
2951   if (VecVT.isFixedLengthVector()) {
2952     ContainerVT = getContainerForFixedLengthVector(VecVT);
2953     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2954   }
2955 
2956   MVT XLenVT = Subtarget.getXLenVT();
2957 
2958   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
2959   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
2960   // Even i64-element vectors on RV32 can be lowered without scalar
2961   // legalization if the most-significant 32 bits of the value are not affected
2962   // by the sign-extension of the lower 32 bits.
2963   // TODO: We could also catch sign extensions of a 32-bit value.
2964   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
2965     const auto *CVal = cast<ConstantSDNode>(Val);
2966     if (isInt<32>(CVal->getSExtValue())) {
2967       IsLegalInsert = true;
2968       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
2969     }
2970   }
2971 
2972   SDValue Mask, VL;
2973   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2974 
2975   SDValue ValInVec;
2976 
2977   if (IsLegalInsert) {
2978     unsigned Opc =
2979         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
2980     if (isNullConstant(Idx)) {
2981       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
2982       if (!VecVT.isFixedLengthVector())
2983         return Vec;
2984       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
2985     }
2986     ValInVec =
2987         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
2988   } else {
2989     // On RV32, i64-element vectors must be specially handled to place the
2990     // value at element 0, by using two vslide1up instructions in sequence on
2991     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
2992     // this.
2993     SDValue One = DAG.getConstant(1, DL, XLenVT);
2994     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
2995     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
2996     MVT I32ContainerVT =
2997         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
2998     SDValue I32Mask =
2999         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3000     // Limit the active VL to two.
3001     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
3002     // Note: We can't pass a UNDEF to the first VSLIDE1UP_VL since an untied
3003     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3004     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3005                            InsertI64VL);
3006     // First slide in the hi value, then the lo in underneath it.
3007     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3008                            ValHi, I32Mask, InsertI64VL);
3009     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3010                            ValLo, I32Mask, InsertI64VL);
3011     // Bitcast back to the right container type.
3012     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3013   }
3014 
3015   // Now that the value is in a vector, slide it into position.
3016   SDValue InsertVL =
3017       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3018   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3019                                 ValInVec, Idx, Mask, InsertVL);
3020   if (!VecVT.isFixedLengthVector())
3021     return Slideup;
3022   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3023 }
3024 
3025 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3026 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3027 // types this is done using VMV_X_S to allow us to glean information about the
3028 // sign bits of the result.
3029 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3030                                                      SelectionDAG &DAG) const {
3031   SDLoc DL(Op);
3032   SDValue Idx = Op.getOperand(1);
3033   SDValue Vec = Op.getOperand(0);
3034   EVT EltVT = Op.getValueType();
3035   MVT VecVT = Vec.getSimpleValueType();
3036   MVT XLenVT = Subtarget.getXLenVT();
3037 
3038   if (VecVT.getVectorElementType() == MVT::i1) {
3039     // FIXME: For now we just promote to an i8 vector and extract from that,
3040     // but this is probably not optimal.
3041     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3042     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3043     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3044   }
3045 
3046   // If this is a fixed vector, we need to convert it to a scalable vector.
3047   MVT ContainerVT = VecVT;
3048   if (VecVT.isFixedLengthVector()) {
3049     ContainerVT = getContainerForFixedLengthVector(VecVT);
3050     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3051   }
3052 
3053   // If the index is 0, the vector is already in the right position.
3054   if (!isNullConstant(Idx)) {
3055     // Use a VL of 1 to avoid processing more elements than we need.
3056     SDValue VL = DAG.getConstant(1, DL, XLenVT);
3057     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3058     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3059     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3060                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3061   }
3062 
3063   if (!EltVT.isInteger()) {
3064     // Floating-point extracts are handled in TableGen.
3065     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3066                        DAG.getConstant(0, DL, XLenVT));
3067   }
3068 
3069   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3070   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3071 }
3072 
3073 // Some RVV intrinsics may claim that they want an integer operand to be
3074 // promoted or expanded.
3075 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3076                                           const RISCVSubtarget &Subtarget) {
3077   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3078           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3079          "Unexpected opcode");
3080 
3081   if (!Subtarget.hasStdExtV())
3082     return SDValue();
3083 
3084   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3085   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3086   SDLoc DL(Op);
3087 
3088   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3089       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3090   if (!II || !II->SplatOperand)
3091     return SDValue();
3092 
3093   unsigned SplatOp = II->SplatOperand + HasChain;
3094   assert(SplatOp < Op.getNumOperands());
3095 
3096   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3097   SDValue &ScalarOp = Operands[SplatOp];
3098   MVT OpVT = ScalarOp.getSimpleValueType();
3099   MVT XLenVT = Subtarget.getXLenVT();
3100 
3101   // If this isn't a scalar, or its type is XLenVT we're done.
3102   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3103     return SDValue();
3104 
3105   // Simplest case is that the operand needs to be promoted to XLenVT.
3106   if (OpVT.bitsLT(XLenVT)) {
3107     // If the operand is a constant, sign extend to increase our chances
3108     // of being able to use a .vi instruction. ANY_EXTEND would become a
3109     // a zero extend and the simm5 check in isel would fail.
3110     // FIXME: Should we ignore the upper bits in isel instead?
3111     unsigned ExtOpc =
3112         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3113     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3114     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3115   }
3116 
3117   // Use the previous operand to get the vXi64 VT. The result might be a mask
3118   // VT for compares. Using the previous operand assumes that the previous
3119   // operand will never have a smaller element size than a scalar operand and
3120   // that a widening operation never uses SEW=64.
3121   // NOTE: If this fails the below assert, we can probably just find the
3122   // element count from any operand or result and use it to construct the VT.
3123   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3124   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3125 
3126   // The more complex case is when the scalar is larger than XLenVT.
3127   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3128          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3129 
3130   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3131   // on the instruction to sign-extend since SEW>XLEN.
3132   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3133     if (isInt<32>(CVal->getSExtValue())) {
3134       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3135       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3136     }
3137   }
3138 
3139   // We need to convert the scalar to a splat vector.
3140   // FIXME: Can we implicitly truncate the scalar if it is known to
3141   // be sign extended?
3142   // VL should be the last operand.
3143   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3144   assert(VL.getValueType() == XLenVT);
3145   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG, Subtarget);
3146   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3147 }
3148 
3149 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3150                                                      SelectionDAG &DAG) const {
3151   unsigned IntNo = Op.getConstantOperandVal(0);
3152   SDLoc DL(Op);
3153   MVT XLenVT = Subtarget.getXLenVT();
3154 
3155   switch (IntNo) {
3156   default:
3157     break; // Don't custom lower most intrinsics.
3158   case Intrinsic::thread_pointer: {
3159     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3160     return DAG.getRegister(RISCV::X4, PtrVT);
3161   }
3162   case Intrinsic::riscv_orc_b:
3163     // Lower to the GORCI encoding for orc.b.
3164     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3165                        DAG.getConstant(7, DL, XLenVT));
3166   case Intrinsic::riscv_grev:
3167   case Intrinsic::riscv_gorc: {
3168     unsigned Opc =
3169         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3170     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3171   }
3172   case Intrinsic::riscv_shfl:
3173   case Intrinsic::riscv_unshfl: {
3174     unsigned Opc =
3175         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3176     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3177   }
3178   case Intrinsic::riscv_bcompress:
3179   case Intrinsic::riscv_bdecompress: {
3180     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3181                                                        : RISCVISD::BDECOMPRESS;
3182     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3183   }
3184   case Intrinsic::riscv_vmv_x_s:
3185     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3186     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3187                        Op.getOperand(1));
3188   case Intrinsic::riscv_vmv_v_x:
3189     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3190                             Op.getSimpleValueType(), DL, DAG, Subtarget);
3191   case Intrinsic::riscv_vfmv_v_f:
3192     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3193                        Op.getOperand(1), Op.getOperand(2));
3194   case Intrinsic::riscv_vmv_s_x: {
3195     SDValue Scalar = Op.getOperand(2);
3196 
3197     if (Scalar.getValueType().bitsLE(XLenVT)) {
3198       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3199       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3200                          Op.getOperand(1), Scalar, Op.getOperand(3));
3201     }
3202 
3203     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3204 
3205     // This is an i64 value that lives in two scalar registers. We have to
3206     // insert this in a convoluted way. First we build vXi64 splat containing
3207     // the/ two values that we assemble using some bit math. Next we'll use
3208     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
3209     // to merge element 0 from our splat into the source vector.
3210     // FIXME: This is probably not the best way to do this, but it is
3211     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3212     // point.
3213     //   vmv.v.x vX, hi
3214     //   vsll.vx vX, vX, /*32*/
3215     //   vmv.v.x vY, lo
3216     //   vsll.vx vY, vY, /*32*/
3217     //   vsrl.vx vY, vY, /*32*/
3218     //   vor.vv vX, vX, vY
3219     //
3220     //   vid.v      vVid
3221     //   vmseq.vx   mMask, vVid, 0
3222     //   vmerge.vvm vDest, vSrc, vVal, mMask
3223     MVT VT = Op.getSimpleValueType();
3224     SDValue Vec = Op.getOperand(1);
3225     SDValue VL = Op.getOperand(3);
3226 
3227     SDValue SplattedVal =
3228         splatSplitI64WithVL(DL, VT, Scalar, VL, DAG, Subtarget);
3229     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3230                                       DAG.getConstant(0, DL, MVT::i32), VL);
3231 
3232     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3233     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3234     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3235     SDValue SelectCond =
3236         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3237                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
3238     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3239                        Vec, VL);
3240   }
3241   case Intrinsic::riscv_vslide1up:
3242   case Intrinsic::riscv_vslide1down:
3243   case Intrinsic::riscv_vslide1up_mask:
3244   case Intrinsic::riscv_vslide1down_mask: {
3245     // We need to special case these when the scalar is larger than XLen.
3246     unsigned NumOps = Op.getNumOperands();
3247     bool IsMasked = NumOps == 6;
3248     unsigned OpOffset = IsMasked ? 1 : 0;
3249     SDValue Scalar = Op.getOperand(2 + OpOffset);
3250     if (Scalar.getValueType().bitsLE(XLenVT))
3251       break;
3252 
3253     // Splatting a sign extended constant is fine.
3254     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3255       if (isInt<32>(CVal->getSExtValue()))
3256         break;
3257 
3258     MVT VT = Op.getSimpleValueType();
3259     assert(VT.getVectorElementType() == MVT::i64 &&
3260            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3261 
3262     // Convert the vector source to the equivalent nxvXi32 vector.
3263     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3264     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3265 
3266     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3267                                    DAG.getConstant(0, DL, XLenVT));
3268     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3269                                    DAG.getConstant(1, DL, XLenVT));
3270 
3271     // Double the VL since we halved SEW.
3272     SDValue VL = Op.getOperand(NumOps - 1);
3273     SDValue I32VL =
3274         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3275 
3276     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
3277     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
3278 
3279     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3280     // instructions.
3281     if (IntNo == Intrinsic::riscv_vslide1up ||
3282         IntNo == Intrinsic::riscv_vslide1up_mask) {
3283       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3284                         I32Mask, I32VL);
3285       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3286                         I32Mask, I32VL);
3287     } else {
3288       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3289                         I32Mask, I32VL);
3290       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3291                         I32Mask, I32VL);
3292     }
3293 
3294     // Convert back to nxvXi64.
3295     Vec = DAG.getBitcast(VT, Vec);
3296 
3297     if (!IsMasked)
3298       return Vec;
3299 
3300     // Apply mask after the operation.
3301     SDValue Mask = Op.getOperand(NumOps - 2);
3302     SDValue MaskedOff = Op.getOperand(1);
3303     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
3304   }
3305   }
3306 
3307   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3308 }
3309 
3310 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
3311                                                     SelectionDAG &DAG) const {
3312   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3313 }
3314 
3315 static MVT getLMUL1VT(MVT VT) {
3316   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
3317          "Unexpected vector MVT");
3318   return MVT::getScalableVectorVT(
3319       VT.getVectorElementType(),
3320       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
3321 }
3322 
3323 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
3324   switch (ISDOpcode) {
3325   default:
3326     llvm_unreachable("Unhandled reduction");
3327   case ISD::VECREDUCE_ADD:
3328     return RISCVISD::VECREDUCE_ADD_VL;
3329   case ISD::VECREDUCE_UMAX:
3330     return RISCVISD::VECREDUCE_UMAX_VL;
3331   case ISD::VECREDUCE_SMAX:
3332     return RISCVISD::VECREDUCE_SMAX_VL;
3333   case ISD::VECREDUCE_UMIN:
3334     return RISCVISD::VECREDUCE_UMIN_VL;
3335   case ISD::VECREDUCE_SMIN:
3336     return RISCVISD::VECREDUCE_SMIN_VL;
3337   case ISD::VECREDUCE_AND:
3338     return RISCVISD::VECREDUCE_AND_VL;
3339   case ISD::VECREDUCE_OR:
3340     return RISCVISD::VECREDUCE_OR_VL;
3341   case ISD::VECREDUCE_XOR:
3342     return RISCVISD::VECREDUCE_XOR_VL;
3343   }
3344 }
3345 
3346 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op,
3347                                                       SelectionDAG &DAG) const {
3348   SDLoc DL(Op);
3349   SDValue Vec = Op.getOperand(0);
3350   MVT VecVT = Vec.getSimpleValueType();
3351   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
3352           Op.getOpcode() == ISD::VECREDUCE_OR ||
3353           Op.getOpcode() == ISD::VECREDUCE_XOR) &&
3354          "Unexpected reduction lowering");
3355 
3356   MVT XLenVT = Subtarget.getXLenVT();
3357   assert(Op.getValueType() == XLenVT &&
3358          "Expected reduction output to be legalized to XLenVT");
3359 
3360   MVT ContainerVT = VecVT;
3361   if (VecVT.isFixedLengthVector()) {
3362     ContainerVT = getContainerForFixedLengthVector(VecVT);
3363     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3364   }
3365 
3366   SDValue Mask, VL;
3367   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3368   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3369 
3370   switch (Op.getOpcode()) {
3371   default:
3372     llvm_unreachable("Unhandled reduction");
3373   case ISD::VECREDUCE_AND:
3374     // vpopc ~x == 0
3375     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL);
3376     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3377     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ);
3378   case ISD::VECREDUCE_OR:
3379     // vpopc x != 0
3380     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3381     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3382   case ISD::VECREDUCE_XOR: {
3383     // ((vpopc x) & 1) != 0
3384     SDValue One = DAG.getConstant(1, DL, XLenVT);
3385     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3386     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
3387     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3388   }
3389   }
3390 }
3391 
3392 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
3393                                             SelectionDAG &DAG) const {
3394   SDLoc DL(Op);
3395   SDValue Vec = Op.getOperand(0);
3396   EVT VecEVT = Vec.getValueType();
3397 
3398   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
3399 
3400   // Due to ordering in legalize types we may have a vector type that needs to
3401   // be split. Do that manually so we can get down to a legal type.
3402   while (getTypeAction(*DAG.getContext(), VecEVT) ==
3403          TargetLowering::TypeSplitVector) {
3404     SDValue Lo, Hi;
3405     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
3406     VecEVT = Lo.getValueType();
3407     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
3408   }
3409 
3410   // TODO: The type may need to be widened rather than split. Or widened before
3411   // it can be split.
3412   if (!isTypeLegal(VecEVT))
3413     return SDValue();
3414 
3415   MVT VecVT = VecEVT.getSimpleVT();
3416   MVT VecEltVT = VecVT.getVectorElementType();
3417   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
3418 
3419   MVT ContainerVT = VecVT;
3420   if (VecVT.isFixedLengthVector()) {
3421     ContainerVT = getContainerForFixedLengthVector(VecVT);
3422     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3423   }
3424 
3425   MVT M1VT = getLMUL1VT(ContainerVT);
3426 
3427   SDValue Mask, VL;
3428   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3429 
3430   // FIXME: This is a VLMAX splat which might be too large and can prevent
3431   // vsetvli removal.
3432   SDValue NeutralElem =
3433       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
3434   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
3435   SDValue Reduction =
3436       DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
3437   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3438                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3439   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
3440 }
3441 
3442 // Given a reduction op, this function returns the matching reduction opcode,
3443 // the vector SDValue and the scalar SDValue required to lower this to a
3444 // RISCVISD node.
3445 static std::tuple<unsigned, SDValue, SDValue>
3446 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
3447   SDLoc DL(Op);
3448   switch (Op.getOpcode()) {
3449   default:
3450     llvm_unreachable("Unhandled reduction");
3451   case ISD::VECREDUCE_FADD:
3452     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
3453                            DAG.getConstantFP(0.0, DL, EltVT));
3454   case ISD::VECREDUCE_SEQ_FADD:
3455     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
3456                            Op.getOperand(0));
3457   }
3458 }
3459 
3460 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
3461                                               SelectionDAG &DAG) const {
3462   SDLoc DL(Op);
3463   MVT VecEltVT = Op.getSimpleValueType();
3464 
3465   unsigned RVVOpcode;
3466   SDValue VectorVal, ScalarVal;
3467   std::tie(RVVOpcode, VectorVal, ScalarVal) =
3468       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
3469   MVT VecVT = VectorVal.getSimpleValueType();
3470 
3471   MVT ContainerVT = VecVT;
3472   if (VecVT.isFixedLengthVector()) {
3473     ContainerVT = getContainerForFixedLengthVector(VecVT);
3474     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
3475   }
3476 
3477   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
3478 
3479   SDValue Mask, VL;
3480   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3481 
3482   // FIXME: This is a VLMAX splat which might be too large and can prevent
3483   // vsetvli removal.
3484   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
3485   SDValue Reduction =
3486       DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
3487   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3488                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3489 }
3490 
3491 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
3492                                                    SelectionDAG &DAG) const {
3493   SDValue Vec = Op.getOperand(0);
3494   SDValue SubVec = Op.getOperand(1);
3495   MVT VecVT = Vec.getSimpleValueType();
3496   MVT SubVecVT = SubVec.getSimpleValueType();
3497 
3498   SDLoc DL(Op);
3499   MVT XLenVT = Subtarget.getXLenVT();
3500   unsigned OrigIdx = Op.getConstantOperandVal(2);
3501   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3502 
3503   // We don't have the ability to slide mask vectors up indexed by their i1
3504   // elements; the smallest we can do is i8. Often we are able to bitcast to
3505   // equivalent i8 vectors. Note that when inserting a fixed-length vector
3506   // into a scalable one, we might not necessarily have enough scalable
3507   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
3508   if (SubVecVT.getVectorElementType() == MVT::i1 &&
3509       (OrigIdx != 0 || !Vec.isUndef())) {
3510     if (VecVT.getVectorMinNumElements() >= 8 &&
3511         SubVecVT.getVectorMinNumElements() >= 8) {
3512       assert(OrigIdx % 8 == 0 && "Invalid index");
3513       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3514              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3515              "Unexpected mask vector lowering");
3516       OrigIdx /= 8;
3517       SubVecVT =
3518           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3519                            SubVecVT.isScalableVector());
3520       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3521                                VecVT.isScalableVector());
3522       Vec = DAG.getBitcast(VecVT, Vec);
3523       SubVec = DAG.getBitcast(SubVecVT, SubVec);
3524     } else {
3525       // We can't slide this mask vector up indexed by its i1 elements.
3526       // This poses a problem when we wish to insert a scalable vector which
3527       // can't be re-expressed as a larger type. Just choose the slow path and
3528       // extend to a larger type, then truncate back down.
3529       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3530       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3531       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3532       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
3533       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
3534                         Op.getOperand(2));
3535       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
3536       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
3537     }
3538   }
3539 
3540   // If the subvector vector is a fixed-length type, we cannot use subregister
3541   // manipulation to simplify the codegen; we don't know which register of a
3542   // LMUL group contains the specific subvector as we only know the minimum
3543   // register size. Therefore we must slide the vector group up the full
3544   // amount.
3545   if (SubVecVT.isFixedLengthVector()) {
3546     if (OrigIdx == 0 && Vec.isUndef())
3547       return Op;
3548     MVT ContainerVT = VecVT;
3549     if (VecVT.isFixedLengthVector()) {
3550       ContainerVT = getContainerForFixedLengthVector(VecVT);
3551       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3552     }
3553     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
3554                          DAG.getUNDEF(ContainerVT), SubVec,
3555                          DAG.getConstant(0, DL, XLenVT));
3556     SDValue Mask =
3557         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3558     // Set the vector length to only the number of elements we care about. Note
3559     // that for slideup this includes the offset.
3560     SDValue VL =
3561         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
3562     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3563     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3564                                   SubVec, SlideupAmt, Mask, VL);
3565     if (VecVT.isFixedLengthVector())
3566       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3567     return DAG.getBitcast(Op.getValueType(), Slideup);
3568   }
3569 
3570   unsigned SubRegIdx, RemIdx;
3571   std::tie(SubRegIdx, RemIdx) =
3572       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3573           VecVT, SubVecVT, OrigIdx, TRI);
3574 
3575   RISCVVLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
3576   bool IsSubVecPartReg = SubVecLMUL == RISCVVLMUL::LMUL_F2 ||
3577                          SubVecLMUL == RISCVVLMUL::LMUL_F4 ||
3578                          SubVecLMUL == RISCVVLMUL::LMUL_F8;
3579 
3580   // 1. If the Idx has been completely eliminated and this subvector's size is
3581   // a vector register or a multiple thereof, or the surrounding elements are
3582   // undef, then this is a subvector insert which naturally aligns to a vector
3583   // register. These can easily be handled using subregister manipulation.
3584   // 2. If the subvector is smaller than a vector register, then the insertion
3585   // must preserve the undisturbed elements of the register. We do this by
3586   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
3587   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
3588   // subvector within the vector register, and an INSERT_SUBVECTOR of that
3589   // LMUL=1 type back into the larger vector (resolving to another subregister
3590   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
3591   // to avoid allocating a large register group to hold our subvector.
3592   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
3593     return Op;
3594 
3595   // VSLIDEUP works by leaving elements 0<i<OFFSET undisturbed, elements
3596   // OFFSET<=i<VL set to the "subvector" and vl<=i<VLMAX set to the tail policy
3597   // (in our case undisturbed). This means we can set up a subvector insertion
3598   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
3599   // size of the subvector.
3600   MVT InterSubVT = VecVT;
3601   SDValue AlignedExtract = Vec;
3602   unsigned AlignedIdx = OrigIdx - RemIdx;
3603   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3604     InterSubVT = getLMUL1VT(VecVT);
3605     // Extract a subvector equal to the nearest full vector register type. This
3606     // should resolve to a EXTRACT_SUBREG instruction.
3607     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3608                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
3609   }
3610 
3611   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3612   // For scalable vectors this must be further multiplied by vscale.
3613   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
3614 
3615   SDValue Mask, VL;
3616   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
3617 
3618   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
3619   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
3620   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
3621   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
3622 
3623   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
3624                        DAG.getUNDEF(InterSubVT), SubVec,
3625                        DAG.getConstant(0, DL, XLenVT));
3626 
3627   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
3628                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
3629 
3630   // If required, insert this subvector back into the correct vector register.
3631   // This should resolve to an INSERT_SUBREG instruction.
3632   if (VecVT.bitsGT(InterSubVT))
3633     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
3634                           DAG.getConstant(AlignedIdx, DL, XLenVT));
3635 
3636   // We might have bitcast from a mask type: cast back to the original type if
3637   // required.
3638   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
3639 }
3640 
3641 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
3642                                                     SelectionDAG &DAG) const {
3643   SDValue Vec = Op.getOperand(0);
3644   MVT SubVecVT = Op.getSimpleValueType();
3645   MVT VecVT = Vec.getSimpleValueType();
3646 
3647   SDLoc DL(Op);
3648   MVT XLenVT = Subtarget.getXLenVT();
3649   unsigned OrigIdx = Op.getConstantOperandVal(1);
3650   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3651 
3652   // We don't have the ability to slide mask vectors down indexed by their i1
3653   // elements; the smallest we can do is i8. Often we are able to bitcast to
3654   // equivalent i8 vectors. Note that when extracting a fixed-length vector
3655   // from a scalable one, we might not necessarily have enough scalable
3656   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
3657   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
3658     if (VecVT.getVectorMinNumElements() >= 8 &&
3659         SubVecVT.getVectorMinNumElements() >= 8) {
3660       assert(OrigIdx % 8 == 0 && "Invalid index");
3661       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3662              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3663              "Unexpected mask vector lowering");
3664       OrigIdx /= 8;
3665       SubVecVT =
3666           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3667                            SubVecVT.isScalableVector());
3668       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3669                                VecVT.isScalableVector());
3670       Vec = DAG.getBitcast(VecVT, Vec);
3671     } else {
3672       // We can't slide this mask vector down, indexed by its i1 elements.
3673       // This poses a problem when we wish to extract a scalable vector which
3674       // can't be re-expressed as a larger type. Just choose the slow path and
3675       // extend to a larger type, then truncate back down.
3676       // TODO: We could probably improve this when extracting certain fixed
3677       // from fixed, where we can extract as i8 and shift the correct element
3678       // right to reach the desired subvector?
3679       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3680       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3681       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3682       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
3683                         Op.getOperand(1));
3684       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
3685       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
3686     }
3687   }
3688 
3689   // If the subvector vector is a fixed-length type, we cannot use subregister
3690   // manipulation to simplify the codegen; we don't know which register of a
3691   // LMUL group contains the specific subvector as we only know the minimum
3692   // register size. Therefore we must slide the vector group down the full
3693   // amount.
3694   if (SubVecVT.isFixedLengthVector()) {
3695     // With an index of 0 this is a cast-like subvector, which can be performed
3696     // with subregister operations.
3697     if (OrigIdx == 0)
3698       return Op;
3699     MVT ContainerVT = VecVT;
3700     if (VecVT.isFixedLengthVector()) {
3701       ContainerVT = getContainerForFixedLengthVector(VecVT);
3702       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3703     }
3704     SDValue Mask =
3705         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3706     // Set the vector length to only the number of elements we care about. This
3707     // avoids sliding down elements we're going to discard straight away.
3708     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
3709     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3710     SDValue Slidedown =
3711         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3712                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
3713     // Now we can use a cast-like subvector extract to get the result.
3714     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3715                             DAG.getConstant(0, DL, XLenVT));
3716     return DAG.getBitcast(Op.getValueType(), Slidedown);
3717   }
3718 
3719   unsigned SubRegIdx, RemIdx;
3720   std::tie(SubRegIdx, RemIdx) =
3721       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3722           VecVT, SubVecVT, OrigIdx, TRI);
3723 
3724   // If the Idx has been completely eliminated then this is a subvector extract
3725   // which naturally aligns to a vector register. These can easily be handled
3726   // using subregister manipulation.
3727   if (RemIdx == 0)
3728     return Op;
3729 
3730   // Else we must shift our vector register directly to extract the subvector.
3731   // Do this using VSLIDEDOWN.
3732 
3733   // If the vector type is an LMUL-group type, extract a subvector equal to the
3734   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
3735   // instruction.
3736   MVT InterSubVT = VecVT;
3737   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3738     InterSubVT = getLMUL1VT(VecVT);
3739     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3740                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
3741   }
3742 
3743   // Slide this vector register down by the desired number of elements in order
3744   // to place the desired subvector starting at element 0.
3745   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3746   // For scalable vectors this must be further multiplied by vscale.
3747   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
3748 
3749   SDValue Mask, VL;
3750   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
3751   SDValue Slidedown =
3752       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
3753                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
3754 
3755   // Now the vector is in the right position, extract our final subvector. This
3756   // should resolve to a COPY.
3757   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3758                           DAG.getConstant(0, DL, XLenVT));
3759 
3760   // We might have bitcast from a mask type: cast back to the original type if
3761   // required.
3762   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
3763 }
3764 
3765 // Implement step_vector to the vid instruction.
3766 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
3767                                               SelectionDAG &DAG) const {
3768   SDLoc DL(Op);
3769   assert(Op.getConstantOperandAPInt(0) == 1 && "Unexpected step value");
3770   MVT VT = Op.getSimpleValueType();
3771   SDValue Mask, VL;
3772   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
3773   return DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3774 }
3775 
3776 // Implement vector_reverse using vrgather.vv with indices determined by
3777 // subtracting the id of each element from (VLMAX-1). This will convert
3778 // the indices like so:
3779 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
3780 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
3781 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
3782                                                  SelectionDAG &DAG) const {
3783   SDLoc DL(Op);
3784   MVT VecVT = Op.getSimpleValueType();
3785   unsigned EltSize = VecVT.getScalarSizeInBits();
3786   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
3787 
3788   unsigned MaxVLMAX = 0;
3789   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
3790   if (VectorBitsMax != 0)
3791     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
3792 
3793   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
3794   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
3795 
3796   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
3797   // to use vrgatherei16.vv.
3798   // TODO: It's also possible to use vrgatherei16.vv for other types to
3799   // decrease register width for the index calculation.
3800   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
3801     // If this is LMUL=8, we have to split before can use vrgatherei16.vv.
3802     // Reverse each half, then reassemble them in reverse order.
3803     // NOTE: It's also possible that after splitting that VLMAX no longer
3804     // requires vrgatherei16.vv.
3805     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
3806       SDValue Lo, Hi;
3807       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3808       EVT LoVT, HiVT;
3809       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
3810       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
3811       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
3812       // Reassemble the low and high pieces reversed.
3813       // FIXME: This is a CONCAT_VECTORS.
3814       SDValue Res =
3815           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
3816                       DAG.getIntPtrConstant(0, DL));
3817       return DAG.getNode(
3818           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
3819           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
3820     }
3821 
3822     // Just promote the int type to i16 which will double the LMUL.
3823     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
3824     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
3825   }
3826 
3827   MVT XLenVT = Subtarget.getXLenVT();
3828   SDValue Mask, VL;
3829   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
3830 
3831   // Calculate VLMAX-1 for the desired SEW.
3832   unsigned MinElts = VecVT.getVectorMinNumElements();
3833   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
3834                               DAG.getConstant(MinElts, DL, XLenVT));
3835   SDValue VLMinus1 =
3836       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
3837 
3838   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
3839   bool IsRV32E64 =
3840       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
3841   SDValue SplatVL;
3842   if (!IsRV32E64)
3843     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
3844   else
3845     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
3846 
3847   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
3848   SDValue Indices =
3849       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
3850 
3851   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
3852 }
3853 
3854 SDValue
3855 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
3856                                                      SelectionDAG &DAG) const {
3857   auto *Load = cast<LoadSDNode>(Op);
3858 
3859   SDLoc DL(Op);
3860   MVT VT = Op.getSimpleValueType();
3861   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3862 
3863   SDValue VL =
3864       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
3865 
3866   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
3867   SDValue NewLoad = DAG.getMemIntrinsicNode(
3868       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
3869       Load->getMemoryVT(), Load->getMemOperand());
3870 
3871   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
3872   return DAG.getMergeValues({Result, Load->getChain()}, DL);
3873 }
3874 
3875 SDValue
3876 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
3877                                                       SelectionDAG &DAG) const {
3878   auto *Store = cast<StoreSDNode>(Op);
3879 
3880   SDLoc DL(Op);
3881   SDValue StoreVal = Store->getValue();
3882   MVT VT = StoreVal.getSimpleValueType();
3883 
3884   // If the size less than a byte, we need to pad with zeros to make a byte.
3885   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
3886     VT = MVT::v8i1;
3887     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
3888                            DAG.getConstant(0, DL, VT), StoreVal,
3889                            DAG.getIntPtrConstant(0, DL));
3890   }
3891 
3892   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3893 
3894   SDValue VL =
3895       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
3896 
3897   SDValue NewValue =
3898       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
3899   return DAG.getMemIntrinsicNode(
3900       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
3901       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
3902       Store->getMemoryVT(), Store->getMemOperand());
3903 }
3904 
3905 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
3906   auto *Load = cast<MaskedLoadSDNode>(Op);
3907 
3908   SDLoc DL(Op);
3909   MVT VT = Op.getSimpleValueType();
3910   MVT XLenVT = Subtarget.getXLenVT();
3911 
3912   SDValue Mask = Load->getMask();
3913   SDValue PassThru = Load->getPassThru();
3914   SDValue VL;
3915 
3916   MVT ContainerVT = VT;
3917   if (VT.isFixedLengthVector()) {
3918     ContainerVT = getContainerForFixedLengthVector(VT);
3919     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3920 
3921     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
3922     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
3923     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
3924   } else
3925     VL = DAG.getRegister(RISCV::X0, XLenVT);
3926 
3927   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
3928   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
3929   SDValue Ops[] = {Load->getChain(),   IntID, PassThru,
3930                    Load->getBasePtr(), Mask,  VL};
3931   SDValue Result =
3932       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
3933                               Load->getMemoryVT(), Load->getMemOperand());
3934   SDValue Chain = Result.getValue(1);
3935 
3936   if (VT.isFixedLengthVector())
3937     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
3938 
3939   return DAG.getMergeValues({Result, Chain}, DL);
3940 }
3941 
3942 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const {
3943   auto *Store = cast<MaskedStoreSDNode>(Op);
3944 
3945   SDLoc DL(Op);
3946   SDValue Val = Store->getValue();
3947   SDValue Mask = Store->getMask();
3948   MVT VT = Val.getSimpleValueType();
3949   MVT XLenVT = Subtarget.getXLenVT();
3950   SDValue VL;
3951 
3952   MVT ContainerVT = VT;
3953   if (VT.isFixedLengthVector()) {
3954     ContainerVT = getContainerForFixedLengthVector(VT);
3955     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3956 
3957     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
3958     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
3959     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
3960   } else
3961     VL = DAG.getRegister(RISCV::X0, XLenVT);
3962 
3963   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
3964   return DAG.getMemIntrinsicNode(
3965       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
3966       {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL},
3967       Store->getMemoryVT(), Store->getMemOperand());
3968 }
3969 
3970 SDValue
3971 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
3972                                                       SelectionDAG &DAG) const {
3973   MVT InVT = Op.getOperand(0).getSimpleValueType();
3974   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
3975 
3976   MVT VT = Op.getSimpleValueType();
3977 
3978   SDValue Op1 =
3979       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3980   SDValue Op2 =
3981       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
3982 
3983   SDLoc DL(Op);
3984   SDValue VL =
3985       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
3986 
3987   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3988   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3989 
3990   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
3991                             Op.getOperand(2), Mask, VL);
3992 
3993   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
3994 }
3995 
3996 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
3997     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
3998   MVT VT = Op.getSimpleValueType();
3999 
4000   if (VT.getVectorElementType() == MVT::i1)
4001     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
4002 
4003   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
4004 }
4005 
4006 // Lower vector ABS to smax(X, sub(0, X)).
4007 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
4008   SDLoc DL(Op);
4009   MVT VT = Op.getSimpleValueType();
4010   SDValue X = Op.getOperand(0);
4011 
4012   assert(VT.isFixedLengthVector() && "Unexpected type");
4013 
4014   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4015   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
4016 
4017   SDValue Mask, VL;
4018   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4019 
4020   SDValue SplatZero =
4021       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4022                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4023   SDValue NegX =
4024       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
4025   SDValue Max =
4026       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
4027 
4028   return convertFromScalableVector(VT, Max, DAG, Subtarget);
4029 }
4030 
4031 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
4032     SDValue Op, SelectionDAG &DAG) const {
4033   SDLoc DL(Op);
4034   MVT VT = Op.getSimpleValueType();
4035   SDValue Mag = Op.getOperand(0);
4036   SDValue Sign = Op.getOperand(1);
4037   assert(Mag.getValueType() == Sign.getValueType() &&
4038          "Can only handle COPYSIGN with matching types.");
4039 
4040   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4041   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
4042   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
4043 
4044   SDValue Mask, VL;
4045   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4046 
4047   SDValue CopySign =
4048       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
4049 
4050   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
4051 }
4052 
4053 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
4054     SDValue Op, SelectionDAG &DAG) const {
4055   MVT VT = Op.getSimpleValueType();
4056   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4057 
4058   MVT I1ContainerVT =
4059       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4060 
4061   SDValue CC =
4062       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
4063   SDValue Op1 =
4064       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4065   SDValue Op2 =
4066       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
4067 
4068   SDLoc DL(Op);
4069   SDValue Mask, VL;
4070   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4071 
4072   SDValue Select =
4073       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
4074 
4075   return convertFromScalableVector(VT, Select, DAG, Subtarget);
4076 }
4077 
4078 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
4079                                                unsigned NewOpc,
4080                                                bool HasMask) const {
4081   MVT VT = Op.getSimpleValueType();
4082   MVT ContainerVT = getContainerForFixedLengthVector(VT);
4083 
4084   // Create list of operands by converting existing ones to scalable types.
4085   SmallVector<SDValue, 6> Ops;
4086   for (const SDValue &V : Op->op_values()) {
4087     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
4088 
4089     // Pass through non-vector operands.
4090     if (!V.getValueType().isVector()) {
4091       Ops.push_back(V);
4092       continue;
4093     }
4094 
4095     // "cast" fixed length vector to a scalable vector.
4096     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
4097            "Only fixed length vectors are supported!");
4098     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4099   }
4100 
4101   SDLoc DL(Op);
4102   SDValue Mask, VL;
4103   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4104   if (HasMask)
4105     Ops.push_back(Mask);
4106   Ops.push_back(VL);
4107 
4108   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
4109   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
4110 }
4111 
4112 // Custom lower MGATHER to a legalized form for RVV. It will then be matched to
4113 // a RVV indexed load. The RVV indexed load instructions only support the
4114 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4115 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4116 // indexing is extended to the XLEN value type and scaled accordingly.
4117 SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
4118   auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
4119   SDLoc DL(Op);
4120 
4121   SDValue Index = MGN->getIndex();
4122   SDValue Mask = MGN->getMask();
4123   SDValue PassThru = MGN->getPassThru();
4124 
4125   MVT VT = Op.getSimpleValueType();
4126   MVT IndexVT = Index.getSimpleValueType();
4127   MVT XLenVT = Subtarget.getXLenVT();
4128 
4129   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4130          "Unexpected VTs!");
4131   assert(MGN->getBasePtr().getSimpleValueType() == XLenVT &&
4132          "Unexpected pointer type");
4133   // Targets have to explicitly opt-in for extending vector loads.
4134   assert(MGN->getExtensionType() == ISD::NON_EXTLOAD &&
4135          "Unexpected extending MGATHER");
4136 
4137   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4138   // the selection of the masked intrinsics doesn't do this for us.
4139   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4140 
4141   SDValue VL;
4142   MVT ContainerVT = VT;
4143   if (VT.isFixedLengthVector()) {
4144     // We need to use the larger of the result and index type to determine the
4145     // scalable type to use so we don't increase LMUL for any operand/result.
4146     if (VT.bitsGE(IndexVT)) {
4147       ContainerVT = getContainerForFixedLengthVector(VT);
4148       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4149                                  ContainerVT.getVectorElementCount());
4150     } else {
4151       IndexVT = getContainerForFixedLengthVector(IndexVT);
4152       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
4153                                      IndexVT.getVectorElementCount());
4154     }
4155 
4156     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4157 
4158     if (!IsUnmasked) {
4159       MVT MaskVT =
4160           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4161       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4162       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4163     }
4164 
4165     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4166   } else
4167     VL = DAG.getRegister(RISCV::X0, XLenVT);
4168 
4169   unsigned IntID =
4170       IsUnmasked ? Intrinsic::riscv_vloxei : Intrinsic::riscv_vloxei_mask;
4171   SmallVector<SDValue, 8> Ops{MGN->getChain(),
4172                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4173   if (!IsUnmasked)
4174     Ops.push_back(PassThru);
4175   Ops.push_back(MGN->getBasePtr());
4176   Ops.push_back(Index);
4177   if (!IsUnmasked)
4178     Ops.push_back(Mask);
4179   Ops.push_back(VL);
4180 
4181   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4182   SDValue Result =
4183       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4184                               MGN->getMemoryVT(), MGN->getMemOperand());
4185   SDValue Chain = Result.getValue(1);
4186 
4187   if (VT.isFixedLengthVector())
4188     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4189 
4190   return DAG.getMergeValues({Result, Chain}, DL);
4191 }
4192 
4193 // Custom lower MSCATTER to a legalized form for RVV. It will then be matched to
4194 // a RVV indexed store. The RVV indexed store instructions only support the
4195 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4196 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4197 // indexing is extended to the XLEN value type and scaled accordingly.
4198 SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
4199                                            SelectionDAG &DAG) const {
4200   auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
4201   SDLoc DL(Op);
4202   SDValue Index = MSN->getIndex();
4203   SDValue Mask = MSN->getMask();
4204   SDValue Val = MSN->getValue();
4205 
4206   MVT VT = Val.getSimpleValueType();
4207   MVT IndexVT = Index.getSimpleValueType();
4208   MVT XLenVT = Subtarget.getXLenVT();
4209 
4210   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4211          "Unexpected VTs!");
4212   assert(MSN->getBasePtr().getSimpleValueType() == XLenVT &&
4213          "Unexpected pointer type");
4214   // Targets have to explicitly opt-in for extending vector loads and
4215   // truncating vector stores.
4216   assert(!MSN->isTruncatingStore() && "Unexpected extending MSCATTER");
4217 
4218   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4219   // the selection of the masked intrinsics doesn't do this for us.
4220   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4221 
4222   SDValue VL;
4223   if (VT.isFixedLengthVector()) {
4224     // We need to use the larger of the value and index type to determine the
4225     // scalable type to use so we don't increase LMUL for any operand/result.
4226     if (VT.bitsGE(IndexVT)) {
4227       VT = getContainerForFixedLengthVector(VT);
4228       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4229                                  VT.getVectorElementCount());
4230     } else {
4231       IndexVT = getContainerForFixedLengthVector(IndexVT);
4232       VT = MVT::getVectorVT(VT.getVectorElementType(),
4233                             IndexVT.getVectorElementCount());
4234     }
4235 
4236     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4237     Val = convertToScalableVector(VT, Val, DAG, Subtarget);
4238 
4239     if (!IsUnmasked) {
4240       MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
4241       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4242     }
4243 
4244     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4245   } else
4246     VL = DAG.getRegister(RISCV::X0, XLenVT);
4247 
4248   unsigned IntID =
4249       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
4250   SmallVector<SDValue, 8> Ops{MSN->getChain(),
4251                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4252   Ops.push_back(Val);
4253   Ops.push_back(MSN->getBasePtr());
4254   Ops.push_back(Index);
4255   if (!IsUnmasked)
4256     Ops.push_back(Mask);
4257   Ops.push_back(VL);
4258 
4259   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
4260                                  MSN->getMemoryVT(), MSN->getMemOperand());
4261 }
4262 
4263 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
4264                                                SelectionDAG &DAG) const {
4265   const MVT XLenVT = Subtarget.getXLenVT();
4266   SDLoc DL(Op);
4267   SDValue Chain = Op->getOperand(0);
4268   SDValue SysRegNo = DAG.getConstant(
4269       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4270   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
4271   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
4272 
4273   // Encoding used for rounding mode in RISCV differs from that used in
4274   // FLT_ROUNDS. To convert it the RISCV rounding mode is used as an index in a
4275   // table, which consists of a sequence of 4-bit fields, each representing
4276   // corresponding FLT_ROUNDS mode.
4277   static const int Table =
4278       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
4279       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
4280       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
4281       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
4282       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
4283 
4284   SDValue Shift =
4285       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
4286   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4287                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4288   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4289                                DAG.getConstant(7, DL, XLenVT));
4290 
4291   return DAG.getMergeValues({Masked, Chain}, DL);
4292 }
4293 
4294 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
4295                                                SelectionDAG &DAG) const {
4296   const MVT XLenVT = Subtarget.getXLenVT();
4297   SDLoc DL(Op);
4298   SDValue Chain = Op->getOperand(0);
4299   SDValue RMValue = Op->getOperand(1);
4300   SDValue SysRegNo = DAG.getConstant(
4301       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4302 
4303   // Encoding used for rounding mode in RISCV differs from that used in
4304   // FLT_ROUNDS. To convert it the C rounding mode is used as an index in
4305   // a table, which consists of a sequence of 4-bit fields, each representing
4306   // corresponding RISCV mode.
4307   static const unsigned Table =
4308       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
4309       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
4310       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
4311       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
4312       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
4313 
4314   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
4315                               DAG.getConstant(2, DL, XLenVT));
4316   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4317                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4318   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4319                         DAG.getConstant(0x7, DL, XLenVT));
4320   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
4321                      RMValue);
4322 }
4323 
4324 // Returns the opcode of the target-specific SDNode that implements the 32-bit
4325 // form of the given Opcode.
4326 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
4327   switch (Opcode) {
4328   default:
4329     llvm_unreachable("Unexpected opcode");
4330   case ISD::SHL:
4331     return RISCVISD::SLLW;
4332   case ISD::SRA:
4333     return RISCVISD::SRAW;
4334   case ISD::SRL:
4335     return RISCVISD::SRLW;
4336   case ISD::SDIV:
4337     return RISCVISD::DIVW;
4338   case ISD::UDIV:
4339     return RISCVISD::DIVUW;
4340   case ISD::UREM:
4341     return RISCVISD::REMUW;
4342   case ISD::ROTL:
4343     return RISCVISD::ROLW;
4344   case ISD::ROTR:
4345     return RISCVISD::RORW;
4346   case RISCVISD::GREV:
4347     return RISCVISD::GREVW;
4348   case RISCVISD::GORC:
4349     return RISCVISD::GORCW;
4350   }
4351 }
4352 
4353 // Converts the given 32-bit operation to a target-specific SelectionDAG node.
4354 // Because i32 isn't a legal type for RV64, these operations would otherwise
4355 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
4356 // later one because the fact the operation was originally of type i32 is
4357 // lost.
4358 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
4359                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
4360   SDLoc DL(N);
4361   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4362   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
4363   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
4364   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4365   // ReplaceNodeResults requires we maintain the same type for the return value.
4366   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
4367 }
4368 
4369 // Converts the given 32-bit operation to a i64 operation with signed extension
4370 // semantic to reduce the signed extension instructions.
4371 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
4372   SDLoc DL(N);
4373   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4374   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4375   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
4376   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
4377                                DAG.getValueType(MVT::i32));
4378   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
4379 }
4380 
4381 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
4382                                              SmallVectorImpl<SDValue> &Results,
4383                                              SelectionDAG &DAG) const {
4384   SDLoc DL(N);
4385   switch (N->getOpcode()) {
4386   default:
4387     llvm_unreachable("Don't know how to custom type legalize this operation!");
4388   case ISD::STRICT_FP_TO_SINT:
4389   case ISD::STRICT_FP_TO_UINT:
4390   case ISD::FP_TO_SINT:
4391   case ISD::FP_TO_UINT: {
4392     bool IsStrict = N->isStrictFPOpcode();
4393     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4394            "Unexpected custom legalisation");
4395     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
4396     // If the FP type needs to be softened, emit a library call using the 'si'
4397     // version. If we left it to default legalization we'd end up with 'di'. If
4398     // the FP type doesn't need to be softened just let generic type
4399     // legalization promote the result type.
4400     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
4401         TargetLowering::TypeSoftenFloat)
4402       return;
4403     RTLIB::Libcall LC;
4404     if (N->getOpcode() == ISD::FP_TO_SINT ||
4405         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
4406       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
4407     else
4408       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
4409     MakeLibCallOptions CallOptions;
4410     EVT OpVT = Op0.getValueType();
4411     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
4412     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
4413     SDValue Result;
4414     std::tie(Result, Chain) =
4415         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
4416     Results.push_back(Result);
4417     if (IsStrict)
4418       Results.push_back(Chain);
4419     break;
4420   }
4421   case ISD::READCYCLECOUNTER: {
4422     assert(!Subtarget.is64Bit() &&
4423            "READCYCLECOUNTER only has custom type legalization on riscv32");
4424 
4425     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4426     SDValue RCW =
4427         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
4428 
4429     Results.push_back(
4430         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
4431     Results.push_back(RCW.getValue(2));
4432     break;
4433   }
4434   case ISD::MUL: {
4435     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
4436     unsigned XLen = Subtarget.getXLen();
4437     // This multiply needs to be expanded, try to use MULHSU+MUL if possible.
4438     if (Size > XLen) {
4439       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
4440       SDValue LHS = N->getOperand(0);
4441       SDValue RHS = N->getOperand(1);
4442       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
4443 
4444       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
4445       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
4446       // We need exactly one side to be unsigned.
4447       if (LHSIsU == RHSIsU)
4448         return;
4449 
4450       auto MakeMULPair = [&](SDValue S, SDValue U) {
4451         MVT XLenVT = Subtarget.getXLenVT();
4452         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
4453         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
4454         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
4455         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
4456         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
4457       };
4458 
4459       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
4460       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
4461 
4462       // The other operand should be signed, but still prefer MULH when
4463       // possible.
4464       if (RHSIsU && LHSIsS && !RHSIsS)
4465         Results.push_back(MakeMULPair(LHS, RHS));
4466       else if (LHSIsU && RHSIsS && !LHSIsS)
4467         Results.push_back(MakeMULPair(RHS, LHS));
4468 
4469       return;
4470     }
4471     LLVM_FALLTHROUGH;
4472   }
4473   case ISD::ADD:
4474   case ISD::SUB:
4475     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4476            "Unexpected custom legalisation");
4477     if (N->getOperand(1).getOpcode() == ISD::Constant)
4478       return;
4479     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
4480     break;
4481   case ISD::SHL:
4482   case ISD::SRA:
4483   case ISD::SRL:
4484     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4485            "Unexpected custom legalisation");
4486     if (N->getOperand(1).getOpcode() == ISD::Constant)
4487       return;
4488     Results.push_back(customLegalizeToWOp(N, DAG));
4489     break;
4490   case ISD::ROTL:
4491   case ISD::ROTR:
4492     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4493            "Unexpected custom legalisation");
4494     Results.push_back(customLegalizeToWOp(N, DAG));
4495     break;
4496   case ISD::CTTZ:
4497   case ISD::CTTZ_ZERO_UNDEF:
4498   case ISD::CTLZ:
4499   case ISD::CTLZ_ZERO_UNDEF: {
4500     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4501            "Unexpected custom legalisation");
4502 
4503     SDValue NewOp0 =
4504         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4505     bool IsCTZ =
4506         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
4507     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
4508     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
4509     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4510     return;
4511   }
4512   case ISD::SDIV:
4513   case ISD::UDIV:
4514   case ISD::UREM: {
4515     MVT VT = N->getSimpleValueType(0);
4516     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
4517            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
4518            "Unexpected custom legalisation");
4519     if (N->getOperand(0).getOpcode() == ISD::Constant ||
4520         N->getOperand(1).getOpcode() == ISD::Constant)
4521       return;
4522 
4523     // If the input is i32, use ANY_EXTEND since the W instructions don't read
4524     // the upper 32 bits. For other types we need to sign or zero extend
4525     // based on the opcode.
4526     unsigned ExtOpc = ISD::ANY_EXTEND;
4527     if (VT != MVT::i32)
4528       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
4529                                            : ISD::ZERO_EXTEND;
4530 
4531     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
4532     break;
4533   }
4534   case ISD::UADDO:
4535   case ISD::USUBO: {
4536     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4537            "Unexpected custom legalisation");
4538     bool IsAdd = N->getOpcode() == ISD::UADDO;
4539     // Create an ADDW or SUBW.
4540     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4541     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4542     SDValue Res =
4543         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
4544     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
4545                       DAG.getValueType(MVT::i32));
4546 
4547     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
4548     // Since the inputs are sign extended from i32, this is equivalent to
4549     // comparing the lower 32 bits.
4550     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4551     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
4552                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
4553 
4554     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4555     Results.push_back(Overflow);
4556     return;
4557   }
4558   case ISD::UADDSAT:
4559   case ISD::USUBSAT: {
4560     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4561            "Unexpected custom legalisation");
4562     if (Subtarget.hasStdExtZbb()) {
4563       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
4564       // sign extend allows overflow of the lower 32 bits to be detected on
4565       // the promoted size.
4566       SDValue LHS =
4567           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4568       SDValue RHS =
4569           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
4570       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
4571       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4572       return;
4573     }
4574 
4575     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
4576     // promotion for UADDO/USUBO.
4577     Results.push_back(expandAddSubSat(N, DAG));
4578     return;
4579   }
4580   case ISD::BITCAST: {
4581     EVT VT = N->getValueType(0);
4582     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
4583     SDValue Op0 = N->getOperand(0);
4584     EVT Op0VT = Op0.getValueType();
4585     MVT XLenVT = Subtarget.getXLenVT();
4586     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
4587       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
4588       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
4589     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
4590                Subtarget.hasStdExtF()) {
4591       SDValue FPConv =
4592           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
4593       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
4594     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
4595                isTypeLegal(Op0VT)) {
4596       // Custom-legalize bitcasts from fixed-length vector types to illegal
4597       // scalar types in order to improve codegen. Bitcast the vector to a
4598       // one-element vector type whose element type is the same as the result
4599       // type, and extract the first element.
4600       LLVMContext &Context = *DAG.getContext();
4601       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
4602       Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
4603                                     DAG.getConstant(0, DL, XLenVT)));
4604     }
4605     break;
4606   }
4607   case RISCVISD::GREV:
4608   case RISCVISD::GORC: {
4609     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4610            "Unexpected custom legalisation");
4611     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
4612     // This is similar to customLegalizeToWOp, except that we pass the second
4613     // operand (a TargetConstant) straight through: it is already of type
4614     // XLenVT.
4615     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4616     SDValue NewOp0 =
4617         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4618     SDValue NewOp1 =
4619         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4620     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4621     // ReplaceNodeResults requires we maintain the same type for the return
4622     // value.
4623     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4624     break;
4625   }
4626   case RISCVISD::SHFL: {
4627     // There is no SHFLIW instruction, but we can just promote the operation.
4628     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4629            "Unexpected custom legalisation");
4630     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
4631     SDValue NewOp0 =
4632         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4633     SDValue NewOp1 =
4634         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4635     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
4636     // ReplaceNodeResults requires we maintain the same type for the return
4637     // value.
4638     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4639     break;
4640   }
4641   case ISD::BSWAP:
4642   case ISD::BITREVERSE: {
4643     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4644            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
4645     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
4646                                  N->getOperand(0));
4647     unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24;
4648     SDValue GREVIW = DAG.getNode(RISCVISD::GREVW, DL, MVT::i64, NewOp0,
4649                                  DAG.getConstant(Imm, DL, MVT::i64));
4650     // ReplaceNodeResults requires we maintain the same type for the return
4651     // value.
4652     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW));
4653     break;
4654   }
4655   case ISD::FSHL:
4656   case ISD::FSHR: {
4657     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4658            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
4659     SDValue NewOp0 =
4660         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4661     SDValue NewOp1 =
4662         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4663     SDValue NewOp2 =
4664         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4665     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
4666     // Mask the shift amount to 5 bits.
4667     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
4668                          DAG.getConstant(0x1f, DL, MVT::i64));
4669     unsigned Opc =
4670         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
4671     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
4672     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
4673     break;
4674   }
4675   case ISD::EXTRACT_VECTOR_ELT: {
4676     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
4677     // type is illegal (currently only vXi64 RV32).
4678     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
4679     // transferred to the destination register. We issue two of these from the
4680     // upper- and lower- halves of the SEW-bit vector element, slid down to the
4681     // first element.
4682     SDValue Vec = N->getOperand(0);
4683     SDValue Idx = N->getOperand(1);
4684 
4685     // The vector type hasn't been legalized yet so we can't issue target
4686     // specific nodes if it needs legalization.
4687     // FIXME: We would manually legalize if it's important.
4688     if (!isTypeLegal(Vec.getValueType()))
4689       return;
4690 
4691     MVT VecVT = Vec.getSimpleValueType();
4692 
4693     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
4694            VecVT.getVectorElementType() == MVT::i64 &&
4695            "Unexpected EXTRACT_VECTOR_ELT legalization");
4696 
4697     // If this is a fixed vector, we need to convert it to a scalable vector.
4698     MVT ContainerVT = VecVT;
4699     if (VecVT.isFixedLengthVector()) {
4700       ContainerVT = getContainerForFixedLengthVector(VecVT);
4701       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4702     }
4703 
4704     MVT XLenVT = Subtarget.getXLenVT();
4705 
4706     // Use a VL of 1 to avoid processing more elements than we need.
4707     MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
4708     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4709     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4710 
4711     // Unless the index is known to be 0, we must slide the vector down to get
4712     // the desired element into index 0.
4713     if (!isNullConstant(Idx)) {
4714       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4715                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4716     }
4717 
4718     // Extract the lower XLEN bits of the correct vector element.
4719     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4720 
4721     // To extract the upper XLEN bits of the vector element, shift the first
4722     // element right by 32 bits and re-extract the lower XLEN bits.
4723     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4724                                      DAG.getConstant(32, DL, XLenVT), VL);
4725     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
4726                                  ThirtyTwoV, Mask, VL);
4727 
4728     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
4729 
4730     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
4731     break;
4732   }
4733   case ISD::INTRINSIC_WO_CHAIN: {
4734     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
4735     switch (IntNo) {
4736     default:
4737       llvm_unreachable(
4738           "Don't know how to custom type legalize this intrinsic!");
4739     case Intrinsic::riscv_orc_b: {
4740       // Lower to the GORCI encoding for orc.b with the operand extended.
4741       SDValue NewOp =
4742           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4743       // If Zbp is enabled, use GORCIW which will sign extend the result.
4744       unsigned Opc =
4745           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
4746       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
4747                                 DAG.getConstant(7, DL, MVT::i64));
4748       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4749       return;
4750     }
4751     case Intrinsic::riscv_grev:
4752     case Intrinsic::riscv_gorc: {
4753       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4754              "Unexpected custom legalisation");
4755       SDValue NewOp1 =
4756           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4757       SDValue NewOp2 =
4758           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4759       unsigned Opc =
4760           IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
4761       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
4762       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4763       break;
4764     }
4765     case Intrinsic::riscv_shfl:
4766     case Intrinsic::riscv_unshfl: {
4767       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4768              "Unexpected custom legalisation");
4769       SDValue NewOp1 =
4770           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4771       SDValue NewOp2 =
4772           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4773       unsigned Opc =
4774           IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
4775       if (isa<ConstantSDNode>(N->getOperand(2))) {
4776         NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
4777                              DAG.getConstant(0xf, DL, MVT::i64));
4778         Opc =
4779             IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4780       }
4781       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
4782       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4783       break;
4784     }
4785     case Intrinsic::riscv_bcompress:
4786     case Intrinsic::riscv_bdecompress: {
4787       assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4788              "Unexpected custom legalisation");
4789       SDValue NewOp1 =
4790           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4791       SDValue NewOp2 =
4792           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4793       unsigned Opc = IntNo == Intrinsic::riscv_bcompress
4794                          ? RISCVISD::BCOMPRESSW
4795                          : RISCVISD::BDECOMPRESSW;
4796       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
4797       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4798       break;
4799     }
4800     case Intrinsic::riscv_vmv_x_s: {
4801       EVT VT = N->getValueType(0);
4802       MVT XLenVT = Subtarget.getXLenVT();
4803       if (VT.bitsLT(XLenVT)) {
4804         // Simple case just extract using vmv.x.s and truncate.
4805         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
4806                                       Subtarget.getXLenVT(), N->getOperand(1));
4807         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
4808         return;
4809       }
4810 
4811       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
4812              "Unexpected custom legalization");
4813 
4814       // We need to do the move in two steps.
4815       SDValue Vec = N->getOperand(1);
4816       MVT VecVT = Vec.getSimpleValueType();
4817 
4818       // First extract the lower XLEN bits of the element.
4819       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4820 
4821       // To extract the upper XLEN bits of the vector element, shift the first
4822       // element right by 32 bits and re-extract the lower XLEN bits.
4823       SDValue VL = DAG.getConstant(1, DL, XLenVT);
4824       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
4825       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4826       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
4827                                        DAG.getConstant(32, DL, XLenVT), VL);
4828       SDValue LShr32 =
4829           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
4830       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
4831 
4832       Results.push_back(
4833           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
4834       break;
4835     }
4836     }
4837     break;
4838   }
4839   case ISD::VECREDUCE_ADD:
4840   case ISD::VECREDUCE_AND:
4841   case ISD::VECREDUCE_OR:
4842   case ISD::VECREDUCE_XOR:
4843   case ISD::VECREDUCE_SMAX:
4844   case ISD::VECREDUCE_UMAX:
4845   case ISD::VECREDUCE_SMIN:
4846   case ISD::VECREDUCE_UMIN:
4847     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
4848       Results.push_back(V);
4849     break;
4850   case ISD::FLT_ROUNDS_: {
4851     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
4852     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
4853     Results.push_back(Res.getValue(0));
4854     Results.push_back(Res.getValue(1));
4855     break;
4856   }
4857   }
4858 }
4859 
4860 // A structure to hold one of the bit-manipulation patterns below. Together, a
4861 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
4862 //   (or (and (shl x, 1), 0xAAAAAAAA),
4863 //       (and (srl x, 1), 0x55555555))
4864 struct RISCVBitmanipPat {
4865   SDValue Op;
4866   unsigned ShAmt;
4867   bool IsSHL;
4868 
4869   bool formsPairWith(const RISCVBitmanipPat &Other) const {
4870     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
4871   }
4872 };
4873 
4874 // Matches patterns of the form
4875 //   (and (shl x, C2), (C1 << C2))
4876 //   (and (srl x, C2), C1)
4877 //   (shl (and x, C1), C2)
4878 //   (srl (and x, (C1 << C2)), C2)
4879 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
4880 // The expected masks for each shift amount are specified in BitmanipMasks where
4881 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
4882 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
4883 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
4884 // XLen is 64.
4885 static Optional<RISCVBitmanipPat>
4886 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
4887   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
4888          "Unexpected number of masks");
4889   Optional<uint64_t> Mask;
4890   // Optionally consume a mask around the shift operation.
4891   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
4892     Mask = Op.getConstantOperandVal(1);
4893     Op = Op.getOperand(0);
4894   }
4895   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
4896     return None;
4897   bool IsSHL = Op.getOpcode() == ISD::SHL;
4898 
4899   if (!isa<ConstantSDNode>(Op.getOperand(1)))
4900     return None;
4901   uint64_t ShAmt = Op.getConstantOperandVal(1);
4902 
4903   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
4904   if (ShAmt >= Width && !isPowerOf2_64(ShAmt))
4905     return None;
4906   // If we don't have enough masks for 64 bit, then we must be trying to
4907   // match SHFL so we're only allowed to shift 1/4 of the width.
4908   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
4909     return None;
4910 
4911   SDValue Src = Op.getOperand(0);
4912 
4913   // The expected mask is shifted left when the AND is found around SHL
4914   // patterns.
4915   //   ((x >> 1) & 0x55555555)
4916   //   ((x << 1) & 0xAAAAAAAA)
4917   bool SHLExpMask = IsSHL;
4918 
4919   if (!Mask) {
4920     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
4921     // the mask is all ones: consume that now.
4922     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
4923       Mask = Src.getConstantOperandVal(1);
4924       Src = Src.getOperand(0);
4925       // The expected mask is now in fact shifted left for SRL, so reverse the
4926       // decision.
4927       //   ((x & 0xAAAAAAAA) >> 1)
4928       //   ((x & 0x55555555) << 1)
4929       SHLExpMask = !SHLExpMask;
4930     } else {
4931       // Use a default shifted mask of all-ones if there's no AND, truncated
4932       // down to the expected width. This simplifies the logic later on.
4933       Mask = maskTrailingOnes<uint64_t>(Width);
4934       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
4935     }
4936   }
4937 
4938   unsigned MaskIdx = Log2_32(ShAmt);
4939   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
4940 
4941   if (SHLExpMask)
4942     ExpMask <<= ShAmt;
4943 
4944   if (Mask != ExpMask)
4945     return None;
4946 
4947   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
4948 }
4949 
4950 // Matches any of the following bit-manipulation patterns:
4951 //   (and (shl x, 1), (0x55555555 << 1))
4952 //   (and (srl x, 1), 0x55555555)
4953 //   (shl (and x, 0x55555555), 1)
4954 //   (srl (and x, (0x55555555 << 1)), 1)
4955 // where the shift amount and mask may vary thus:
4956 //   [1]  = 0x55555555 / 0xAAAAAAAA
4957 //   [2]  = 0x33333333 / 0xCCCCCCCC
4958 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
4959 //   [8]  = 0x00FF00FF / 0xFF00FF00
4960 //   [16] = 0x0000FFFF / 0xFFFFFFFF
4961 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
4962 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
4963   // These are the unshifted masks which we use to match bit-manipulation
4964   // patterns. They may be shifted left in certain circumstances.
4965   static const uint64_t BitmanipMasks[] = {
4966       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
4967       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
4968 
4969   return matchRISCVBitmanipPat(Op, BitmanipMasks);
4970 }
4971 
4972 // Match the following pattern as a GREVI(W) operation
4973 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
4974 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
4975                                const RISCVSubtarget &Subtarget) {
4976   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
4977   EVT VT = Op.getValueType();
4978 
4979   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
4980     auto LHS = matchGREVIPat(Op.getOperand(0));
4981     auto RHS = matchGREVIPat(Op.getOperand(1));
4982     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
4983       SDLoc DL(Op);
4984       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
4985                          DAG.getConstant(LHS->ShAmt, DL, VT));
4986     }
4987   }
4988   return SDValue();
4989 }
4990 
4991 // Matches any the following pattern as a GORCI(W) operation
4992 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
4993 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
4994 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
4995 // Note that with the variant of 3.,
4996 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
4997 // the inner pattern will first be matched as GREVI and then the outer
4998 // pattern will be matched to GORC via the first rule above.
4999 // 4.  (or (rotl/rotr x, bitwidth/2), x)
5000 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
5001                                const RISCVSubtarget &Subtarget) {
5002   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
5003   EVT VT = Op.getValueType();
5004 
5005   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
5006     SDLoc DL(Op);
5007     SDValue Op0 = Op.getOperand(0);
5008     SDValue Op1 = Op.getOperand(1);
5009 
5010     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
5011       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
5012           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
5013           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
5014         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
5015       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
5016       if ((Reverse.getOpcode() == ISD::ROTL ||
5017            Reverse.getOpcode() == ISD::ROTR) &&
5018           Reverse.getOperand(0) == X &&
5019           isa<ConstantSDNode>(Reverse.getOperand(1))) {
5020         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
5021         if (RotAmt == (VT.getSizeInBits() / 2))
5022           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
5023                              DAG.getConstant(RotAmt, DL, VT));
5024       }
5025       return SDValue();
5026     };
5027 
5028     // Check for either commutable permutation of (or (GREVI x, shamt), x)
5029     if (SDValue V = MatchOROfReverse(Op0, Op1))
5030       return V;
5031     if (SDValue V = MatchOROfReverse(Op1, Op0))
5032       return V;
5033 
5034     // OR is commutable so canonicalize its OR operand to the left
5035     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
5036       std::swap(Op0, Op1);
5037     if (Op0.getOpcode() != ISD::OR)
5038       return SDValue();
5039     SDValue OrOp0 = Op0.getOperand(0);
5040     SDValue OrOp1 = Op0.getOperand(1);
5041     auto LHS = matchGREVIPat(OrOp0);
5042     // OR is commutable so swap the operands and try again: x might have been
5043     // on the left
5044     if (!LHS) {
5045       std::swap(OrOp0, OrOp1);
5046       LHS = matchGREVIPat(OrOp0);
5047     }
5048     auto RHS = matchGREVIPat(Op1);
5049     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
5050       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
5051                          DAG.getConstant(LHS->ShAmt, DL, VT));
5052     }
5053   }
5054   return SDValue();
5055 }
5056 
5057 // Matches any of the following bit-manipulation patterns:
5058 //   (and (shl x, 1), (0x22222222 << 1))
5059 //   (and (srl x, 1), 0x22222222)
5060 //   (shl (and x, 0x22222222), 1)
5061 //   (srl (and x, (0x22222222 << 1)), 1)
5062 // where the shift amount and mask may vary thus:
5063 //   [1]  = 0x22222222 / 0x44444444
5064 //   [2]  = 0x0C0C0C0C / 0x3C3C3C3C
5065 //   [4]  = 0x00F000F0 / 0x0F000F00
5066 //   [8]  = 0x0000FF00 / 0x00FF0000
5067 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
5068 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
5069   // These are the unshifted masks which we use to match bit-manipulation
5070   // patterns. They may be shifted left in certain circumstances.
5071   static const uint64_t BitmanipMasks[] = {
5072       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
5073       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
5074 
5075   return matchRISCVBitmanipPat(Op, BitmanipMasks);
5076 }
5077 
5078 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)
5079 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
5080                                const RISCVSubtarget &Subtarget) {
5081   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extenson");
5082   EVT VT = Op.getValueType();
5083 
5084   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
5085     return SDValue();
5086 
5087   SDValue Op0 = Op.getOperand(0);
5088   SDValue Op1 = Op.getOperand(1);
5089 
5090   // Or is commutable so canonicalize the second OR to the LHS.
5091   if (Op0.getOpcode() != ISD::OR)
5092     std::swap(Op0, Op1);
5093   if (Op0.getOpcode() != ISD::OR)
5094     return SDValue();
5095 
5096   // We found an inner OR, so our operands are the operands of the inner OR
5097   // and the other operand of the outer OR.
5098   SDValue A = Op0.getOperand(0);
5099   SDValue B = Op0.getOperand(1);
5100   SDValue C = Op1;
5101 
5102   auto Match1 = matchSHFLPat(A);
5103   auto Match2 = matchSHFLPat(B);
5104 
5105   // If neither matched, we failed.
5106   if (!Match1 && !Match2)
5107     return SDValue();
5108 
5109   // We had at least one match. if one failed, try the remaining C operand.
5110   if (!Match1) {
5111     std::swap(A, C);
5112     Match1 = matchSHFLPat(A);
5113     if (!Match1)
5114       return SDValue();
5115   } else if (!Match2) {
5116     std::swap(B, C);
5117     Match2 = matchSHFLPat(B);
5118     if (!Match2)
5119       return SDValue();
5120   }
5121   assert(Match1 && Match2);
5122 
5123   // Make sure our matches pair up.
5124   if (!Match1->formsPairWith(*Match2))
5125     return SDValue();
5126 
5127   // All the remains is to make sure C is an AND with the same input, that masks
5128   // out the bits that are being shuffled.
5129   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
5130       C.getOperand(0) != Match1->Op)
5131     return SDValue();
5132 
5133   uint64_t Mask = C.getConstantOperandVal(1);
5134 
5135   static const uint64_t BitmanipMasks[] = {
5136       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
5137       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
5138   };
5139 
5140   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
5141   unsigned MaskIdx = Log2_32(Match1->ShAmt);
5142   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
5143 
5144   if (Mask != ExpMask)
5145     return SDValue();
5146 
5147   SDLoc DL(Op);
5148   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
5149                      DAG.getConstant(Match1->ShAmt, DL, VT));
5150 }
5151 
5152 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
5153 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
5154 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
5155 // not undo itself, but they are redundant.
5156 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
5157   SDValue Src = N->getOperand(0);
5158 
5159   if (Src.getOpcode() != N->getOpcode())
5160     return SDValue();
5161 
5162   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
5163       !isa<ConstantSDNode>(Src.getOperand(1)))
5164     return SDValue();
5165 
5166   unsigned ShAmt1 = N->getConstantOperandVal(1);
5167   unsigned ShAmt2 = Src.getConstantOperandVal(1);
5168   Src = Src.getOperand(0);
5169 
5170   unsigned CombinedShAmt;
5171   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
5172     CombinedShAmt = ShAmt1 | ShAmt2;
5173   else
5174     CombinedShAmt = ShAmt1 ^ ShAmt2;
5175 
5176   if (CombinedShAmt == 0)
5177     return Src;
5178 
5179   SDLoc DL(N);
5180   return DAG.getNode(
5181       N->getOpcode(), DL, N->getValueType(0), Src,
5182       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
5183 }
5184 
5185 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
5186                                                DAGCombinerInfo &DCI) const {
5187   SelectionDAG &DAG = DCI.DAG;
5188 
5189   switch (N->getOpcode()) {
5190   default:
5191     break;
5192   case RISCVISD::SplitF64: {
5193     SDValue Op0 = N->getOperand(0);
5194     // If the input to SplitF64 is just BuildPairF64 then the operation is
5195     // redundant. Instead, use BuildPairF64's operands directly.
5196     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
5197       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
5198 
5199     SDLoc DL(N);
5200 
5201     // It's cheaper to materialise two 32-bit integers than to load a double
5202     // from the constant pool and transfer it to integer registers through the
5203     // stack.
5204     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
5205       APInt V = C->getValueAPF().bitcastToAPInt();
5206       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
5207       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
5208       return DCI.CombineTo(N, Lo, Hi);
5209     }
5210 
5211     // This is a target-specific version of a DAGCombine performed in
5212     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5213     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5214     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5215     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5216         !Op0.getNode()->hasOneUse())
5217       break;
5218     SDValue NewSplitF64 =
5219         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
5220                     Op0.getOperand(0));
5221     SDValue Lo = NewSplitF64.getValue(0);
5222     SDValue Hi = NewSplitF64.getValue(1);
5223     APInt SignBit = APInt::getSignMask(32);
5224     if (Op0.getOpcode() == ISD::FNEG) {
5225       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
5226                                   DAG.getConstant(SignBit, DL, MVT::i32));
5227       return DCI.CombineTo(N, Lo, NewHi);
5228     }
5229     assert(Op0.getOpcode() == ISD::FABS);
5230     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
5231                                 DAG.getConstant(~SignBit, DL, MVT::i32));
5232     return DCI.CombineTo(N, Lo, NewHi);
5233   }
5234   case RISCVISD::SLLW:
5235   case RISCVISD::SRAW:
5236   case RISCVISD::SRLW:
5237   case RISCVISD::ROLW:
5238   case RISCVISD::RORW: {
5239     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5240     SDValue LHS = N->getOperand(0);
5241     SDValue RHS = N->getOperand(1);
5242     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5243     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5244     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
5245         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
5246       if (N->getOpcode() != ISD::DELETED_NODE)
5247         DCI.AddToWorklist(N);
5248       return SDValue(N, 0);
5249     }
5250     break;
5251   }
5252   case RISCVISD::CLZW:
5253   case RISCVISD::CTZW: {
5254     // Only the lower 32 bits of the first operand are read
5255     SDValue Op0 = N->getOperand(0);
5256     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5257     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
5258       if (N->getOpcode() != ISD::DELETED_NODE)
5259         DCI.AddToWorklist(N);
5260       return SDValue(N, 0);
5261     }
5262     break;
5263   }
5264   case RISCVISD::FSL:
5265   case RISCVISD::FSR: {
5266     // Only the lower log2(Bitwidth)+1 bits of the the shift amount are read.
5267     SDValue ShAmt = N->getOperand(2);
5268     unsigned BitWidth = ShAmt.getValueSizeInBits();
5269     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5270     APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
5271     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5272       if (N->getOpcode() != ISD::DELETED_NODE)
5273         DCI.AddToWorklist(N);
5274       return SDValue(N, 0);
5275     }
5276     break;
5277   }
5278   case RISCVISD::FSLW:
5279   case RISCVISD::FSRW: {
5280     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
5281     // read.
5282     SDValue Op0 = N->getOperand(0);
5283     SDValue Op1 = N->getOperand(1);
5284     SDValue ShAmt = N->getOperand(2);
5285     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5286     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
5287     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
5288         SimplifyDemandedBits(Op1, OpMask, DCI) ||
5289         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5290       if (N->getOpcode() != ISD::DELETED_NODE)
5291         DCI.AddToWorklist(N);
5292       return SDValue(N, 0);
5293     }
5294     break;
5295   }
5296   case RISCVISD::GREV:
5297   case RISCVISD::GORC: {
5298     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
5299     SDValue ShAmt = N->getOperand(1);
5300     unsigned BitWidth = ShAmt.getValueSizeInBits();
5301     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5302     APInt ShAmtMask(BitWidth, BitWidth - 1);
5303     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5304       if (N->getOpcode() != ISD::DELETED_NODE)
5305         DCI.AddToWorklist(N);
5306       return SDValue(N, 0);
5307     }
5308 
5309     return combineGREVI_GORCI(N, DCI.DAG);
5310   }
5311   case RISCVISD::GREVW:
5312   case RISCVISD::GORCW: {
5313     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5314     SDValue LHS = N->getOperand(0);
5315     SDValue RHS = N->getOperand(1);
5316     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5317     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5318     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5319         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5320       if (N->getOpcode() != ISD::DELETED_NODE)
5321         DCI.AddToWorklist(N);
5322       return SDValue(N, 0);
5323     }
5324 
5325     return combineGREVI_GORCI(N, DCI.DAG);
5326   }
5327   case RISCVISD::SHFL:
5328   case RISCVISD::UNSHFL: {
5329     // Only the lower log2(Bitwidth) bits of the the shift amount are read.
5330     SDValue ShAmt = N->getOperand(1);
5331     unsigned BitWidth = ShAmt.getValueSizeInBits();
5332     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
5333     APInt ShAmtMask(BitWidth, (BitWidth / 2) - 1);
5334     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5335       if (N->getOpcode() != ISD::DELETED_NODE)
5336         DCI.AddToWorklist(N);
5337       return SDValue(N, 0);
5338     }
5339 
5340     break;
5341   }
5342   case RISCVISD::SHFLW:
5343   case RISCVISD::UNSHFLW: {
5344     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5345     SDValue LHS = N->getOperand(0);
5346     SDValue RHS = N->getOperand(1);
5347     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5348     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 4);
5349     if (SimplifyDemandedBits(LHS, LHSMask, DCI) ||
5350         SimplifyDemandedBits(RHS, RHSMask, DCI)) {
5351       if (N->getOpcode() != ISD::DELETED_NODE)
5352         DCI.AddToWorklist(N);
5353       return SDValue(N, 0);
5354     }
5355 
5356     break;
5357   }
5358   case RISCVISD::BCOMPRESSW:
5359   case RISCVISD::BDECOMPRESSW: {
5360     // Only the lower 32 bits of LHS and RHS are read.
5361     SDValue LHS = N->getOperand(0);
5362     SDValue RHS = N->getOperand(1);
5363     APInt Mask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5364     if (SimplifyDemandedBits(LHS, Mask, DCI) ||
5365         SimplifyDemandedBits(RHS, Mask, DCI)) {
5366       if (N->getOpcode() != ISD::DELETED_NODE)
5367         DCI.AddToWorklist(N);
5368       return SDValue(N, 0);
5369     }
5370 
5371     break;
5372   }
5373   case RISCVISD::FMV_X_ANYEXTW_RV64: {
5374     SDLoc DL(N);
5375     SDValue Op0 = N->getOperand(0);
5376     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
5377     // conversion is unnecessary and can be replaced with an ANY_EXTEND
5378     // of the FMV_W_X_RV64 operand.
5379     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
5380       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
5381              "Unexpected value type!");
5382       return Op0.getOperand(0);
5383     }
5384 
5385     // This is a target-specific version of a DAGCombine performed in
5386     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5387     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5388     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5389     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5390         !Op0.getNode()->hasOneUse())
5391       break;
5392     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
5393                                  Op0.getOperand(0));
5394     APInt SignBit = APInt::getSignMask(32).sext(64);
5395     if (Op0.getOpcode() == ISD::FNEG)
5396       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
5397                          DAG.getConstant(SignBit, DL, MVT::i64));
5398 
5399     assert(Op0.getOpcode() == ISD::FABS);
5400     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
5401                        DAG.getConstant(~SignBit, DL, MVT::i64));
5402   }
5403   case ISD::OR:
5404     if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget))
5405       return GREV;
5406     if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget))
5407       return GORC;
5408     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DCI.DAG, Subtarget))
5409       return SHFL;
5410     break;
5411   case RISCVISD::SELECT_CC: {
5412     // Transform
5413     SDValue LHS = N->getOperand(0);
5414     SDValue RHS = N->getOperand(1);
5415     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
5416     if (!ISD::isIntEqualitySetCC(CCVal))
5417       break;
5418 
5419     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
5420     //      (select_cc X, Y, lt, trueV, falseV)
5421     // Sometimes the setcc is introduced after select_cc has been formed.
5422     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5423         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5424       // If we're looking for eq 0 instead of ne 0, we need to invert the
5425       // condition.
5426       bool Invert = CCVal == ISD::SETEQ;
5427       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5428       if (Invert)
5429         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5430 
5431       SDLoc DL(N);
5432       RHS = LHS.getOperand(1);
5433       LHS = LHS.getOperand(0);
5434       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5435 
5436       SDValue TargetCC =
5437           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5438       return DAG.getNode(
5439           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5440           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5441     }
5442 
5443     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
5444     //      (select_cc X, Y, eq/ne, trueV, falseV)
5445     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5446       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
5447                          {LHS.getOperand(0), LHS.getOperand(1),
5448                           N->getOperand(2), N->getOperand(3),
5449                           N->getOperand(4)});
5450     // (select_cc X, 1, setne, trueV, falseV) ->
5451     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
5452     // This can occur when legalizing some floating point comparisons.
5453     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5454     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5455       SDLoc DL(N);
5456       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5457       SDValue TargetCC =
5458           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5459       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5460       return DAG.getNode(
5461           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5462           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5463     }
5464 
5465     break;
5466   }
5467   case RISCVISD::BR_CC: {
5468     SDValue LHS = N->getOperand(1);
5469     SDValue RHS = N->getOperand(2);
5470     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
5471     if (!ISD::isIntEqualitySetCC(CCVal))
5472       break;
5473 
5474     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
5475     //      (br_cc X, Y, lt, dest)
5476     // Sometimes the setcc is introduced after br_cc has been formed.
5477     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5478         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5479       // If we're looking for eq 0 instead of ne 0, we need to invert the
5480       // condition.
5481       bool Invert = CCVal == ISD::SETEQ;
5482       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5483       if (Invert)
5484         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5485 
5486       SDLoc DL(N);
5487       RHS = LHS.getOperand(1);
5488       LHS = LHS.getOperand(0);
5489       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5490 
5491       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5492                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
5493                          N->getOperand(4));
5494     }
5495 
5496     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
5497     //      (br_cc X, Y, eq/ne, trueV, falseV)
5498     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5499       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
5500                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
5501                          N->getOperand(3), N->getOperand(4));
5502 
5503     // (br_cc X, 1, setne, br_cc) ->
5504     // (br_cc X, 0, seteq, br_cc) if we can prove X is 0/1.
5505     // This can occur when legalizing some floating point comparisons.
5506     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5507     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5508       SDLoc DL(N);
5509       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5510       SDValue TargetCC = DAG.getCondCode(CCVal);
5511       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5512       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5513                          N->getOperand(0), LHS, RHS, TargetCC,
5514                          N->getOperand(4));
5515     }
5516     break;
5517   }
5518   case ISD::FCOPYSIGN: {
5519     EVT VT = N->getValueType(0);
5520     if (!VT.isVector())
5521       break;
5522     // There is a form of VFSGNJ which injects the negated sign of its second
5523     // operand. Try and bubble any FNEG up after the extend/round to produce
5524     // this optimized pattern. Avoid modifying cases where FP_ROUND and
5525     // TRUNC=1.
5526     SDValue In2 = N->getOperand(1);
5527     // Avoid cases where the extend/round has multiple uses, as duplicating
5528     // those is typically more expensive than removing a fneg.
5529     if (!In2.hasOneUse())
5530       break;
5531     if (In2.getOpcode() != ISD::FP_EXTEND &&
5532         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
5533       break;
5534     In2 = In2.getOperand(0);
5535     if (In2.getOpcode() != ISD::FNEG)
5536       break;
5537     SDLoc DL(N);
5538     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
5539     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
5540                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
5541   }
5542   case ISD::MGATHER:
5543   case ISD::MSCATTER: {
5544     if (!DCI.isBeforeLegalize())
5545       break;
5546     MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
5547     SDValue Index = MGSN->getIndex();
5548     EVT IndexVT = Index.getValueType();
5549     MVT XLenVT = Subtarget.getXLenVT();
5550     // RISCV indexed loads only support the "unsigned unscaled" addressing
5551     // mode, so anything else must be manually legalized.
5552     bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
5553                                 (MGSN->isIndexSigned() &&
5554                                  IndexVT.getVectorElementType().bitsLT(XLenVT));
5555     if (!NeedsIdxLegalization)
5556       break;
5557 
5558     SDLoc DL(N);
5559 
5560     // Any index legalization should first promote to XLenVT, so we don't lose
5561     // bits when scaling. This may create an illegal index type so we let
5562     // LLVM's legalization take care of the splitting.
5563     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
5564       IndexVT = IndexVT.changeVectorElementType(XLenVT);
5565       Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
5566                                                 : ISD::ZERO_EXTEND,
5567                           DL, IndexVT, Index);
5568     }
5569 
5570     unsigned Scale = N->getConstantOperandVal(5);
5571     if (MGSN->isIndexScaled() && Scale != 1) {
5572       // Manually scale the indices by the element size.
5573       // TODO: Sanitize the scale operand here?
5574       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
5575       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
5576       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
5577     }
5578 
5579     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
5580     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
5581       return DAG.getMaskedGather(
5582           N->getVTList(), MGSN->getMemoryVT(), DL,
5583           {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
5584            MGSN->getBasePtr(), Index, MGN->getScale()},
5585           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
5586     }
5587     const auto *MSN = cast<MaskedScatterSDNode>(N);
5588     return DAG.getMaskedScatter(
5589         N->getVTList(), MGSN->getMemoryVT(), DL,
5590         {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
5591          Index, MGSN->getScale()},
5592         MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
5593   }
5594   }
5595 
5596   return SDValue();
5597 }
5598 
5599 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
5600     const SDNode *N, CombineLevel Level) const {
5601   // The following folds are only desirable if `(OP _, c1 << c2)` can be
5602   // materialised in fewer instructions than `(OP _, c1)`:
5603   //
5604   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
5605   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
5606   SDValue N0 = N->getOperand(0);
5607   EVT Ty = N0.getValueType();
5608   if (Ty.isScalarInteger() &&
5609       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
5610     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
5611     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
5612     if (C1 && C2) {
5613       const APInt &C1Int = C1->getAPIntValue();
5614       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
5615 
5616       // We can materialise `c1 << c2` into an add immediate, so it's "free",
5617       // and the combine should happen, to potentially allow further combines
5618       // later.
5619       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
5620           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
5621         return true;
5622 
5623       // We can materialise `c1` in an add immediate, so it's "free", and the
5624       // combine should be prevented.
5625       if (C1Int.getMinSignedBits() <= 64 &&
5626           isLegalAddImmediate(C1Int.getSExtValue()))
5627         return false;
5628 
5629       // Neither constant will fit into an immediate, so find materialisation
5630       // costs.
5631       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
5632                                               Subtarget.is64Bit());
5633       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
5634           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
5635 
5636       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
5637       // combine should be prevented.
5638       if (C1Cost < ShiftedC1Cost)
5639         return false;
5640     }
5641   }
5642   return true;
5643 }
5644 
5645 bool RISCVTargetLowering::targetShrinkDemandedConstant(
5646     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
5647     TargetLoweringOpt &TLO) const {
5648   // Delay this optimization as late as possible.
5649   if (!TLO.LegalOps)
5650     return false;
5651 
5652   EVT VT = Op.getValueType();
5653   if (VT.isVector())
5654     return false;
5655 
5656   // Only handle AND for now.
5657   if (Op.getOpcode() != ISD::AND)
5658     return false;
5659 
5660   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
5661   if (!C)
5662     return false;
5663 
5664   const APInt &Mask = C->getAPIntValue();
5665 
5666   // Clear all non-demanded bits initially.
5667   APInt ShrunkMask = Mask & DemandedBits;
5668 
5669   // Try to make a smaller immediate by setting undemanded bits.
5670 
5671   APInt ExpandedMask = Mask | ~DemandedBits;
5672 
5673   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
5674     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
5675   };
5676   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
5677     if (NewMask == Mask)
5678       return true;
5679     SDLoc DL(Op);
5680     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
5681     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
5682     return TLO.CombineTo(Op, NewOp);
5683   };
5684 
5685   // If the shrunk mask fits in sign extended 12 bits, let the target
5686   // independent code apply it.
5687   if (ShrunkMask.isSignedIntN(12))
5688     return false;
5689 
5690   // Preserve (and X, 0xffff) when zext.h is supported.
5691   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
5692     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
5693     if (IsLegalMask(NewMask))
5694       return UseMask(NewMask);
5695   }
5696 
5697   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
5698   if (VT == MVT::i64) {
5699     APInt NewMask = APInt(64, 0xffffffff);
5700     if (IsLegalMask(NewMask))
5701       return UseMask(NewMask);
5702   }
5703 
5704   // For the remaining optimizations, we need to be able to make a negative
5705   // number through a combination of mask and undemanded bits.
5706   if (!ExpandedMask.isNegative())
5707     return false;
5708 
5709   // What is the fewest number of bits we need to represent the negative number.
5710   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
5711 
5712   // Try to make a 12 bit negative immediate. If that fails try to make a 32
5713   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
5714   APInt NewMask = ShrunkMask;
5715   if (MinSignedBits <= 12)
5716     NewMask.setBitsFrom(11);
5717   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
5718     NewMask.setBitsFrom(31);
5719   else
5720     return false;
5721 
5722   // Sanity check that our new mask is a subset of the demanded mask.
5723   assert(IsLegalMask(NewMask));
5724   return UseMask(NewMask);
5725 }
5726 
5727 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
5728                                                         KnownBits &Known,
5729                                                         const APInt &DemandedElts,
5730                                                         const SelectionDAG &DAG,
5731                                                         unsigned Depth) const {
5732   unsigned BitWidth = Known.getBitWidth();
5733   unsigned Opc = Op.getOpcode();
5734   assert((Opc >= ISD::BUILTIN_OP_END ||
5735           Opc == ISD::INTRINSIC_WO_CHAIN ||
5736           Opc == ISD::INTRINSIC_W_CHAIN ||
5737           Opc == ISD::INTRINSIC_VOID) &&
5738          "Should use MaskedValueIsZero if you don't know whether Op"
5739          " is a target node!");
5740 
5741   Known.resetAll();
5742   switch (Opc) {
5743   default: break;
5744   case RISCVISD::SELECT_CC: {
5745     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
5746     // If we don't know any bits, early out.
5747     if (Known.isUnknown())
5748       break;
5749     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
5750 
5751     // Only known if known in both the LHS and RHS.
5752     Known = KnownBits::commonBits(Known, Known2);
5753     break;
5754   }
5755   case RISCVISD::REMUW: {
5756     KnownBits Known2;
5757     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
5758     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
5759     // We only care about the lower 32 bits.
5760     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
5761     // Restore the original width by sign extending.
5762     Known = Known.sext(BitWidth);
5763     break;
5764   }
5765   case RISCVISD::DIVUW: {
5766     KnownBits Known2;
5767     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
5768     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
5769     // We only care about the lower 32 bits.
5770     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
5771     // Restore the original width by sign extending.
5772     Known = Known.sext(BitWidth);
5773     break;
5774   }
5775   case RISCVISD::CTZW: {
5776     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
5777     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
5778     unsigned LowBits = Log2_32(PossibleTZ) + 1;
5779     Known.Zero.setBitsFrom(LowBits);
5780     break;
5781   }
5782   case RISCVISD::CLZW: {
5783     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
5784     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
5785     unsigned LowBits = Log2_32(PossibleLZ) + 1;
5786     Known.Zero.setBitsFrom(LowBits);
5787     break;
5788   }
5789   case RISCVISD::READ_VLENB:
5790     // We assume VLENB is at least 16 bytes.
5791     Known.Zero.setLowBits(4);
5792     break;
5793   }
5794 }
5795 
5796 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
5797     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
5798     unsigned Depth) const {
5799   switch (Op.getOpcode()) {
5800   default:
5801     break;
5802   case RISCVISD::SLLW:
5803   case RISCVISD::SRAW:
5804   case RISCVISD::SRLW:
5805   case RISCVISD::DIVW:
5806   case RISCVISD::DIVUW:
5807   case RISCVISD::REMUW:
5808   case RISCVISD::ROLW:
5809   case RISCVISD::RORW:
5810   case RISCVISD::GREVW:
5811   case RISCVISD::GORCW:
5812   case RISCVISD::FSLW:
5813   case RISCVISD::FSRW:
5814   case RISCVISD::SHFLW:
5815   case RISCVISD::UNSHFLW:
5816   case RISCVISD::BCOMPRESSW:
5817   case RISCVISD::BDECOMPRESSW:
5818     // TODO: As the result is sign-extended, this is conservatively correct. A
5819     // more precise answer could be calculated for SRAW depending on known
5820     // bits in the shift amount.
5821     return 33;
5822   case RISCVISD::SHFL:
5823   case RISCVISD::UNSHFL: {
5824     // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word
5825     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
5826     // will stay within the upper 32 bits. If there were more than 32 sign bits
5827     // before there will be at least 33 sign bits after.
5828     if (Op.getValueType() == MVT::i64 &&
5829         isa<ConstantSDNode>(Op.getOperand(1)) &&
5830         (Op.getConstantOperandVal(1) & 0x10) == 0) {
5831       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
5832       if (Tmp > 32)
5833         return 33;
5834     }
5835     break;
5836   }
5837   case RISCVISD::VMV_X_S:
5838     // The number of sign bits of the scalar result is computed by obtaining the
5839     // element type of the input vector operand, subtracting its width from the
5840     // XLEN, and then adding one (sign bit within the element type). If the
5841     // element type is wider than XLen, the least-significant XLEN bits are
5842     // taken.
5843     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
5844       return 1;
5845     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
5846   }
5847 
5848   return 1;
5849 }
5850 
5851 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
5852                                                   MachineBasicBlock *BB) {
5853   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
5854 
5855   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
5856   // Should the count have wrapped while it was being read, we need to try
5857   // again.
5858   // ...
5859   // read:
5860   // rdcycleh x3 # load high word of cycle
5861   // rdcycle  x2 # load low word of cycle
5862   // rdcycleh x4 # load high word of cycle
5863   // bne x3, x4, read # check if high word reads match, otherwise try again
5864   // ...
5865 
5866   MachineFunction &MF = *BB->getParent();
5867   const BasicBlock *LLVM_BB = BB->getBasicBlock();
5868   MachineFunction::iterator It = ++BB->getIterator();
5869 
5870   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
5871   MF.insert(It, LoopMBB);
5872 
5873   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
5874   MF.insert(It, DoneMBB);
5875 
5876   // Transfer the remainder of BB and its successor edges to DoneMBB.
5877   DoneMBB->splice(DoneMBB->begin(), BB,
5878                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
5879   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
5880 
5881   BB->addSuccessor(LoopMBB);
5882 
5883   MachineRegisterInfo &RegInfo = MF.getRegInfo();
5884   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
5885   Register LoReg = MI.getOperand(0).getReg();
5886   Register HiReg = MI.getOperand(1).getReg();
5887   DebugLoc DL = MI.getDebugLoc();
5888 
5889   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
5890   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
5891       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
5892       .addReg(RISCV::X0);
5893   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
5894       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
5895       .addReg(RISCV::X0);
5896   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
5897       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
5898       .addReg(RISCV::X0);
5899 
5900   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
5901       .addReg(HiReg)
5902       .addReg(ReadAgainReg)
5903       .addMBB(LoopMBB);
5904 
5905   LoopMBB->addSuccessor(LoopMBB);
5906   LoopMBB->addSuccessor(DoneMBB);
5907 
5908   MI.eraseFromParent();
5909 
5910   return DoneMBB;
5911 }
5912 
5913 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
5914                                              MachineBasicBlock *BB) {
5915   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
5916 
5917   MachineFunction &MF = *BB->getParent();
5918   DebugLoc DL = MI.getDebugLoc();
5919   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
5920   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
5921   Register LoReg = MI.getOperand(0).getReg();
5922   Register HiReg = MI.getOperand(1).getReg();
5923   Register SrcReg = MI.getOperand(2).getReg();
5924   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
5925   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
5926 
5927   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
5928                           RI);
5929   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
5930   MachineMemOperand *MMOLo =
5931       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
5932   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
5933       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
5934   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
5935       .addFrameIndex(FI)
5936       .addImm(0)
5937       .addMemOperand(MMOLo);
5938   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
5939       .addFrameIndex(FI)
5940       .addImm(4)
5941       .addMemOperand(MMOHi);
5942   MI.eraseFromParent(); // The pseudo instruction is gone now.
5943   return BB;
5944 }
5945 
5946 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
5947                                                  MachineBasicBlock *BB) {
5948   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
5949          "Unexpected instruction");
5950 
5951   MachineFunction &MF = *BB->getParent();
5952   DebugLoc DL = MI.getDebugLoc();
5953   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
5954   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
5955   Register DstReg = MI.getOperand(0).getReg();
5956   Register LoReg = MI.getOperand(1).getReg();
5957   Register HiReg = MI.getOperand(2).getReg();
5958   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
5959   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
5960 
5961   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
5962   MachineMemOperand *MMOLo =
5963       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
5964   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
5965       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
5966   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
5967       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
5968       .addFrameIndex(FI)
5969       .addImm(0)
5970       .addMemOperand(MMOLo);
5971   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
5972       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
5973       .addFrameIndex(FI)
5974       .addImm(4)
5975       .addMemOperand(MMOHi);
5976   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
5977   MI.eraseFromParent(); // The pseudo instruction is gone now.
5978   return BB;
5979 }
5980 
5981 static bool isSelectPseudo(MachineInstr &MI) {
5982   switch (MI.getOpcode()) {
5983   default:
5984     return false;
5985   case RISCV::Select_GPR_Using_CC_GPR:
5986   case RISCV::Select_FPR16_Using_CC_GPR:
5987   case RISCV::Select_FPR32_Using_CC_GPR:
5988   case RISCV::Select_FPR64_Using_CC_GPR:
5989     return true;
5990   }
5991 }
5992 
5993 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
5994                                            MachineBasicBlock *BB) {
5995   // To "insert" Select_* instructions, we actually have to insert the triangle
5996   // control-flow pattern.  The incoming instructions know the destination vreg
5997   // to set, the condition code register to branch on, the true/false values to
5998   // select between, and the condcode to use to select the appropriate branch.
5999   //
6000   // We produce the following control flow:
6001   //     HeadMBB
6002   //     |  \
6003   //     |  IfFalseMBB
6004   //     | /
6005   //    TailMBB
6006   //
6007   // When we find a sequence of selects we attempt to optimize their emission
6008   // by sharing the control flow. Currently we only handle cases where we have
6009   // multiple selects with the exact same condition (same LHS, RHS and CC).
6010   // The selects may be interleaved with other instructions if the other
6011   // instructions meet some requirements we deem safe:
6012   // - They are debug instructions. Otherwise,
6013   // - They do not have side-effects, do not access memory and their inputs do
6014   //   not depend on the results of the select pseudo-instructions.
6015   // The TrueV/FalseV operands of the selects cannot depend on the result of
6016   // previous selects in the sequence.
6017   // These conditions could be further relaxed. See the X86 target for a
6018   // related approach and more information.
6019   Register LHS = MI.getOperand(1).getReg();
6020   Register RHS = MI.getOperand(2).getReg();
6021   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
6022 
6023   SmallVector<MachineInstr *, 4> SelectDebugValues;
6024   SmallSet<Register, 4> SelectDests;
6025   SelectDests.insert(MI.getOperand(0).getReg());
6026 
6027   MachineInstr *LastSelectPseudo = &MI;
6028 
6029   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
6030        SequenceMBBI != E; ++SequenceMBBI) {
6031     if (SequenceMBBI->isDebugInstr())
6032       continue;
6033     else if (isSelectPseudo(*SequenceMBBI)) {
6034       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
6035           SequenceMBBI->getOperand(2).getReg() != RHS ||
6036           SequenceMBBI->getOperand(3).getImm() != CC ||
6037           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
6038           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
6039         break;
6040       LastSelectPseudo = &*SequenceMBBI;
6041       SequenceMBBI->collectDebugValues(SelectDebugValues);
6042       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
6043     } else {
6044       if (SequenceMBBI->hasUnmodeledSideEffects() ||
6045           SequenceMBBI->mayLoadOrStore())
6046         break;
6047       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
6048             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
6049           }))
6050         break;
6051     }
6052   }
6053 
6054   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
6055   const BasicBlock *LLVM_BB = BB->getBasicBlock();
6056   DebugLoc DL = MI.getDebugLoc();
6057   MachineFunction::iterator I = ++BB->getIterator();
6058 
6059   MachineBasicBlock *HeadMBB = BB;
6060   MachineFunction *F = BB->getParent();
6061   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
6062   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
6063 
6064   F->insert(I, IfFalseMBB);
6065   F->insert(I, TailMBB);
6066 
6067   // Transfer debug instructions associated with the selects to TailMBB.
6068   for (MachineInstr *DebugInstr : SelectDebugValues) {
6069     TailMBB->push_back(DebugInstr->removeFromParent());
6070   }
6071 
6072   // Move all instructions after the sequence to TailMBB.
6073   TailMBB->splice(TailMBB->end(), HeadMBB,
6074                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
6075   // Update machine-CFG edges by transferring all successors of the current
6076   // block to the new block which will contain the Phi nodes for the selects.
6077   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
6078   // Set the successors for HeadMBB.
6079   HeadMBB->addSuccessor(IfFalseMBB);
6080   HeadMBB->addSuccessor(TailMBB);
6081 
6082   // Insert appropriate branch.
6083   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
6084 
6085   BuildMI(HeadMBB, DL, TII.get(Opcode))
6086     .addReg(LHS)
6087     .addReg(RHS)
6088     .addMBB(TailMBB);
6089 
6090   // IfFalseMBB just falls through to TailMBB.
6091   IfFalseMBB->addSuccessor(TailMBB);
6092 
6093   // Create PHIs for all of the select pseudo-instructions.
6094   auto SelectMBBI = MI.getIterator();
6095   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
6096   auto InsertionPoint = TailMBB->begin();
6097   while (SelectMBBI != SelectEnd) {
6098     auto Next = std::next(SelectMBBI);
6099     if (isSelectPseudo(*SelectMBBI)) {
6100       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
6101       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
6102               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
6103           .addReg(SelectMBBI->getOperand(4).getReg())
6104           .addMBB(HeadMBB)
6105           .addReg(SelectMBBI->getOperand(5).getReg())
6106           .addMBB(IfFalseMBB);
6107       SelectMBBI->eraseFromParent();
6108     }
6109     SelectMBBI = Next;
6110   }
6111 
6112   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
6113   return TailMBB;
6114 }
6115 
6116 static MachineInstr *elideCopies(MachineInstr *MI,
6117                                  const MachineRegisterInfo &MRI) {
6118   while (true) {
6119     if (!MI->isFullCopy())
6120       return MI;
6121     if (!Register::isVirtualRegister(MI->getOperand(1).getReg()))
6122       return nullptr;
6123     MI = MRI.getVRegDef(MI->getOperand(1).getReg());
6124     if (!MI)
6125       return nullptr;
6126   }
6127 }
6128 
6129 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
6130                                     int VLIndex, unsigned SEWIndex,
6131                                     RISCVVLMUL VLMul, bool ForceTailAgnostic) {
6132   MachineFunction &MF = *BB->getParent();
6133   DebugLoc DL = MI.getDebugLoc();
6134   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
6135 
6136   unsigned SEW = MI.getOperand(SEWIndex).getImm();
6137   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
6138   RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8));
6139 
6140   MachineRegisterInfo &MRI = MF.getRegInfo();
6141 
6142   auto BuildVSETVLI = [&]() {
6143     if (VLIndex >= 0) {
6144       Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
6145       Register VLReg = MI.getOperand(VLIndex).getReg();
6146 
6147       // VL might be a compile time constant, but isel would have to put it
6148       // in a register. See if VL comes from an ADDI X0, imm.
6149       if (VLReg.isVirtual()) {
6150         MachineInstr *Def = MRI.getVRegDef(VLReg);
6151         if (Def && Def->getOpcode() == RISCV::ADDI &&
6152             Def->getOperand(1).getReg() == RISCV::X0 &&
6153             Def->getOperand(2).isImm()) {
6154           uint64_t Imm = Def->getOperand(2).getImm();
6155           // VSETIVLI allows a 5-bit zero extended immediate.
6156           if (isUInt<5>(Imm))
6157             return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETIVLI))
6158                 .addReg(DestReg, RegState::Define | RegState::Dead)
6159                 .addImm(Imm);
6160         }
6161       }
6162 
6163       return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
6164           .addReg(DestReg, RegState::Define | RegState::Dead)
6165           .addReg(VLReg);
6166     }
6167 
6168     // With no VL operator in the pseudo, do not modify VL (rd = X0, rs1 = X0).
6169     return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
6170         .addReg(RISCV::X0, RegState::Define | RegState::Dead)
6171         .addReg(RISCV::X0, RegState::Kill);
6172   };
6173 
6174   MachineInstrBuilder MIB = BuildVSETVLI();
6175 
6176   // Default to tail agnostic unless the destination is tied to a source. In
6177   // that case the user would have some control over the tail values. The tail
6178   // policy is also ignored on instructions that only update element 0 like
6179   // vmv.s.x or reductions so use agnostic there to match the common case.
6180   // FIXME: This is conservatively correct, but we might want to detect that
6181   // the input is undefined.
6182   bool TailAgnostic = true;
6183   unsigned UseOpIdx;
6184   if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
6185     TailAgnostic = false;
6186     // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic.
6187     const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
6188     MachineInstr *UseMI = MRI.getVRegDef(UseMO.getReg());
6189     if (UseMI) {
6190       UseMI = elideCopies(UseMI, MRI);
6191       if (UseMI && UseMI->isImplicitDef())
6192         TailAgnostic = true;
6193     }
6194   }
6195 
6196   // For simplicity we reuse the vtype representation here.
6197   MIB.addImm(RISCVVType::encodeVTYPE(VLMul, ElementWidth,
6198                                      /*TailAgnostic*/ TailAgnostic,
6199                                      /*MaskAgnostic*/ false));
6200 
6201   // Remove (now) redundant operands from pseudo
6202   if (VLIndex >= 0) {
6203     MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
6204     MI.getOperand(VLIndex).setIsKill(false);
6205   }
6206 
6207   return BB;
6208 }
6209 
6210 MachineBasicBlock *
6211 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
6212                                                  MachineBasicBlock *BB) const {
6213   uint64_t TSFlags = MI.getDesc().TSFlags;
6214 
6215   if (TSFlags & RISCVII::HasSEWOpMask) {
6216     unsigned NumOperands = MI.getNumExplicitOperands();
6217     int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ? NumOperands - 2 : -1;
6218     unsigned SEWIndex = NumOperands - 1;
6219     bool ForceTailAgnostic = TSFlags & RISCVII::ForceTailAgnosticMask;
6220 
6221     RISCVVLMUL VLMul = static_cast<RISCVVLMUL>((TSFlags & RISCVII::VLMulMask) >>
6222                                                RISCVII::VLMulShift);
6223     return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, ForceTailAgnostic);
6224   }
6225 
6226   switch (MI.getOpcode()) {
6227   default:
6228     llvm_unreachable("Unexpected instr type to insert");
6229   case RISCV::ReadCycleWide:
6230     assert(!Subtarget.is64Bit() &&
6231            "ReadCycleWrite is only to be used on riscv32");
6232     return emitReadCycleWidePseudo(MI, BB);
6233   case RISCV::Select_GPR_Using_CC_GPR:
6234   case RISCV::Select_FPR16_Using_CC_GPR:
6235   case RISCV::Select_FPR32_Using_CC_GPR:
6236   case RISCV::Select_FPR64_Using_CC_GPR:
6237     return emitSelectPseudo(MI, BB);
6238   case RISCV::BuildPairF64Pseudo:
6239     return emitBuildPairF64Pseudo(MI, BB);
6240   case RISCV::SplitF64Pseudo:
6241     return emitSplitF64Pseudo(MI, BB);
6242   }
6243 }
6244 
6245 // Calling Convention Implementation.
6246 // The expectations for frontend ABI lowering vary from target to target.
6247 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
6248 // details, but this is a longer term goal. For now, we simply try to keep the
6249 // role of the frontend as simple and well-defined as possible. The rules can
6250 // be summarised as:
6251 // * Never split up large scalar arguments. We handle them here.
6252 // * If a hardfloat calling convention is being used, and the struct may be
6253 // passed in a pair of registers (fp+fp, int+fp), and both registers are
6254 // available, then pass as two separate arguments. If either the GPRs or FPRs
6255 // are exhausted, then pass according to the rule below.
6256 // * If a struct could never be passed in registers or directly in a stack
6257 // slot (as it is larger than 2*XLEN and the floating point rules don't
6258 // apply), then pass it using a pointer with the byval attribute.
6259 // * If a struct is less than 2*XLEN, then coerce to either a two-element
6260 // word-sized array or a 2*XLEN scalar (depending on alignment).
6261 // * The frontend can determine whether a struct is returned by reference or
6262 // not based on its size and fields. If it will be returned by reference, the
6263 // frontend must modify the prototype so a pointer with the sret annotation is
6264 // passed as the first argument. This is not necessary for large scalar
6265 // returns.
6266 // * Struct return values and varargs should be coerced to structs containing
6267 // register-size fields in the same situations they would be for fixed
6268 // arguments.
6269 
6270 static const MCPhysReg ArgGPRs[] = {
6271   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
6272   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
6273 };
6274 static const MCPhysReg ArgFPR16s[] = {
6275   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
6276   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
6277 };
6278 static const MCPhysReg ArgFPR32s[] = {
6279   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
6280   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
6281 };
6282 static const MCPhysReg ArgFPR64s[] = {
6283   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
6284   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
6285 };
6286 // This is an interim calling convention and it may be changed in the future.
6287 static const MCPhysReg ArgVRs[] = {
6288     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
6289     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
6290     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
6291 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
6292                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
6293                                      RISCV::V20M2, RISCV::V22M2};
6294 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
6295                                      RISCV::V20M4};
6296 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
6297 
6298 // Pass a 2*XLEN argument that has been split into two XLEN values through
6299 // registers or the stack as necessary.
6300 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
6301                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
6302                                 MVT ValVT2, MVT LocVT2,
6303                                 ISD::ArgFlagsTy ArgFlags2) {
6304   unsigned XLenInBytes = XLen / 8;
6305   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6306     // At least one half can be passed via register.
6307     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
6308                                      VA1.getLocVT(), CCValAssign::Full));
6309   } else {
6310     // Both halves must be passed on the stack, with proper alignment.
6311     Align StackAlign =
6312         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
6313     State.addLoc(
6314         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
6315                             State.AllocateStack(XLenInBytes, StackAlign),
6316                             VA1.getLocVT(), CCValAssign::Full));
6317     State.addLoc(CCValAssign::getMem(
6318         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6319         LocVT2, CCValAssign::Full));
6320     return false;
6321   }
6322 
6323   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6324     // The second half can also be passed via register.
6325     State.addLoc(
6326         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
6327   } else {
6328     // The second half is passed via the stack, without additional alignment.
6329     State.addLoc(CCValAssign::getMem(
6330         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6331         LocVT2, CCValAssign::Full));
6332   }
6333 
6334   return false;
6335 }
6336 
6337 // Implements the RISC-V calling convention. Returns true upon failure.
6338 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
6339                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
6340                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
6341                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
6342                      Optional<unsigned> FirstMaskArgument) {
6343   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
6344   assert(XLen == 32 || XLen == 64);
6345   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
6346 
6347   // Any return value split in to more than two values can't be returned
6348   // directly. Vectors are returned via the available vector registers.
6349   if (!LocVT.isVector() && IsRet && ValNo > 1)
6350     return true;
6351 
6352   // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
6353   // variadic argument, or if no F16/F32 argument registers are available.
6354   bool UseGPRForF16_F32 = true;
6355   // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
6356   // variadic argument, or if no F64 argument registers are available.
6357   bool UseGPRForF64 = true;
6358 
6359   switch (ABI) {
6360   default:
6361     llvm_unreachable("Unexpected ABI");
6362   case RISCVABI::ABI_ILP32:
6363   case RISCVABI::ABI_LP64:
6364     break;
6365   case RISCVABI::ABI_ILP32F:
6366   case RISCVABI::ABI_LP64F:
6367     UseGPRForF16_F32 = !IsFixed;
6368     break;
6369   case RISCVABI::ABI_ILP32D:
6370   case RISCVABI::ABI_LP64D:
6371     UseGPRForF16_F32 = !IsFixed;
6372     UseGPRForF64 = !IsFixed;
6373     break;
6374   }
6375 
6376   // FPR16, FPR32, and FPR64 alias each other.
6377   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
6378     UseGPRForF16_F32 = true;
6379     UseGPRForF64 = true;
6380   }
6381 
6382   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
6383   // similar local variables rather than directly checking against the target
6384   // ABI.
6385 
6386   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
6387     LocVT = XLenVT;
6388     LocInfo = CCValAssign::BCvt;
6389   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
6390     LocVT = MVT::i64;
6391     LocInfo = CCValAssign::BCvt;
6392   }
6393 
6394   // If this is a variadic argument, the RISC-V calling convention requires
6395   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
6396   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
6397   // be used regardless of whether the original argument was split during
6398   // legalisation or not. The argument will not be passed by registers if the
6399   // original type is larger than 2*XLEN, so the register alignment rule does
6400   // not apply.
6401   unsigned TwoXLenInBytes = (2 * XLen) / 8;
6402   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
6403       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
6404     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
6405     // Skip 'odd' register if necessary.
6406     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
6407       State.AllocateReg(ArgGPRs);
6408   }
6409 
6410   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
6411   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
6412       State.getPendingArgFlags();
6413 
6414   assert(PendingLocs.size() == PendingArgFlags.size() &&
6415          "PendingLocs and PendingArgFlags out of sync");
6416 
6417   // Handle passing f64 on RV32D with a soft float ABI or when floating point
6418   // registers are exhausted.
6419   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
6420     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
6421            "Can't lower f64 if it is split");
6422     // Depending on available argument GPRS, f64 may be passed in a pair of
6423     // GPRs, split between a GPR and the stack, or passed completely on the
6424     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
6425     // cases.
6426     Register Reg = State.AllocateReg(ArgGPRs);
6427     LocVT = MVT::i32;
6428     if (!Reg) {
6429       unsigned StackOffset = State.AllocateStack(8, Align(8));
6430       State.addLoc(
6431           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
6432       return false;
6433     }
6434     if (!State.AllocateReg(ArgGPRs))
6435       State.AllocateStack(4, Align(4));
6436     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6437     return false;
6438   }
6439 
6440   // Fixed-length vectors are located in the corresponding scalable-vector
6441   // container types.
6442   if (ValVT.isFixedLengthVector())
6443     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
6444 
6445   // Split arguments might be passed indirectly, so keep track of the pending
6446   // values. Split vectors are passed via a mix of registers and indirectly, so
6447   // treat them as we would any other argument.
6448   if (!LocVT.isVector() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
6449     LocVT = XLenVT;
6450     LocInfo = CCValAssign::Indirect;
6451     PendingLocs.push_back(
6452         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
6453     PendingArgFlags.push_back(ArgFlags);
6454     if (!ArgFlags.isSplitEnd()) {
6455       return false;
6456     }
6457   }
6458 
6459   // If the split argument only had two elements, it should be passed directly
6460   // in registers or on the stack.
6461   if (!LocVT.isVector() && ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
6462     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
6463     // Apply the normal calling convention rules to the first half of the
6464     // split argument.
6465     CCValAssign VA = PendingLocs[0];
6466     ISD::ArgFlagsTy AF = PendingArgFlags[0];
6467     PendingLocs.clear();
6468     PendingArgFlags.clear();
6469     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
6470                                ArgFlags);
6471   }
6472 
6473   // Allocate to a register if possible, or else a stack slot.
6474   Register Reg;
6475   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
6476     Reg = State.AllocateReg(ArgFPR16s);
6477   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
6478     Reg = State.AllocateReg(ArgFPR32s);
6479   else if (ValVT == MVT::f64 && !UseGPRForF64)
6480     Reg = State.AllocateReg(ArgFPR64s);
6481   else if (ValVT.isVector()) {
6482     const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
6483     if (RC == &RISCV::VRRegClass) {
6484       // Assign the first mask argument to V0.
6485       // This is an interim calling convention and it may be changed in the
6486       // future.
6487       if (FirstMaskArgument.hasValue() &&
6488           ValNo == FirstMaskArgument.getValue()) {
6489         Reg = State.AllocateReg(RISCV::V0);
6490       } else {
6491         Reg = State.AllocateReg(ArgVRs);
6492       }
6493     } else if (RC == &RISCV::VRM2RegClass) {
6494       Reg = State.AllocateReg(ArgVRM2s);
6495     } else if (RC == &RISCV::VRM4RegClass) {
6496       Reg = State.AllocateReg(ArgVRM4s);
6497     } else if (RC == &RISCV::VRM8RegClass) {
6498       Reg = State.AllocateReg(ArgVRM8s);
6499     } else {
6500       llvm_unreachable("Unhandled class register for ValueType");
6501     }
6502     if (!Reg) {
6503       // For return values, the vector must be passed fully via registers or
6504       // via the stack.
6505       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
6506       // but we're using all of them.
6507       if (IsRet)
6508         return true;
6509       LocInfo = CCValAssign::Indirect;
6510       // Try using a GPR to pass the address
6511       Reg = State.AllocateReg(ArgGPRs);
6512       LocVT = XLenVT;
6513     }
6514   } else
6515     Reg = State.AllocateReg(ArgGPRs);
6516   unsigned StackOffset =
6517       Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
6518 
6519   // If we reach this point and PendingLocs is non-empty, we must be at the
6520   // end of a split argument that must be passed indirectly.
6521   if (!PendingLocs.empty()) {
6522     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
6523     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
6524 
6525     for (auto &It : PendingLocs) {
6526       if (Reg)
6527         It.convertToReg(Reg);
6528       else
6529         It.convertToMem(StackOffset);
6530       State.addLoc(It);
6531     }
6532     PendingLocs.clear();
6533     PendingArgFlags.clear();
6534     return false;
6535   }
6536 
6537   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
6538           (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
6539          "Expected an XLenVT or vector types at this stage");
6540 
6541   if (Reg) {
6542     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6543     return false;
6544   }
6545 
6546   // When a floating-point value is passed on the stack, no bit-conversion is
6547   // needed.
6548   if (ValVT.isFloatingPoint()) {
6549     LocVT = ValVT;
6550     LocInfo = CCValAssign::Full;
6551   }
6552   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
6553   return false;
6554 }
6555 
6556 template <typename ArgTy>
6557 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
6558   for (const auto &ArgIdx : enumerate(Args)) {
6559     MVT ArgVT = ArgIdx.value().VT;
6560     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
6561       return ArgIdx.index();
6562   }
6563   return None;
6564 }
6565 
6566 void RISCVTargetLowering::analyzeInputArgs(
6567     MachineFunction &MF, CCState &CCInfo,
6568     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
6569   unsigned NumArgs = Ins.size();
6570   FunctionType *FType = MF.getFunction().getFunctionType();
6571 
6572   Optional<unsigned> FirstMaskArgument;
6573   if (Subtarget.hasStdExtV())
6574     FirstMaskArgument = preAssignMask(Ins);
6575 
6576   for (unsigned i = 0; i != NumArgs; ++i) {
6577     MVT ArgVT = Ins[i].VT;
6578     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
6579 
6580     Type *ArgTy = nullptr;
6581     if (IsRet)
6582       ArgTy = FType->getReturnType();
6583     else if (Ins[i].isOrigArg())
6584       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
6585 
6586     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
6587     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
6588                  ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
6589                  FirstMaskArgument)) {
6590       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
6591                         << EVT(ArgVT).getEVTString() << '\n');
6592       llvm_unreachable(nullptr);
6593     }
6594   }
6595 }
6596 
6597 void RISCVTargetLowering::analyzeOutputArgs(
6598     MachineFunction &MF, CCState &CCInfo,
6599     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
6600     CallLoweringInfo *CLI) const {
6601   unsigned NumArgs = Outs.size();
6602 
6603   Optional<unsigned> FirstMaskArgument;
6604   if (Subtarget.hasStdExtV())
6605     FirstMaskArgument = preAssignMask(Outs);
6606 
6607   for (unsigned i = 0; i != NumArgs; i++) {
6608     MVT ArgVT = Outs[i].VT;
6609     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
6610     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
6611 
6612     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
6613     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
6614                  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
6615                  FirstMaskArgument)) {
6616       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
6617                         << EVT(ArgVT).getEVTString() << "\n");
6618       llvm_unreachable(nullptr);
6619     }
6620   }
6621 }
6622 
6623 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
6624 // values.
6625 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
6626                                    const CCValAssign &VA, const SDLoc &DL,
6627                                    const RISCVSubtarget &Subtarget) {
6628   switch (VA.getLocInfo()) {
6629   default:
6630     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6631   case CCValAssign::Full:
6632     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
6633       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
6634     break;
6635   case CCValAssign::BCvt:
6636     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
6637       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
6638     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
6639       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
6640     else
6641       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
6642     break;
6643   }
6644   return Val;
6645 }
6646 
6647 // The caller is responsible for loading the full value if the argument is
6648 // passed with CCValAssign::Indirect.
6649 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
6650                                 const CCValAssign &VA, const SDLoc &DL,
6651                                 const RISCVTargetLowering &TLI) {
6652   MachineFunction &MF = DAG.getMachineFunction();
6653   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6654   EVT LocVT = VA.getLocVT();
6655   SDValue Val;
6656   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
6657   Register VReg = RegInfo.createVirtualRegister(RC);
6658   RegInfo.addLiveIn(VA.getLocReg(), VReg);
6659   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
6660 
6661   if (VA.getLocInfo() == CCValAssign::Indirect)
6662     return Val;
6663 
6664   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
6665 }
6666 
6667 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
6668                                    const CCValAssign &VA, const SDLoc &DL,
6669                                    const RISCVSubtarget &Subtarget) {
6670   EVT LocVT = VA.getLocVT();
6671 
6672   switch (VA.getLocInfo()) {
6673   default:
6674     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6675   case CCValAssign::Full:
6676     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
6677       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
6678     break;
6679   case CCValAssign::BCvt:
6680     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
6681       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
6682     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
6683       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
6684     else
6685       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
6686     break;
6687   }
6688   return Val;
6689 }
6690 
6691 // The caller is responsible for loading the full value if the argument is
6692 // passed with CCValAssign::Indirect.
6693 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
6694                                 const CCValAssign &VA, const SDLoc &DL) {
6695   MachineFunction &MF = DAG.getMachineFunction();
6696   MachineFrameInfo &MFI = MF.getFrameInfo();
6697   EVT LocVT = VA.getLocVT();
6698   EVT ValVT = VA.getValVT();
6699   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
6700   int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
6701                                  VA.getLocMemOffset(), /*Immutable=*/true);
6702   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6703   SDValue Val;
6704 
6705   ISD::LoadExtType ExtType;
6706   switch (VA.getLocInfo()) {
6707   default:
6708     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6709   case CCValAssign::Full:
6710   case CCValAssign::Indirect:
6711   case CCValAssign::BCvt:
6712     ExtType = ISD::NON_EXTLOAD;
6713     break;
6714   }
6715   Val = DAG.getExtLoad(
6716       ExtType, DL, LocVT, Chain, FIN,
6717       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
6718   return Val;
6719 }
6720 
6721 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
6722                                        const CCValAssign &VA, const SDLoc &DL) {
6723   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
6724          "Unexpected VA");
6725   MachineFunction &MF = DAG.getMachineFunction();
6726   MachineFrameInfo &MFI = MF.getFrameInfo();
6727   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6728 
6729   if (VA.isMemLoc()) {
6730     // f64 is passed on the stack.
6731     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
6732     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
6733     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
6734                        MachinePointerInfo::getFixedStack(MF, FI));
6735   }
6736 
6737   assert(VA.isRegLoc() && "Expected register VA assignment");
6738 
6739   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6740   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
6741   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
6742   SDValue Hi;
6743   if (VA.getLocReg() == RISCV::X17) {
6744     // Second half of f64 is passed on the stack.
6745     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
6746     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
6747     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
6748                      MachinePointerInfo::getFixedStack(MF, FI));
6749   } else {
6750     // Second half of f64 is passed in another GPR.
6751     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6752     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
6753     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
6754   }
6755   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
6756 }
6757 
6758 // FastCC has less than 1% performance improvement for some particular
6759 // benchmark. But theoretically, it may has benenfit for some cases.
6760 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
6761                             CCValAssign::LocInfo LocInfo,
6762                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
6763 
6764   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
6765     // X5 and X6 might be used for save-restore libcall.
6766     static const MCPhysReg GPRList[] = {
6767         RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
6768         RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
6769         RISCV::X29, RISCV::X30, RISCV::X31};
6770     if (unsigned Reg = State.AllocateReg(GPRList)) {
6771       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6772       return false;
6773     }
6774   }
6775 
6776   if (LocVT == MVT::f16) {
6777     static const MCPhysReg FPR16List[] = {
6778         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
6779         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
6780         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
6781         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
6782     if (unsigned Reg = State.AllocateReg(FPR16List)) {
6783       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6784       return false;
6785     }
6786   }
6787 
6788   if (LocVT == MVT::f32) {
6789     static const MCPhysReg FPR32List[] = {
6790         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
6791         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
6792         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
6793         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
6794     if (unsigned Reg = State.AllocateReg(FPR32List)) {
6795       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6796       return false;
6797     }
6798   }
6799 
6800   if (LocVT == MVT::f64) {
6801     static const MCPhysReg FPR64List[] = {
6802         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
6803         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
6804         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
6805         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
6806     if (unsigned Reg = State.AllocateReg(FPR64List)) {
6807       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6808       return false;
6809     }
6810   }
6811 
6812   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
6813     unsigned Offset4 = State.AllocateStack(4, Align(4));
6814     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
6815     return false;
6816   }
6817 
6818   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
6819     unsigned Offset5 = State.AllocateStack(8, Align(8));
6820     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
6821     return false;
6822   }
6823 
6824   return true; // CC didn't match.
6825 }
6826 
6827 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
6828                          CCValAssign::LocInfo LocInfo,
6829                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
6830 
6831   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
6832     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
6833     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
6834     static const MCPhysReg GPRList[] = {
6835         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
6836         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
6837     if (unsigned Reg = State.AllocateReg(GPRList)) {
6838       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6839       return false;
6840     }
6841   }
6842 
6843   if (LocVT == MVT::f32) {
6844     // Pass in STG registers: F1, ..., F6
6845     //                        fs0 ... fs5
6846     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
6847                                           RISCV::F18_F, RISCV::F19_F,
6848                                           RISCV::F20_F, RISCV::F21_F};
6849     if (unsigned Reg = State.AllocateReg(FPR32List)) {
6850       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6851       return false;
6852     }
6853   }
6854 
6855   if (LocVT == MVT::f64) {
6856     // Pass in STG registers: D1, ..., D6
6857     //                        fs6 ... fs11
6858     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
6859                                           RISCV::F24_D, RISCV::F25_D,
6860                                           RISCV::F26_D, RISCV::F27_D};
6861     if (unsigned Reg = State.AllocateReg(FPR64List)) {
6862       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6863       return false;
6864     }
6865   }
6866 
6867   report_fatal_error("No registers left in GHC calling convention");
6868   return true;
6869 }
6870 
6871 // Transform physical registers into virtual registers.
6872 SDValue RISCVTargetLowering::LowerFormalArguments(
6873     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
6874     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
6875     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
6876 
6877   MachineFunction &MF = DAG.getMachineFunction();
6878 
6879   switch (CallConv) {
6880   default:
6881     report_fatal_error("Unsupported calling convention");
6882   case CallingConv::C:
6883   case CallingConv::Fast:
6884     break;
6885   case CallingConv::GHC:
6886     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
6887         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
6888       report_fatal_error(
6889         "GHC calling convention requires the F and D instruction set extensions");
6890   }
6891 
6892   const Function &Func = MF.getFunction();
6893   if (Func.hasFnAttribute("interrupt")) {
6894     if (!Func.arg_empty())
6895       report_fatal_error(
6896         "Functions with the interrupt attribute cannot have arguments!");
6897 
6898     StringRef Kind =
6899       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
6900 
6901     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
6902       report_fatal_error(
6903         "Function interrupt attribute argument not supported!");
6904   }
6905 
6906   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6907   MVT XLenVT = Subtarget.getXLenVT();
6908   unsigned XLenInBytes = Subtarget.getXLen() / 8;
6909   // Used with vargs to acumulate store chains.
6910   std::vector<SDValue> OutChains;
6911 
6912   // Assign locations to all of the incoming arguments.
6913   SmallVector<CCValAssign, 16> ArgLocs;
6914   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
6915 
6916   if (CallConv == CallingConv::Fast)
6917     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
6918   else if (CallConv == CallingConv::GHC)
6919     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
6920   else
6921     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
6922 
6923   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
6924     CCValAssign &VA = ArgLocs[i];
6925     SDValue ArgValue;
6926     // Passing f64 on RV32D with a soft float ABI must be handled as a special
6927     // case.
6928     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
6929       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
6930     else if (VA.isRegLoc())
6931       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
6932     else
6933       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
6934 
6935     if (VA.getLocInfo() == CCValAssign::Indirect) {
6936       // If the original argument was split and passed by reference (e.g. i128
6937       // on RV32), we need to load all parts of it here (using the same
6938       // address). Vectors may be partly split to registers and partly to the
6939       // stack, in which case the base address is partly offset and subsequent
6940       // stores are relative to that.
6941       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
6942                                    MachinePointerInfo()));
6943       unsigned ArgIndex = Ins[i].OrigArgIndex;
6944       unsigned ArgPartOffset = Ins[i].PartOffset;
6945       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
6946       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
6947         CCValAssign &PartVA = ArgLocs[i + 1];
6948         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
6949         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
6950                                       DAG.getIntPtrConstant(PartOffset, DL));
6951         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
6952                                      MachinePointerInfo()));
6953         ++i;
6954       }
6955       continue;
6956     }
6957     InVals.push_back(ArgValue);
6958   }
6959 
6960   if (IsVarArg) {
6961     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
6962     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
6963     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
6964     MachineFrameInfo &MFI = MF.getFrameInfo();
6965     MachineRegisterInfo &RegInfo = MF.getRegInfo();
6966     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
6967 
6968     // Offset of the first variable argument from stack pointer, and size of
6969     // the vararg save area. For now, the varargs save area is either zero or
6970     // large enough to hold a0-a7.
6971     int VaArgOffset, VarArgsSaveSize;
6972 
6973     // If all registers are allocated, then all varargs must be passed on the
6974     // stack and we don't need to save any argregs.
6975     if (ArgRegs.size() == Idx) {
6976       VaArgOffset = CCInfo.getNextStackOffset();
6977       VarArgsSaveSize = 0;
6978     } else {
6979       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
6980       VaArgOffset = -VarArgsSaveSize;
6981     }
6982 
6983     // Record the frame index of the first variable argument
6984     // which is a value necessary to VASTART.
6985     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
6986     RVFI->setVarArgsFrameIndex(FI);
6987 
6988     // If saving an odd number of registers then create an extra stack slot to
6989     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
6990     // offsets to even-numbered registered remain 2*XLEN-aligned.
6991     if (Idx % 2) {
6992       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
6993       VarArgsSaveSize += XLenInBytes;
6994     }
6995 
6996     // Copy the integer registers that may have been used for passing varargs
6997     // to the vararg save area.
6998     for (unsigned I = Idx; I < ArgRegs.size();
6999          ++I, VaArgOffset += XLenInBytes) {
7000       const Register Reg = RegInfo.createVirtualRegister(RC);
7001       RegInfo.addLiveIn(ArgRegs[I], Reg);
7002       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
7003       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
7004       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7005       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
7006                                    MachinePointerInfo::getFixedStack(MF, FI));
7007       cast<StoreSDNode>(Store.getNode())
7008           ->getMemOperand()
7009           ->setValue((Value *)nullptr);
7010       OutChains.push_back(Store);
7011     }
7012     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
7013   }
7014 
7015   // All stores are grouped in one node to allow the matching between
7016   // the size of Ins and InVals. This only happens for vararg functions.
7017   if (!OutChains.empty()) {
7018     OutChains.push_back(Chain);
7019     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
7020   }
7021 
7022   return Chain;
7023 }
7024 
7025 /// isEligibleForTailCallOptimization - Check whether the call is eligible
7026 /// for tail call optimization.
7027 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
7028 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
7029     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
7030     const SmallVector<CCValAssign, 16> &ArgLocs) const {
7031 
7032   auto &Callee = CLI.Callee;
7033   auto CalleeCC = CLI.CallConv;
7034   auto &Outs = CLI.Outs;
7035   auto &Caller = MF.getFunction();
7036   auto CallerCC = Caller.getCallingConv();
7037 
7038   // Exception-handling functions need a special set of instructions to
7039   // indicate a return to the hardware. Tail-calling another function would
7040   // probably break this.
7041   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
7042   // should be expanded as new function attributes are introduced.
7043   if (Caller.hasFnAttribute("interrupt"))
7044     return false;
7045 
7046   // Do not tail call opt if the stack is used to pass parameters.
7047   if (CCInfo.getNextStackOffset() != 0)
7048     return false;
7049 
7050   // Do not tail call opt if any parameters need to be passed indirectly.
7051   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
7052   // passed indirectly. So the address of the value will be passed in a
7053   // register, or if not available, then the address is put on the stack. In
7054   // order to pass indirectly, space on the stack often needs to be allocated
7055   // in order to store the value. In this case the CCInfo.getNextStackOffset()
7056   // != 0 check is not enough and we need to check if any CCValAssign ArgsLocs
7057   // are passed CCValAssign::Indirect.
7058   for (auto &VA : ArgLocs)
7059     if (VA.getLocInfo() == CCValAssign::Indirect)
7060       return false;
7061 
7062   // Do not tail call opt if either caller or callee uses struct return
7063   // semantics.
7064   auto IsCallerStructRet = Caller.hasStructRetAttr();
7065   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
7066   if (IsCallerStructRet || IsCalleeStructRet)
7067     return false;
7068 
7069   // Externally-defined functions with weak linkage should not be
7070   // tail-called. The behaviour of branch instructions in this situation (as
7071   // used for tail calls) is implementation-defined, so we cannot rely on the
7072   // linker replacing the tail call with a return.
7073   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
7074     const GlobalValue *GV = G->getGlobal();
7075     if (GV->hasExternalWeakLinkage())
7076       return false;
7077   }
7078 
7079   // The callee has to preserve all registers the caller needs to preserve.
7080   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
7081   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
7082   if (CalleeCC != CallerCC) {
7083     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
7084     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
7085       return false;
7086   }
7087 
7088   // Byval parameters hand the function a pointer directly into the stack area
7089   // we want to reuse during a tail call. Working around this *is* possible
7090   // but less efficient and uglier in LowerCall.
7091   for (auto &Arg : Outs)
7092     if (Arg.Flags.isByVal())
7093       return false;
7094 
7095   return true;
7096 }
7097 
7098 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
7099 // and output parameter nodes.
7100 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
7101                                        SmallVectorImpl<SDValue> &InVals) const {
7102   SelectionDAG &DAG = CLI.DAG;
7103   SDLoc &DL = CLI.DL;
7104   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
7105   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
7106   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
7107   SDValue Chain = CLI.Chain;
7108   SDValue Callee = CLI.Callee;
7109   bool &IsTailCall = CLI.IsTailCall;
7110   CallingConv::ID CallConv = CLI.CallConv;
7111   bool IsVarArg = CLI.IsVarArg;
7112   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7113   MVT XLenVT = Subtarget.getXLenVT();
7114 
7115   MachineFunction &MF = DAG.getMachineFunction();
7116 
7117   // Analyze the operands of the call, assigning locations to each operand.
7118   SmallVector<CCValAssign, 16> ArgLocs;
7119   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
7120 
7121   if (CallConv == CallingConv::Fast)
7122     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
7123   else if (CallConv == CallingConv::GHC)
7124     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
7125   else
7126     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
7127 
7128   // Check if it's really possible to do a tail call.
7129   if (IsTailCall)
7130     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
7131 
7132   if (IsTailCall)
7133     ++NumTailCalls;
7134   else if (CLI.CB && CLI.CB->isMustTailCall())
7135     report_fatal_error("failed to perform tail call elimination on a call "
7136                        "site marked musttail");
7137 
7138   // Get a count of how many bytes are to be pushed on the stack.
7139   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
7140 
7141   // Create local copies for byval args
7142   SmallVector<SDValue, 8> ByValArgs;
7143   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7144     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7145     if (!Flags.isByVal())
7146       continue;
7147 
7148     SDValue Arg = OutVals[i];
7149     unsigned Size = Flags.getByValSize();
7150     Align Alignment = Flags.getNonZeroByValAlign();
7151 
7152     int FI =
7153         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
7154     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
7155     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
7156 
7157     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
7158                           /*IsVolatile=*/false,
7159                           /*AlwaysInline=*/false, IsTailCall,
7160                           MachinePointerInfo(), MachinePointerInfo());
7161     ByValArgs.push_back(FIPtr);
7162   }
7163 
7164   if (!IsTailCall)
7165     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
7166 
7167   // Copy argument values to their designated locations.
7168   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
7169   SmallVector<SDValue, 8> MemOpChains;
7170   SDValue StackPtr;
7171   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
7172     CCValAssign &VA = ArgLocs[i];
7173     SDValue ArgValue = OutVals[i];
7174     ISD::ArgFlagsTy Flags = Outs[i].Flags;
7175 
7176     // Handle passing f64 on RV32D with a soft float ABI as a special case.
7177     bool IsF64OnRV32DSoftABI =
7178         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
7179     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
7180       SDValue SplitF64 = DAG.getNode(
7181           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
7182       SDValue Lo = SplitF64.getValue(0);
7183       SDValue Hi = SplitF64.getValue(1);
7184 
7185       Register RegLo = VA.getLocReg();
7186       RegsToPass.push_back(std::make_pair(RegLo, Lo));
7187 
7188       if (RegLo == RISCV::X17) {
7189         // Second half of f64 is passed on the stack.
7190         // Work out the address of the stack slot.
7191         if (!StackPtr.getNode())
7192           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7193         // Emit the store.
7194         MemOpChains.push_back(
7195             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
7196       } else {
7197         // Second half of f64 is passed in another GPR.
7198         assert(RegLo < RISCV::X31 && "Invalid register pair");
7199         Register RegHigh = RegLo + 1;
7200         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
7201       }
7202       continue;
7203     }
7204 
7205     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
7206     // as any other MemLoc.
7207 
7208     // Promote the value if needed.
7209     // For now, only handle fully promoted and indirect arguments.
7210     if (VA.getLocInfo() == CCValAssign::Indirect) {
7211       // Store the argument in a stack slot and pass its address.
7212       SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
7213       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
7214       MemOpChains.push_back(
7215           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
7216                        MachinePointerInfo::getFixedStack(MF, FI)));
7217       // If the original argument was split (e.g. i128), we need
7218       // to store the required parts of it here (and pass just one address).
7219       // Vectors may be partly split to registers and partly to the stack, in
7220       // which case the base address is partly offset and subsequent stores are
7221       // relative to that.
7222       unsigned ArgIndex = Outs[i].OrigArgIndex;
7223       unsigned ArgPartOffset = Outs[i].PartOffset;
7224       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7225       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
7226         SDValue PartValue = OutVals[i + 1];
7227         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
7228         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
7229                                       DAG.getIntPtrConstant(PartOffset, DL));
7230         MemOpChains.push_back(
7231             DAG.getStore(Chain, DL, PartValue, Address,
7232                          MachinePointerInfo::getFixedStack(MF, FI)));
7233         ++i;
7234       }
7235       ArgValue = SpillSlot;
7236     } else {
7237       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
7238     }
7239 
7240     // Use local copy if it is a byval arg.
7241     if (Flags.isByVal())
7242       ArgValue = ByValArgs[j++];
7243 
7244     if (VA.isRegLoc()) {
7245       // Queue up the argument copies and emit them at the end.
7246       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
7247     } else {
7248       assert(VA.isMemLoc() && "Argument not register or memory");
7249       assert(!IsTailCall && "Tail call not allowed if stack is used "
7250                             "for passing parameters");
7251 
7252       // Work out the address of the stack slot.
7253       if (!StackPtr.getNode())
7254         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7255       SDValue Address =
7256           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
7257                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
7258 
7259       // Emit the store.
7260       MemOpChains.push_back(
7261           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
7262     }
7263   }
7264 
7265   // Join the stores, which are independent of one another.
7266   if (!MemOpChains.empty())
7267     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
7268 
7269   SDValue Glue;
7270 
7271   // Build a sequence of copy-to-reg nodes, chained and glued together.
7272   for (auto &Reg : RegsToPass) {
7273     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
7274     Glue = Chain.getValue(1);
7275   }
7276 
7277   // Validate that none of the argument registers have been marked as
7278   // reserved, if so report an error. Do the same for the return address if this
7279   // is not a tailcall.
7280   validateCCReservedRegs(RegsToPass, MF);
7281   if (!IsTailCall &&
7282       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
7283     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7284         MF.getFunction(),
7285         "Return address register required, but has been reserved."});
7286 
7287   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
7288   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
7289   // split it and then direct call can be matched by PseudoCALL.
7290   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
7291     const GlobalValue *GV = S->getGlobal();
7292 
7293     unsigned OpFlags = RISCVII::MO_CALL;
7294     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
7295       OpFlags = RISCVII::MO_PLT;
7296 
7297     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
7298   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
7299     unsigned OpFlags = RISCVII::MO_CALL;
7300 
7301     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
7302                                                  nullptr))
7303       OpFlags = RISCVII::MO_PLT;
7304 
7305     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
7306   }
7307 
7308   // The first call operand is the chain and the second is the target address.
7309   SmallVector<SDValue, 8> Ops;
7310   Ops.push_back(Chain);
7311   Ops.push_back(Callee);
7312 
7313   // Add argument registers to the end of the list so that they are
7314   // known live into the call.
7315   for (auto &Reg : RegsToPass)
7316     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
7317 
7318   if (!IsTailCall) {
7319     // Add a register mask operand representing the call-preserved registers.
7320     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
7321     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
7322     assert(Mask && "Missing call preserved mask for calling convention");
7323     Ops.push_back(DAG.getRegisterMask(Mask));
7324   }
7325 
7326   // Glue the call to the argument copies, if any.
7327   if (Glue.getNode())
7328     Ops.push_back(Glue);
7329 
7330   // Emit the call.
7331   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7332 
7333   if (IsTailCall) {
7334     MF.getFrameInfo().setHasTailCall();
7335     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
7336   }
7337 
7338   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
7339   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
7340   Glue = Chain.getValue(1);
7341 
7342   // Mark the end of the call, which is glued to the call itself.
7343   Chain = DAG.getCALLSEQ_END(Chain,
7344                              DAG.getConstant(NumBytes, DL, PtrVT, true),
7345                              DAG.getConstant(0, DL, PtrVT, true),
7346                              Glue, DL);
7347   Glue = Chain.getValue(1);
7348 
7349   // Assign locations to each value returned by this call.
7350   SmallVector<CCValAssign, 16> RVLocs;
7351   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
7352   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
7353 
7354   // Copy all of the result registers out of their specified physreg.
7355   for (auto &VA : RVLocs) {
7356     // Copy the value out
7357     SDValue RetValue =
7358         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
7359     // Glue the RetValue to the end of the call sequence
7360     Chain = RetValue.getValue(1);
7361     Glue = RetValue.getValue(2);
7362 
7363     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
7364       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
7365       SDValue RetValue2 =
7366           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
7367       Chain = RetValue2.getValue(1);
7368       Glue = RetValue2.getValue(2);
7369       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
7370                              RetValue2);
7371     }
7372 
7373     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
7374 
7375     InVals.push_back(RetValue);
7376   }
7377 
7378   return Chain;
7379 }
7380 
7381 bool RISCVTargetLowering::CanLowerReturn(
7382     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
7383     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
7384   SmallVector<CCValAssign, 16> RVLocs;
7385   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
7386 
7387   Optional<unsigned> FirstMaskArgument;
7388   if (Subtarget.hasStdExtV())
7389     FirstMaskArgument = preAssignMask(Outs);
7390 
7391   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7392     MVT VT = Outs[i].VT;
7393     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
7394     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7395     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
7396                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
7397                  *this, FirstMaskArgument))
7398       return false;
7399   }
7400   return true;
7401 }
7402 
7403 SDValue
7404 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7405                                  bool IsVarArg,
7406                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
7407                                  const SmallVectorImpl<SDValue> &OutVals,
7408                                  const SDLoc &DL, SelectionDAG &DAG) const {
7409   const MachineFunction &MF = DAG.getMachineFunction();
7410   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
7411 
7412   // Stores the assignment of the return value to a location.
7413   SmallVector<CCValAssign, 16> RVLocs;
7414 
7415   // Info about the registers and stack slot.
7416   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
7417                  *DAG.getContext());
7418 
7419   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
7420                     nullptr);
7421 
7422   if (CallConv == CallingConv::GHC && !RVLocs.empty())
7423     report_fatal_error("GHC functions return void only");
7424 
7425   SDValue Glue;
7426   SmallVector<SDValue, 4> RetOps(1, Chain);
7427 
7428   // Copy the result values into the output registers.
7429   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
7430     SDValue Val = OutVals[i];
7431     CCValAssign &VA = RVLocs[i];
7432     assert(VA.isRegLoc() && "Can only return in registers!");
7433 
7434     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
7435       // Handle returning f64 on RV32D with a soft float ABI.
7436       assert(VA.isRegLoc() && "Expected return via registers");
7437       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
7438                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
7439       SDValue Lo = SplitF64.getValue(0);
7440       SDValue Hi = SplitF64.getValue(1);
7441       Register RegLo = VA.getLocReg();
7442       assert(RegLo < RISCV::X31 && "Invalid register pair");
7443       Register RegHi = RegLo + 1;
7444 
7445       if (STI.isRegisterReservedByUser(RegLo) ||
7446           STI.isRegisterReservedByUser(RegHi))
7447         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7448             MF.getFunction(),
7449             "Return value register required, but has been reserved."});
7450 
7451       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
7452       Glue = Chain.getValue(1);
7453       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
7454       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
7455       Glue = Chain.getValue(1);
7456       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
7457     } else {
7458       // Handle a 'normal' return.
7459       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
7460       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
7461 
7462       if (STI.isRegisterReservedByUser(VA.getLocReg()))
7463         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7464             MF.getFunction(),
7465             "Return value register required, but has been reserved."});
7466 
7467       // Guarantee that all emitted copies are stuck together.
7468       Glue = Chain.getValue(1);
7469       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7470     }
7471   }
7472 
7473   RetOps[0] = Chain; // Update chain.
7474 
7475   // Add the glue node if we have it.
7476   if (Glue.getNode()) {
7477     RetOps.push_back(Glue);
7478   }
7479 
7480   // Interrupt service routines use different return instructions.
7481   const Function &Func = DAG.getMachineFunction().getFunction();
7482   if (Func.hasFnAttribute("interrupt")) {
7483     if (!Func.getReturnType()->isVoidTy())
7484       report_fatal_error(
7485           "Functions with the interrupt attribute must have void return type!");
7486 
7487     MachineFunction &MF = DAG.getMachineFunction();
7488     StringRef Kind =
7489       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7490 
7491     unsigned RetOpc;
7492     if (Kind == "user")
7493       RetOpc = RISCVISD::URET_FLAG;
7494     else if (Kind == "supervisor")
7495       RetOpc = RISCVISD::SRET_FLAG;
7496     else
7497       RetOpc = RISCVISD::MRET_FLAG;
7498 
7499     return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
7500   }
7501 
7502   return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
7503 }
7504 
7505 void RISCVTargetLowering::validateCCReservedRegs(
7506     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
7507     MachineFunction &MF) const {
7508   const Function &F = MF.getFunction();
7509   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
7510 
7511   if (llvm::any_of(Regs, [&STI](auto Reg) {
7512         return STI.isRegisterReservedByUser(Reg.first);
7513       }))
7514     F.getContext().diagnose(DiagnosticInfoUnsupported{
7515         F, "Argument register required, but has been reserved."});
7516 }
7517 
7518 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
7519   return CI->isTailCall();
7520 }
7521 
7522 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
7523 #define NODE_NAME_CASE(NODE)                                                   \
7524   case RISCVISD::NODE:                                                         \
7525     return "RISCVISD::" #NODE;
7526   // clang-format off
7527   switch ((RISCVISD::NodeType)Opcode) {
7528   case RISCVISD::FIRST_NUMBER:
7529     break;
7530   NODE_NAME_CASE(RET_FLAG)
7531   NODE_NAME_CASE(URET_FLAG)
7532   NODE_NAME_CASE(SRET_FLAG)
7533   NODE_NAME_CASE(MRET_FLAG)
7534   NODE_NAME_CASE(CALL)
7535   NODE_NAME_CASE(SELECT_CC)
7536   NODE_NAME_CASE(BR_CC)
7537   NODE_NAME_CASE(BuildPairF64)
7538   NODE_NAME_CASE(SplitF64)
7539   NODE_NAME_CASE(TAIL)
7540   NODE_NAME_CASE(MULHSU)
7541   NODE_NAME_CASE(SLLW)
7542   NODE_NAME_CASE(SRAW)
7543   NODE_NAME_CASE(SRLW)
7544   NODE_NAME_CASE(DIVW)
7545   NODE_NAME_CASE(DIVUW)
7546   NODE_NAME_CASE(REMUW)
7547   NODE_NAME_CASE(ROLW)
7548   NODE_NAME_CASE(RORW)
7549   NODE_NAME_CASE(CLZW)
7550   NODE_NAME_CASE(CTZW)
7551   NODE_NAME_CASE(FSLW)
7552   NODE_NAME_CASE(FSRW)
7553   NODE_NAME_CASE(FSL)
7554   NODE_NAME_CASE(FSR)
7555   NODE_NAME_CASE(FMV_H_X)
7556   NODE_NAME_CASE(FMV_X_ANYEXTH)
7557   NODE_NAME_CASE(FMV_W_X_RV64)
7558   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
7559   NODE_NAME_CASE(READ_CYCLE_WIDE)
7560   NODE_NAME_CASE(GREV)
7561   NODE_NAME_CASE(GREVW)
7562   NODE_NAME_CASE(GORC)
7563   NODE_NAME_CASE(GORCW)
7564   NODE_NAME_CASE(SHFL)
7565   NODE_NAME_CASE(SHFLW)
7566   NODE_NAME_CASE(UNSHFL)
7567   NODE_NAME_CASE(UNSHFLW)
7568   NODE_NAME_CASE(BCOMPRESS)
7569   NODE_NAME_CASE(BCOMPRESSW)
7570   NODE_NAME_CASE(BDECOMPRESS)
7571   NODE_NAME_CASE(BDECOMPRESSW)
7572   NODE_NAME_CASE(VMV_V_X_VL)
7573   NODE_NAME_CASE(VFMV_V_F_VL)
7574   NODE_NAME_CASE(VMV_X_S)
7575   NODE_NAME_CASE(VMV_S_X_VL)
7576   NODE_NAME_CASE(VFMV_S_F_VL)
7577   NODE_NAME_CASE(SPLAT_VECTOR_I64)
7578   NODE_NAME_CASE(READ_VLENB)
7579   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
7580   NODE_NAME_CASE(VLEFF)
7581   NODE_NAME_CASE(VLEFF_MASK)
7582   NODE_NAME_CASE(VSLIDEUP_VL)
7583   NODE_NAME_CASE(VSLIDE1UP_VL)
7584   NODE_NAME_CASE(VSLIDEDOWN_VL)
7585   NODE_NAME_CASE(VSLIDE1DOWN_VL)
7586   NODE_NAME_CASE(VID_VL)
7587   NODE_NAME_CASE(VFNCVT_ROD_VL)
7588   NODE_NAME_CASE(VECREDUCE_ADD_VL)
7589   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
7590   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
7591   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
7592   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
7593   NODE_NAME_CASE(VECREDUCE_AND_VL)
7594   NODE_NAME_CASE(VECREDUCE_OR_VL)
7595   NODE_NAME_CASE(VECREDUCE_XOR_VL)
7596   NODE_NAME_CASE(VECREDUCE_FADD_VL)
7597   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
7598   NODE_NAME_CASE(ADD_VL)
7599   NODE_NAME_CASE(AND_VL)
7600   NODE_NAME_CASE(MUL_VL)
7601   NODE_NAME_CASE(OR_VL)
7602   NODE_NAME_CASE(SDIV_VL)
7603   NODE_NAME_CASE(SHL_VL)
7604   NODE_NAME_CASE(SREM_VL)
7605   NODE_NAME_CASE(SRA_VL)
7606   NODE_NAME_CASE(SRL_VL)
7607   NODE_NAME_CASE(SUB_VL)
7608   NODE_NAME_CASE(UDIV_VL)
7609   NODE_NAME_CASE(UREM_VL)
7610   NODE_NAME_CASE(XOR_VL)
7611   NODE_NAME_CASE(FADD_VL)
7612   NODE_NAME_CASE(FSUB_VL)
7613   NODE_NAME_CASE(FMUL_VL)
7614   NODE_NAME_CASE(FDIV_VL)
7615   NODE_NAME_CASE(FNEG_VL)
7616   NODE_NAME_CASE(FABS_VL)
7617   NODE_NAME_CASE(FSQRT_VL)
7618   NODE_NAME_CASE(FMA_VL)
7619   NODE_NAME_CASE(FCOPYSIGN_VL)
7620   NODE_NAME_CASE(SMIN_VL)
7621   NODE_NAME_CASE(SMAX_VL)
7622   NODE_NAME_CASE(UMIN_VL)
7623   NODE_NAME_CASE(UMAX_VL)
7624   NODE_NAME_CASE(FMINNUM_VL)
7625   NODE_NAME_CASE(FMAXNUM_VL)
7626   NODE_NAME_CASE(MULHS_VL)
7627   NODE_NAME_CASE(MULHU_VL)
7628   NODE_NAME_CASE(FP_TO_SINT_VL)
7629   NODE_NAME_CASE(FP_TO_UINT_VL)
7630   NODE_NAME_CASE(SINT_TO_FP_VL)
7631   NODE_NAME_CASE(UINT_TO_FP_VL)
7632   NODE_NAME_CASE(FP_EXTEND_VL)
7633   NODE_NAME_CASE(FP_ROUND_VL)
7634   NODE_NAME_CASE(SETCC_VL)
7635   NODE_NAME_CASE(VSELECT_VL)
7636   NODE_NAME_CASE(VMAND_VL)
7637   NODE_NAME_CASE(VMOR_VL)
7638   NODE_NAME_CASE(VMXOR_VL)
7639   NODE_NAME_CASE(VMCLR_VL)
7640   NODE_NAME_CASE(VMSET_VL)
7641   NODE_NAME_CASE(VRGATHER_VX_VL)
7642   NODE_NAME_CASE(VRGATHER_VV_VL)
7643   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
7644   NODE_NAME_CASE(VSEXT_VL)
7645   NODE_NAME_CASE(VZEXT_VL)
7646   NODE_NAME_CASE(VPOPC_VL)
7647   NODE_NAME_CASE(VLE_VL)
7648   NODE_NAME_CASE(VSE_VL)
7649   NODE_NAME_CASE(READ_CSR)
7650   NODE_NAME_CASE(WRITE_CSR)
7651   NODE_NAME_CASE(SWAP_CSR)
7652   }
7653   // clang-format on
7654   return nullptr;
7655 #undef NODE_NAME_CASE
7656 }
7657 
7658 /// getConstraintType - Given a constraint letter, return the type of
7659 /// constraint it is for this target.
7660 RISCVTargetLowering::ConstraintType
7661 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
7662   if (Constraint.size() == 1) {
7663     switch (Constraint[0]) {
7664     default:
7665       break;
7666     case 'f':
7667     case 'v':
7668       return C_RegisterClass;
7669     case 'I':
7670     case 'J':
7671     case 'K':
7672       return C_Immediate;
7673     case 'A':
7674       return C_Memory;
7675     }
7676   }
7677   return TargetLowering::getConstraintType(Constraint);
7678 }
7679 
7680 std::pair<unsigned, const TargetRegisterClass *>
7681 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
7682                                                   StringRef Constraint,
7683                                                   MVT VT) const {
7684   // First, see if this is a constraint that directly corresponds to a
7685   // RISCV register class.
7686   if (Constraint.size() == 1) {
7687     switch (Constraint[0]) {
7688     case 'r':
7689       return std::make_pair(0U, &RISCV::GPRRegClass);
7690     case 'f':
7691       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
7692         return std::make_pair(0U, &RISCV::FPR16RegClass);
7693       if (Subtarget.hasStdExtF() && VT == MVT::f32)
7694         return std::make_pair(0U, &RISCV::FPR32RegClass);
7695       if (Subtarget.hasStdExtD() && VT == MVT::f64)
7696         return std::make_pair(0U, &RISCV::FPR64RegClass);
7697       break;
7698     case 'v':
7699       for (const auto *RC :
7700            {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
7701             &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
7702         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
7703           return std::make_pair(0U, RC);
7704       }
7705       break;
7706     default:
7707       break;
7708     }
7709   }
7710 
7711   // Clang will correctly decode the usage of register name aliases into their
7712   // official names. However, other frontends like `rustc` do not. This allows
7713   // users of these frontends to use the ABI names for registers in LLVM-style
7714   // register constraints.
7715   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
7716                                .Case("{zero}", RISCV::X0)
7717                                .Case("{ra}", RISCV::X1)
7718                                .Case("{sp}", RISCV::X2)
7719                                .Case("{gp}", RISCV::X3)
7720                                .Case("{tp}", RISCV::X4)
7721                                .Case("{t0}", RISCV::X5)
7722                                .Case("{t1}", RISCV::X6)
7723                                .Case("{t2}", RISCV::X7)
7724                                .Cases("{s0}", "{fp}", RISCV::X8)
7725                                .Case("{s1}", RISCV::X9)
7726                                .Case("{a0}", RISCV::X10)
7727                                .Case("{a1}", RISCV::X11)
7728                                .Case("{a2}", RISCV::X12)
7729                                .Case("{a3}", RISCV::X13)
7730                                .Case("{a4}", RISCV::X14)
7731                                .Case("{a5}", RISCV::X15)
7732                                .Case("{a6}", RISCV::X16)
7733                                .Case("{a7}", RISCV::X17)
7734                                .Case("{s2}", RISCV::X18)
7735                                .Case("{s3}", RISCV::X19)
7736                                .Case("{s4}", RISCV::X20)
7737                                .Case("{s5}", RISCV::X21)
7738                                .Case("{s6}", RISCV::X22)
7739                                .Case("{s7}", RISCV::X23)
7740                                .Case("{s8}", RISCV::X24)
7741                                .Case("{s9}", RISCV::X25)
7742                                .Case("{s10}", RISCV::X26)
7743                                .Case("{s11}", RISCV::X27)
7744                                .Case("{t3}", RISCV::X28)
7745                                .Case("{t4}", RISCV::X29)
7746                                .Case("{t5}", RISCV::X30)
7747                                .Case("{t6}", RISCV::X31)
7748                                .Default(RISCV::NoRegister);
7749   if (XRegFromAlias != RISCV::NoRegister)
7750     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
7751 
7752   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
7753   // TableGen record rather than the AsmName to choose registers for InlineAsm
7754   // constraints, plus we want to match those names to the widest floating point
7755   // register type available, manually select floating point registers here.
7756   //
7757   // The second case is the ABI name of the register, so that frontends can also
7758   // use the ABI names in register constraint lists.
7759   if (Subtarget.hasStdExtF()) {
7760     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
7761                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
7762                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
7763                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
7764                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
7765                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
7766                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
7767                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
7768                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
7769                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
7770                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
7771                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
7772                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
7773                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
7774                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
7775                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
7776                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
7777                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
7778                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
7779                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
7780                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
7781                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
7782                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
7783                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
7784                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
7785                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
7786                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
7787                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
7788                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
7789                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
7790                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
7791                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
7792                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
7793                         .Default(RISCV::NoRegister);
7794     if (FReg != RISCV::NoRegister) {
7795       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
7796       if (Subtarget.hasStdExtD()) {
7797         unsigned RegNo = FReg - RISCV::F0_F;
7798         unsigned DReg = RISCV::F0_D + RegNo;
7799         return std::make_pair(DReg, &RISCV::FPR64RegClass);
7800       }
7801       return std::make_pair(FReg, &RISCV::FPR32RegClass);
7802     }
7803   }
7804 
7805   if (Subtarget.hasStdExtV()) {
7806     Register VReg = StringSwitch<Register>(Constraint.lower())
7807                         .Case("{v0}", RISCV::V0)
7808                         .Case("{v1}", RISCV::V1)
7809                         .Case("{v2}", RISCV::V2)
7810                         .Case("{v3}", RISCV::V3)
7811                         .Case("{v4}", RISCV::V4)
7812                         .Case("{v5}", RISCV::V5)
7813                         .Case("{v6}", RISCV::V6)
7814                         .Case("{v7}", RISCV::V7)
7815                         .Case("{v8}", RISCV::V8)
7816                         .Case("{v9}", RISCV::V9)
7817                         .Case("{v10}", RISCV::V10)
7818                         .Case("{v11}", RISCV::V11)
7819                         .Case("{v12}", RISCV::V12)
7820                         .Case("{v13}", RISCV::V13)
7821                         .Case("{v14}", RISCV::V14)
7822                         .Case("{v15}", RISCV::V15)
7823                         .Case("{v16}", RISCV::V16)
7824                         .Case("{v17}", RISCV::V17)
7825                         .Case("{v18}", RISCV::V18)
7826                         .Case("{v19}", RISCV::V19)
7827                         .Case("{v20}", RISCV::V20)
7828                         .Case("{v21}", RISCV::V21)
7829                         .Case("{v22}", RISCV::V22)
7830                         .Case("{v23}", RISCV::V23)
7831                         .Case("{v24}", RISCV::V24)
7832                         .Case("{v25}", RISCV::V25)
7833                         .Case("{v26}", RISCV::V26)
7834                         .Case("{v27}", RISCV::V27)
7835                         .Case("{v28}", RISCV::V28)
7836                         .Case("{v29}", RISCV::V29)
7837                         .Case("{v30}", RISCV::V30)
7838                         .Case("{v31}", RISCV::V31)
7839                         .Default(RISCV::NoRegister);
7840     if (VReg != RISCV::NoRegister) {
7841       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
7842         return std::make_pair(VReg, &RISCV::VMRegClass);
7843       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
7844         return std::make_pair(VReg, &RISCV::VRRegClass);
7845       for (const auto *RC :
7846            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
7847         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
7848           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
7849           return std::make_pair(VReg, RC);
7850         }
7851       }
7852     }
7853   }
7854 
7855   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
7856 }
7857 
7858 unsigned
7859 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
7860   // Currently only support length 1 constraints.
7861   if (ConstraintCode.size() == 1) {
7862     switch (ConstraintCode[0]) {
7863     case 'A':
7864       return InlineAsm::Constraint_A;
7865     default:
7866       break;
7867     }
7868   }
7869 
7870   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
7871 }
7872 
7873 void RISCVTargetLowering::LowerAsmOperandForConstraint(
7874     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
7875     SelectionDAG &DAG) const {
7876   // Currently only support length 1 constraints.
7877   if (Constraint.length() == 1) {
7878     switch (Constraint[0]) {
7879     case 'I':
7880       // Validate & create a 12-bit signed immediate operand.
7881       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
7882         uint64_t CVal = C->getSExtValue();
7883         if (isInt<12>(CVal))
7884           Ops.push_back(
7885               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
7886       }
7887       return;
7888     case 'J':
7889       // Validate & create an integer zero operand.
7890       if (auto *C = dyn_cast<ConstantSDNode>(Op))
7891         if (C->getZExtValue() == 0)
7892           Ops.push_back(
7893               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
7894       return;
7895     case 'K':
7896       // Validate & create a 5-bit unsigned immediate operand.
7897       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
7898         uint64_t CVal = C->getZExtValue();
7899         if (isUInt<5>(CVal))
7900           Ops.push_back(
7901               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
7902       }
7903       return;
7904     default:
7905       break;
7906     }
7907   }
7908   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
7909 }
7910 
7911 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
7912                                                    Instruction *Inst,
7913                                                    AtomicOrdering Ord) const {
7914   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
7915     return Builder.CreateFence(Ord);
7916   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
7917     return Builder.CreateFence(AtomicOrdering::Release);
7918   return nullptr;
7919 }
7920 
7921 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
7922                                                     Instruction *Inst,
7923                                                     AtomicOrdering Ord) const {
7924   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
7925     return Builder.CreateFence(AtomicOrdering::Acquire);
7926   return nullptr;
7927 }
7928 
7929 TargetLowering::AtomicExpansionKind
7930 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
7931   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
7932   // point operations can't be used in an lr/sc sequence without breaking the
7933   // forward-progress guarantee.
7934   if (AI->isFloatingPointOperation())
7935     return AtomicExpansionKind::CmpXChg;
7936 
7937   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
7938   if (Size == 8 || Size == 16)
7939     return AtomicExpansionKind::MaskedIntrinsic;
7940   return AtomicExpansionKind::None;
7941 }
7942 
7943 static Intrinsic::ID
7944 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
7945   if (XLen == 32) {
7946     switch (BinOp) {
7947     default:
7948       llvm_unreachable("Unexpected AtomicRMW BinOp");
7949     case AtomicRMWInst::Xchg:
7950       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
7951     case AtomicRMWInst::Add:
7952       return Intrinsic::riscv_masked_atomicrmw_add_i32;
7953     case AtomicRMWInst::Sub:
7954       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
7955     case AtomicRMWInst::Nand:
7956       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
7957     case AtomicRMWInst::Max:
7958       return Intrinsic::riscv_masked_atomicrmw_max_i32;
7959     case AtomicRMWInst::Min:
7960       return Intrinsic::riscv_masked_atomicrmw_min_i32;
7961     case AtomicRMWInst::UMax:
7962       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
7963     case AtomicRMWInst::UMin:
7964       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
7965     }
7966   }
7967 
7968   if (XLen == 64) {
7969     switch (BinOp) {
7970     default:
7971       llvm_unreachable("Unexpected AtomicRMW BinOp");
7972     case AtomicRMWInst::Xchg:
7973       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
7974     case AtomicRMWInst::Add:
7975       return Intrinsic::riscv_masked_atomicrmw_add_i64;
7976     case AtomicRMWInst::Sub:
7977       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
7978     case AtomicRMWInst::Nand:
7979       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
7980     case AtomicRMWInst::Max:
7981       return Intrinsic::riscv_masked_atomicrmw_max_i64;
7982     case AtomicRMWInst::Min:
7983       return Intrinsic::riscv_masked_atomicrmw_min_i64;
7984     case AtomicRMWInst::UMax:
7985       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
7986     case AtomicRMWInst::UMin:
7987       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
7988     }
7989   }
7990 
7991   llvm_unreachable("Unexpected XLen\n");
7992 }
7993 
7994 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
7995     IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
7996     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
7997   unsigned XLen = Subtarget.getXLen();
7998   Value *Ordering =
7999       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
8000   Type *Tys[] = {AlignedAddr->getType()};
8001   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
8002       AI->getModule(),
8003       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
8004 
8005   if (XLen == 64) {
8006     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
8007     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8008     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
8009   }
8010 
8011   Value *Result;
8012 
8013   // Must pass the shift amount needed to sign extend the loaded value prior
8014   // to performing a signed comparison for min/max. ShiftAmt is the number of
8015   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
8016   // is the number of bits to left+right shift the value in order to
8017   // sign-extend.
8018   if (AI->getOperation() == AtomicRMWInst::Min ||
8019       AI->getOperation() == AtomicRMWInst::Max) {
8020     const DataLayout &DL = AI->getModule()->getDataLayout();
8021     unsigned ValWidth =
8022         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
8023     Value *SextShamt =
8024         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
8025     Result = Builder.CreateCall(LrwOpScwLoop,
8026                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
8027   } else {
8028     Result =
8029         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
8030   }
8031 
8032   if (XLen == 64)
8033     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8034   return Result;
8035 }
8036 
8037 TargetLowering::AtomicExpansionKind
8038 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
8039     AtomicCmpXchgInst *CI) const {
8040   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
8041   if (Size == 8 || Size == 16)
8042     return AtomicExpansionKind::MaskedIntrinsic;
8043   return AtomicExpansionKind::None;
8044 }
8045 
8046 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
8047     IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
8048     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
8049   unsigned XLen = Subtarget.getXLen();
8050   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
8051   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
8052   if (XLen == 64) {
8053     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
8054     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
8055     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
8056     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
8057   }
8058   Type *Tys[] = {AlignedAddr->getType()};
8059   Function *MaskedCmpXchg =
8060       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
8061   Value *Result = Builder.CreateCall(
8062       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
8063   if (XLen == 64)
8064     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
8065   return Result;
8066 }
8067 
8068 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
8069   return false;
8070 }
8071 
8072 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
8073                                                      EVT VT) const {
8074   VT = VT.getScalarType();
8075 
8076   if (!VT.isSimple())
8077     return false;
8078 
8079   switch (VT.getSimpleVT().SimpleTy) {
8080   case MVT::f16:
8081     return Subtarget.hasStdExtZfh();
8082   case MVT::f32:
8083     return Subtarget.hasStdExtF();
8084   case MVT::f64:
8085     return Subtarget.hasStdExtD();
8086   default:
8087     break;
8088   }
8089 
8090   return false;
8091 }
8092 
8093 Register RISCVTargetLowering::getExceptionPointerRegister(
8094     const Constant *PersonalityFn) const {
8095   return RISCV::X10;
8096 }
8097 
8098 Register RISCVTargetLowering::getExceptionSelectorRegister(
8099     const Constant *PersonalityFn) const {
8100   return RISCV::X11;
8101 }
8102 
8103 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
8104   // Return false to suppress the unnecessary extensions if the LibCall
8105   // arguments or return value is f32 type for LP64 ABI.
8106   RISCVABI::ABI ABI = Subtarget.getTargetABI();
8107   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
8108     return false;
8109 
8110   return true;
8111 }
8112 
8113 bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
8114   if (Subtarget.is64Bit() && Type == MVT::i32)
8115     return true;
8116 
8117   return IsSigned;
8118 }
8119 
8120 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
8121                                                  SDValue C) const {
8122   // Check integral scalar types.
8123   if (VT.isScalarInteger()) {
8124     // Omit the optimization if the sub target has the M extension and the data
8125     // size exceeds XLen.
8126     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
8127       return false;
8128     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
8129       // Break the MUL to a SLLI and an ADD/SUB.
8130       const APInt &Imm = ConstNode->getAPIntValue();
8131       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
8132           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
8133         return true;
8134       // Omit the following optimization if the sub target has the M extension
8135       // and the data size >= XLen.
8136       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
8137         return false;
8138       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
8139       // a pair of LUI/ADDI.
8140       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
8141         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
8142         if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
8143             (1 - ImmS).isPowerOf2())
8144         return true;
8145       }
8146     }
8147   }
8148 
8149   return false;
8150 }
8151 
8152 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
8153     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
8154     bool *Fast) const {
8155   if (!VT.isScalableVector())
8156     return false;
8157 
8158   EVT ElemVT = VT.getVectorElementType();
8159   if (Alignment >= ElemVT.getStoreSize()) {
8160     if (Fast)
8161       *Fast = true;
8162     return true;
8163   }
8164 
8165   return false;
8166 }
8167 
8168 bool RISCVTargetLowering::splitValueIntoRegisterParts(
8169     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
8170     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
8171   bool IsABIRegCopy = CC.hasValue();
8172   EVT ValueVT = Val.getValueType();
8173   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8174     // Cast the f16 to i16, extend to i32, pad with ones to make a float nan,
8175     // and cast to f32.
8176     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
8177     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
8178     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
8179                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
8180     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
8181     Parts[0] = Val;
8182     return true;
8183   }
8184 
8185   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8186     LLVMContext &Context = *DAG.getContext();
8187     EVT ValueEltVT = ValueVT.getVectorElementType();
8188     EVT PartEltVT = PartVT.getVectorElementType();
8189     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8190     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8191     if (PartVTBitSize % ValueVTBitSize == 0) {
8192       // If the element types are different, bitcast to the same element type of
8193       // PartVT first.
8194       if (ValueEltVT != PartEltVT) {
8195         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
8196         assert(Count != 0 && "The number of element should not be zero.");
8197         EVT SameEltTypeVT =
8198             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8199         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
8200       }
8201       Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
8202                         Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8203       Parts[0] = Val;
8204       return true;
8205     }
8206   }
8207   return false;
8208 }
8209 
8210 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
8211     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
8212     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
8213   bool IsABIRegCopy = CC.hasValue();
8214   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8215     SDValue Val = Parts[0];
8216 
8217     // Cast the f32 to i32, truncate to i16, and cast back to f16.
8218     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
8219     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
8220     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
8221     return Val;
8222   }
8223 
8224   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8225     LLVMContext &Context = *DAG.getContext();
8226     SDValue Val = Parts[0];
8227     EVT ValueEltVT = ValueVT.getVectorElementType();
8228     EVT PartEltVT = PartVT.getVectorElementType();
8229     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8230     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8231     if (PartVTBitSize % ValueVTBitSize == 0) {
8232       EVT SameEltTypeVT = ValueVT;
8233       // If the element types are different, convert it to the same element type
8234       // of PartVT.
8235       if (ValueEltVT != PartEltVT) {
8236         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
8237         assert(Count != 0 && "The number of element should not be zero.");
8238         SameEltTypeVT =
8239             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8240       }
8241       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
8242                         DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8243       if (ValueEltVT != PartEltVT)
8244         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
8245       return Val;
8246     }
8247   }
8248   return SDValue();
8249 }
8250 
8251 #define GET_REGISTER_MATCHER
8252 #include "RISCVGenAsmMatcher.inc"
8253 
8254 Register
8255 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
8256                                        const MachineFunction &MF) const {
8257   Register Reg = MatchRegisterAltName(RegName);
8258   if (Reg == RISCV::NoRegister)
8259     Reg = MatchRegisterName(RegName);
8260   if (Reg == RISCV::NoRegister)
8261     report_fatal_error(
8262         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
8263   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
8264   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
8265     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
8266                              StringRef(RegName) + "\"."));
8267   return Reg;
8268 }
8269 
8270 namespace llvm {
8271 namespace RISCVVIntrinsicsTable {
8272 
8273 #define GET_RISCVVIntrinsicsTable_IMPL
8274 #include "RISCVGenSearchableTables.inc"
8275 
8276 } // namespace RISCVVIntrinsicsTable
8277 
8278 } // namespace llvm
8279