1 //===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation  --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that RISCV uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "RISCVISelLowering.h"
15 #include "MCTargetDesc/RISCVMatInt.h"
16 #include "RISCV.h"
17 #include "RISCVMachineFunctionInfo.h"
18 #include "RISCVRegisterInfo.h"
19 #include "RISCVSubtarget.h"
20 #include "RISCVTargetMachine.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/CodeGen/ValueTypes.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "llvm/IR/DiagnosticPrinter.h"
32 #include "llvm/IR/IntrinsicsRISCV.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/KnownBits.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "riscv-lower"
42 
43 STATISTIC(NumTailCalls, "Number of tail calls");
44 
45 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
46                                          const RISCVSubtarget &STI)
47     : TargetLowering(TM), Subtarget(STI) {
48 
49   if (Subtarget.isRV32E())
50     report_fatal_error("Codegen not yet implemented for RV32E");
51 
52   RISCVABI::ABI ABI = Subtarget.getTargetABI();
53   assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");
54 
55   if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
56       !Subtarget.hasStdExtF()) {
57     errs() << "Hard-float 'f' ABI can't be used for a target that "
58               "doesn't support the F instruction set extension (ignoring "
59               "target-abi)\n";
60     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
61   } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
62              !Subtarget.hasStdExtD()) {
63     errs() << "Hard-float 'd' ABI can't be used for a target that "
64               "doesn't support the D instruction set extension (ignoring "
65               "target-abi)\n";
66     ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
67   }
68 
69   switch (ABI) {
70   default:
71     report_fatal_error("Don't know how to lower this ABI");
72   case RISCVABI::ABI_ILP32:
73   case RISCVABI::ABI_ILP32F:
74   case RISCVABI::ABI_ILP32D:
75   case RISCVABI::ABI_LP64:
76   case RISCVABI::ABI_LP64F:
77   case RISCVABI::ABI_LP64D:
78     break;
79   }
80 
81   MVT XLenVT = Subtarget.getXLenVT();
82 
83   // Set up the register classes.
84   addRegisterClass(XLenVT, &RISCV::GPRRegClass);
85 
86   if (Subtarget.hasStdExtZfh())
87     addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
88   if (Subtarget.hasStdExtF())
89     addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
90   if (Subtarget.hasStdExtD())
91     addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);
92 
93   static const MVT::SimpleValueType BoolVecVTs[] = {
94       MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
95       MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
96   static const MVT::SimpleValueType IntVecVTs[] = {
97       MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
98       MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
99       MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
100       MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
101       MVT::nxv4i64, MVT::nxv8i64};
102   static const MVT::SimpleValueType F16VecVTs[] = {
103       MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
104       MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
105   static const MVT::SimpleValueType F32VecVTs[] = {
106       MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
107   static const MVT::SimpleValueType F64VecVTs[] = {
108       MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};
109 
110   if (Subtarget.hasStdExtV()) {
111     auto addRegClassForRVV = [this](MVT VT) {
112       unsigned Size = VT.getSizeInBits().getKnownMinValue();
113       assert(Size <= 512 && isPowerOf2_32(Size));
114       const TargetRegisterClass *RC;
115       if (Size <= 64)
116         RC = &RISCV::VRRegClass;
117       else if (Size == 128)
118         RC = &RISCV::VRM2RegClass;
119       else if (Size == 256)
120         RC = &RISCV::VRM4RegClass;
121       else
122         RC = &RISCV::VRM8RegClass;
123 
124       addRegisterClass(VT, RC);
125     };
126 
127     for (MVT VT : BoolVecVTs)
128       addRegClassForRVV(VT);
129     for (MVT VT : IntVecVTs)
130       addRegClassForRVV(VT);
131 
132     if (Subtarget.hasStdExtZfh())
133       for (MVT VT : F16VecVTs)
134         addRegClassForRVV(VT);
135 
136     if (Subtarget.hasStdExtF())
137       for (MVT VT : F32VecVTs)
138         addRegClassForRVV(VT);
139 
140     if (Subtarget.hasStdExtD())
141       for (MVT VT : F64VecVTs)
142         addRegClassForRVV(VT);
143 
144     if (Subtarget.useRVVForFixedLengthVectors()) {
145       auto addRegClassForFixedVectors = [this](MVT VT) {
146         unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
147         const TargetRegisterClass *RC;
148         if (LMul == 1 || VT.getVectorElementType() == MVT::i1)
149           RC = &RISCV::VRRegClass;
150         else if (LMul == 2)
151           RC = &RISCV::VRM2RegClass;
152         else if (LMul == 4)
153           RC = &RISCV::VRM4RegClass;
154         else if (LMul == 8)
155           RC = &RISCV::VRM8RegClass;
156         else
157           llvm_unreachable("Unexpected LMul!");
158 
159         addRegisterClass(VT, RC);
160       };
161       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
162         if (useRVVForFixedLengthVectorVT(VT))
163           addRegClassForFixedVectors(VT);
164 
165       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
166         if (useRVVForFixedLengthVectorVT(VT))
167           addRegClassForFixedVectors(VT);
168     }
169   }
170 
171   // Compute derived properties from the register classes.
172   computeRegisterProperties(STI.getRegisterInfo());
173 
174   setStackPointerRegisterToSaveRestore(RISCV::X2);
175 
176   for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
177     setLoadExtAction(N, XLenVT, MVT::i1, Promote);
178 
179   // TODO: add all necessary setOperationAction calls.
180   setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
181 
182   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
183   setOperationAction(ISD::BR_CC, XLenVT, Expand);
184   setOperationAction(ISD::BRCOND, MVT::Other, Custom);
185   setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
186 
187   setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
188   setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
189 
190   setOperationAction(ISD::VASTART, MVT::Other, Custom);
191   setOperationAction(ISD::VAARG, MVT::Other, Expand);
192   setOperationAction(ISD::VACOPY, MVT::Other, Expand);
193   setOperationAction(ISD::VAEND, MVT::Other, Expand);
194 
195   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
196   if (!Subtarget.hasStdExtZbb()) {
197     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
198     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
199   }
200 
201   if (Subtarget.hasStdExtZbb() && Subtarget.is64Bit())
202     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
203 
204   if (Subtarget.is64Bit()) {
205     setOperationAction(ISD::ADD, MVT::i32, Custom);
206     setOperationAction(ISD::SUB, MVT::i32, Custom);
207     setOperationAction(ISD::SHL, MVT::i32, Custom);
208     setOperationAction(ISD::SRA, MVT::i32, Custom);
209     setOperationAction(ISD::SRL, MVT::i32, Custom);
210 
211     setOperationAction(ISD::UADDO, MVT::i32, Custom);
212     setOperationAction(ISD::USUBO, MVT::i32, Custom);
213     setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
214     setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
215   }
216 
217   if (!Subtarget.hasStdExtM()) {
218     setOperationAction(ISD::MUL, XLenVT, Expand);
219     setOperationAction(ISD::MULHS, XLenVT, Expand);
220     setOperationAction(ISD::MULHU, XLenVT, Expand);
221     setOperationAction(ISD::SDIV, XLenVT, Expand);
222     setOperationAction(ISD::UDIV, XLenVT, Expand);
223     setOperationAction(ISD::SREM, XLenVT, Expand);
224     setOperationAction(ISD::UREM, XLenVT, Expand);
225   } else {
226     if (Subtarget.is64Bit()) {
227       setOperationAction(ISD::MUL, MVT::i32, Custom);
228       setOperationAction(ISD::MUL, MVT::i128, Custom);
229 
230       setOperationAction(ISD::SDIV, MVT::i8, Custom);
231       setOperationAction(ISD::UDIV, MVT::i8, Custom);
232       setOperationAction(ISD::UREM, MVT::i8, Custom);
233       setOperationAction(ISD::SDIV, MVT::i16, Custom);
234       setOperationAction(ISD::UDIV, MVT::i16, Custom);
235       setOperationAction(ISD::UREM, MVT::i16, Custom);
236       setOperationAction(ISD::SDIV, MVT::i32, Custom);
237       setOperationAction(ISD::UDIV, MVT::i32, Custom);
238       setOperationAction(ISD::UREM, MVT::i32, Custom);
239     } else {
240       setOperationAction(ISD::MUL, MVT::i64, Custom);
241     }
242   }
243 
244   setOperationAction(ISD::SDIVREM, XLenVT, Expand);
245   setOperationAction(ISD::UDIVREM, XLenVT, Expand);
246   setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
247   setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
248 
249   setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
250   setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
251   setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);
252 
253   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
254     if (Subtarget.is64Bit()) {
255       setOperationAction(ISD::ROTL, MVT::i32, Custom);
256       setOperationAction(ISD::ROTR, MVT::i32, Custom);
257     }
258   } else {
259     setOperationAction(ISD::ROTL, XLenVT, Expand);
260     setOperationAction(ISD::ROTR, XLenVT, Expand);
261   }
262 
263   if (Subtarget.hasStdExtZbp()) {
264     // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
265     // more combining.
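    // (For reference: on RV32, bswap corresponds to GREVI with shamt 24 and
    // bitreverse to GREVI with shamt 31; on RV64 the shamts are 56 and 63.)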
266     setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
267     setOperationAction(ISD::BSWAP, XLenVT, Custom);
268 
269     if (Subtarget.is64Bit()) {
270       setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
271       setOperationAction(ISD::BSWAP, MVT::i32, Custom);
272     }
273   } else {
274     // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
275     // pattern match it directly in isel.
276     setOperationAction(ISD::BSWAP, XLenVT,
277                        Subtarget.hasStdExtZbb() ? Legal : Expand);
278   }
279 
280   if (Subtarget.hasStdExtZbb()) {
281     setOperationAction(ISD::SMIN, XLenVT, Legal);
282     setOperationAction(ISD::SMAX, XLenVT, Legal);
283     setOperationAction(ISD::UMIN, XLenVT, Legal);
284     setOperationAction(ISD::UMAX, XLenVT, Legal);
285 
286     if (Subtarget.is64Bit()) {
287       setOperationAction(ISD::CTTZ, MVT::i32, Custom);
288       setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
289       setOperationAction(ISD::CTLZ, MVT::i32, Custom);
290       setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
291     }
292   } else {
293     setOperationAction(ISD::CTTZ, XLenVT, Expand);
294     setOperationAction(ISD::CTLZ, XLenVT, Expand);
295     setOperationAction(ISD::CTPOP, XLenVT, Expand);
296   }
297 
298   if (Subtarget.hasStdExtZbt()) {
299     setOperationAction(ISD::FSHL, XLenVT, Custom);
300     setOperationAction(ISD::FSHR, XLenVT, Custom);
301     setOperationAction(ISD::SELECT, XLenVT, Legal);
302 
303     if (Subtarget.is64Bit()) {
304       setOperationAction(ISD::FSHL, MVT::i32, Custom);
305       setOperationAction(ISD::FSHR, MVT::i32, Custom);
306     }
307   } else {
308     setOperationAction(ISD::SELECT, XLenVT, Custom);
309   }
310 
311   ISD::CondCode FPCCToExpand[] = {
312       ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
313       ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
314       ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};
315 
316   ISD::NodeType FPOpToExpand[] = {
317       ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
318       ISD::FP_TO_FP16};
319 
320   if (Subtarget.hasStdExtZfh())
321     setOperationAction(ISD::BITCAST, MVT::i16, Custom);
322 
323   if (Subtarget.hasStdExtZfh()) {
324     setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
325     setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
326     for (auto CC : FPCCToExpand)
327       setCondCodeAction(CC, MVT::f16, Expand);
328     setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
329     setOperationAction(ISD::SELECT, MVT::f16, Custom);
330     setOperationAction(ISD::BR_CC, MVT::f16, Expand);
331     for (auto Op : FPOpToExpand)
332       setOperationAction(Op, MVT::f16, Expand);
333   }
334 
335   if (Subtarget.hasStdExtF()) {
336     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
337     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
338     for (auto CC : FPCCToExpand)
339       setCondCodeAction(CC, MVT::f32, Expand);
340     setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
341     setOperationAction(ISD::SELECT, MVT::f32, Custom);
342     setOperationAction(ISD::BR_CC, MVT::f32, Expand);
343     for (auto Op : FPOpToExpand)
344       setOperationAction(Op, MVT::f32, Expand);
345     setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
346     setTruncStoreAction(MVT::f32, MVT::f16, Expand);
347   }
348 
349   if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
350     setOperationAction(ISD::BITCAST, MVT::i32, Custom);
351 
352   if (Subtarget.hasStdExtD()) {
353     setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
354     setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
355     for (auto CC : FPCCToExpand)
356       setCondCodeAction(CC, MVT::f64, Expand);
357     setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
358     setOperationAction(ISD::SELECT, MVT::f64, Custom);
359     setOperationAction(ISD::BR_CC, MVT::f64, Expand);
360     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
361     setTruncStoreAction(MVT::f64, MVT::f32, Expand);
362     for (auto Op : FPOpToExpand)
363       setOperationAction(Op, MVT::f64, Expand);
364     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
365     setTruncStoreAction(MVT::f64, MVT::f16, Expand);
366   }
367 
368   if (Subtarget.is64Bit()) {
369     setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
370     setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
371     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
372     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
373   }
374 
375   if (Subtarget.hasStdExtF()) {
376     setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
377     setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
378   }
379 
380   setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
381   setOperationAction(ISD::BlockAddress, XLenVT, Custom);
382   setOperationAction(ISD::ConstantPool, XLenVT, Custom);
383   setOperationAction(ISD::JumpTable, XLenVT, Custom);
384 
385   setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);
386 
387   // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
388   // Unfortunately this can't be determined just from the ISA naming string.
389   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
390                      Subtarget.is64Bit() ? Legal : Custom);
391 
392   setOperationAction(ISD::TRAP, MVT::Other, Legal);
393   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
394   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
395 
396   if (Subtarget.hasStdExtA()) {
397     setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
398     setMinCmpXchgSizeInBits(32);
399   } else {
400     setMaxAtomicSizeInBitsSupported(0);
401   }
402 
403   setBooleanContents(ZeroOrOneBooleanContent);
404 
405   if (Subtarget.hasStdExtV()) {
406     setBooleanVectorContents(ZeroOrOneBooleanContent);
407 
408     setOperationAction(ISD::VSCALE, XLenVT, Custom);
409 
410     // RVV intrinsics may have illegal operands.
411     // We also need to custom legalize vmv.x.s.
412     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
413     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
414     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
415     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
416     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);
417     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
418     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
419     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
420 
421     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
422 
423     if (!Subtarget.is64Bit()) {
424       // We must custom-lower certain vXi64 operations on RV32 due to the vector
425       // element type being illegal.
426       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
427       setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);
428 
429       setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
430       setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
431       setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
432       setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
433       setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
434       setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
435       setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
436       setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);
437     }
438 
439     for (MVT VT : BoolVecVTs) {
440       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
441 
442       // Mask VTs are custom-expanded into a series of standard nodes
443       setOperationAction(ISD::TRUNCATE, VT, Custom);
444       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
445       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
446 
447       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
448 
449       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
450       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
451       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
452 
453       // Expand all extending loads to types larger than this, and truncating
454       // stores from types larger than this.
455       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
456         setTruncStoreAction(OtherVT, VT, Expand);
457         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
458         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
459         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
460       }
461     }
462 
463     for (MVT VT : IntVecVTs) {
464       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
465       setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
466 
467       setOperationAction(ISD::SMIN, VT, Legal);
468       setOperationAction(ISD::SMAX, VT, Legal);
469       setOperationAction(ISD::UMIN, VT, Legal);
470       setOperationAction(ISD::UMAX, VT, Legal);
471 
472       setOperationAction(ISD::ROTL, VT, Expand);
473       setOperationAction(ISD::ROTR, VT, Expand);
474 
475       // Custom-lower extensions and truncations from/to mask types.
476       setOperationAction(ISD::ANY_EXTEND, VT, Custom);
477       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
478       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
479 
480       // RVV has native int->float & float->int conversions where the
481       // element type sizes are within one power-of-two of each other. Any
482       // wider distances between type sizes have to be lowered as sequences
483       // which progressively narrow the gap in stages.
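      // For example, a conversion between nxv2i8 and nxv2f64 spans a factor
      // of eight in element size, so it is lowered as a staged sequence
      // rather than as a single conversion node.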
484       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
485       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
486       setOperationAction(ISD::FP_TO_SINT, VT, Custom);
487       setOperationAction(ISD::FP_TO_UINT, VT, Custom);
488 
489       // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
490       // nodes which truncate by one power of two at a time.
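      // For example, truncating nxv2i32 to nxv2i8 emits two such nodes,
      // narrowing i32 -> i16 -> i8.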
491       setOperationAction(ISD::TRUNCATE, VT, Custom);
492 
493       // Custom-lower insert/extract operations to simplify patterns.
494       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
495       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
496 
497       // Custom-lower reduction operations to set up the corresponding custom
498       // nodes' operands.
499       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
500       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
501       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
502       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
503       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
504       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
505       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
506       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
507 
508       setOperationAction(ISD::MLOAD, VT, Custom);
509       setOperationAction(ISD::MSTORE, VT, Custom);
510       setOperationAction(ISD::MGATHER, VT, Custom);
511       setOperationAction(ISD::MSCATTER, VT, Custom);
512 
513       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
514       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
515       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
516 
517       setOperationAction(ISD::STEP_VECTOR, VT, Custom);
518       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
519 
520       for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
521         setTruncStoreAction(VT, OtherVT, Expand);
522         setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
523         setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
524         setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
525       }
526     }
527 
528     // Expand various CCs to best match the RVV ISA, which natively supports UNE
529     // but no other unordered comparisons, and supports all ordered comparisons
530     // except ONE. Additionally, we expand GT,OGT,GE,OGE for optimization
531     // purposes; they are expanded to their swapped-operand CCs (LT,OLT,LE,OLE),
532     // and we pattern-match those back to the "original", swapping operands once
533     // more. This way we catch both operations and both "vf" and "fv" forms with
534     // fewer patterns.
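    // For example, (setogt x, y) is expanded to (setolt y, x); the patterns
    // for SETOLT/SETOLE then match with the operands swapped back, so one set
    // of patterns handles GT/GE alongside LT/LE in both operand orders.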
535     ISD::CondCode VFPCCToExpand[] = {
536         ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
537         ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
538         ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
539     };
540 
541     // Sets common operation actions on RVV floating-point vector types.
542     const auto SetCommonVFPActions = [&](MVT VT) {
543       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
544       // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
545       // sizes are within one power-of-two of each other. Therefore conversions
546       // between vXf16 and vXf64 must be lowered as sequences which convert via
547       // vXf32.
548       setOperationAction(ISD::FP_ROUND, VT, Custom);
549       setOperationAction(ISD::FP_EXTEND, VT, Custom);
550       // Custom-lower insert/extract operations to simplify patterns.
551       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
552       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
553       // Expand various condition codes (explained above).
554       for (auto CC : VFPCCToExpand)
555         setCondCodeAction(CC, VT, Expand);
556 
557       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
558       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
559       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
560 
561       setOperationAction(ISD::MLOAD, VT, Custom);
562       setOperationAction(ISD::MSTORE, VT, Custom);
563       setOperationAction(ISD::MGATHER, VT, Custom);
564       setOperationAction(ISD::MSCATTER, VT, Custom);
565 
566       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
567       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
568       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
569 
570       setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
571     };
572 
573     // Sets common extload/truncstore actions on RVV floating-point vector
574     // types.
575     const auto SetCommonVFPExtLoadTruncStoreActions =
576         [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
577           for (auto SmallVT : SmallerVTs) {
578             setTruncStoreAction(VT, SmallVT, Expand);
579             setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
580           }
581         };
582 
583     if (Subtarget.hasStdExtZfh())
584       for (MVT VT : F16VecVTs)
585         SetCommonVFPActions(VT);
586 
587     for (MVT VT : F32VecVTs) {
588       if (Subtarget.hasStdExtF())
589         SetCommonVFPActions(VT);
590       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
591     }
592 
593     for (MVT VT : F64VecVTs) {
594       if (Subtarget.hasStdExtD())
595         SetCommonVFPActions(VT);
596       SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
597       SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
598     }
599 
600     if (Subtarget.useRVVForFixedLengthVectors()) {
601       for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
602         if (!useRVVForFixedLengthVectorVT(VT))
603           continue;
604 
605         // By default everything must be expanded.
606         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
607           setOperationAction(Op, VT, Expand);
608         for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
609           setTruncStoreAction(VT, OtherVT, Expand);
610           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
611           setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
612           setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
613         }
614 
615         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
616         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
617         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
618 
619         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
620         setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
621 
622         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
623 
624         setOperationAction(ISD::LOAD, VT, Custom);
625         setOperationAction(ISD::STORE, VT, Custom);
626 
627         setOperationAction(ISD::SETCC, VT, Custom);
628 
629         setOperationAction(ISD::TRUNCATE, VT, Custom);
630 
631         setOperationAction(ISD::BITCAST, VT, Custom);
632 
633         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
634         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
635         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
636 
637         // Operations below differ between mask vectors and other vectors.
638         if (VT.getVectorElementType() == MVT::i1) {
639           setOperationAction(ISD::AND, VT, Custom);
640           setOperationAction(ISD::OR, VT, Custom);
641           setOperationAction(ISD::XOR, VT, Custom);
642           continue;
643         }
644 
645         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
646         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
647 
648         setOperationAction(ISD::MLOAD, VT, Custom);
649         setOperationAction(ISD::MSTORE, VT, Custom);
650         setOperationAction(ISD::MGATHER, VT, Custom);
651         setOperationAction(ISD::MSCATTER, VT, Custom);
652         setOperationAction(ISD::ADD, VT, Custom);
653         setOperationAction(ISD::MUL, VT, Custom);
654         setOperationAction(ISD::SUB, VT, Custom);
655         setOperationAction(ISD::AND, VT, Custom);
656         setOperationAction(ISD::OR, VT, Custom);
657         setOperationAction(ISD::XOR, VT, Custom);
658         setOperationAction(ISD::SDIV, VT, Custom);
659         setOperationAction(ISD::SREM, VT, Custom);
660         setOperationAction(ISD::UDIV, VT, Custom);
661         setOperationAction(ISD::UREM, VT, Custom);
662         setOperationAction(ISD::SHL, VT, Custom);
663         setOperationAction(ISD::SRA, VT, Custom);
664         setOperationAction(ISD::SRL, VT, Custom);
665 
666         setOperationAction(ISD::SMIN, VT, Custom);
667         setOperationAction(ISD::SMAX, VT, Custom);
668         setOperationAction(ISD::UMIN, VT, Custom);
669         setOperationAction(ISD::UMAX, VT, Custom);
670         setOperationAction(ISD::ABS,  VT, Custom);
671 
672         setOperationAction(ISD::MULHS, VT, Custom);
673         setOperationAction(ISD::MULHU, VT, Custom);
674 
675         setOperationAction(ISD::SINT_TO_FP, VT, Custom);
676         setOperationAction(ISD::UINT_TO_FP, VT, Custom);
677         setOperationAction(ISD::FP_TO_SINT, VT, Custom);
678         setOperationAction(ISD::FP_TO_UINT, VT, Custom);
679 
680         setOperationAction(ISD::VSELECT, VT, Custom);
681 
682         setOperationAction(ISD::ANY_EXTEND, VT, Custom);
683         setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
684         setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
685 
686         // Custom-lower reduction operations to set up the corresponding custom
687         // nodes' operands.
688         setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
689         setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
690         setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
691         setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
692         setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
693       }
694 
695       for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
696         if (!useRVVForFixedLengthVectorVT(VT))
697           continue;
698 
699         // By default everything must be expanded.
700         for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
701           setOperationAction(Op, VT, Expand);
702         for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
703           setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
704           setTruncStoreAction(VT, OtherVT, Expand);
705         }
706 
707         // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
708         setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
709         setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
710 
711         setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
712         setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
713         setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
714         setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
715 
716         setOperationAction(ISD::LOAD, VT, Custom);
717         setOperationAction(ISD::STORE, VT, Custom);
718         setOperationAction(ISD::MLOAD, VT, Custom);
719         setOperationAction(ISD::MSTORE, VT, Custom);
720         setOperationAction(ISD::MGATHER, VT, Custom);
721         setOperationAction(ISD::MSCATTER, VT, Custom);
722         setOperationAction(ISD::FADD, VT, Custom);
723         setOperationAction(ISD::FSUB, VT, Custom);
724         setOperationAction(ISD::FMUL, VT, Custom);
725         setOperationAction(ISD::FDIV, VT, Custom);
726         setOperationAction(ISD::FNEG, VT, Custom);
727         setOperationAction(ISD::FABS, VT, Custom);
728         setOperationAction(ISD::FCOPYSIGN, VT, Custom);
729         setOperationAction(ISD::FSQRT, VT, Custom);
730         setOperationAction(ISD::FMA, VT, Custom);
731 
732         setOperationAction(ISD::FP_ROUND, VT, Custom);
733         setOperationAction(ISD::FP_EXTEND, VT, Custom);
734 
735         for (auto CC : VFPCCToExpand)
736           setCondCodeAction(CC, VT, Expand);
737 
738         setOperationAction(ISD::VSELECT, VT, Custom);
739 
740         setOperationAction(ISD::BITCAST, VT, Custom);
741 
742         setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
743         setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
744       }
745 
746       // Custom-legalize bitcasts from fixed-length vectors to scalar types.
747       setOperationAction(ISD::BITCAST, MVT::i8, Custom);
748       setOperationAction(ISD::BITCAST, MVT::i16, Custom);
749       setOperationAction(ISD::BITCAST, MVT::i32, Custom);
750       setOperationAction(ISD::BITCAST, MVT::i64, Custom);
751       setOperationAction(ISD::BITCAST, MVT::f16, Custom);
752       setOperationAction(ISD::BITCAST, MVT::f32, Custom);
753       setOperationAction(ISD::BITCAST, MVT::f64, Custom);
754     }
755   }
756 
757   // Function alignments.
758   const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
759   setMinFunctionAlignment(FunctionAlignment);
760   setPrefFunctionAlignment(FunctionAlignment);
761 
762   setMinimumJumpTableEntries(5);
763 
764   // Jumps are expensive, compared to logic
765   setJumpIsExpensive();
766 
767   // We can use any register for comparisons
768   setHasMultipleConditionRegisters();
769 
770   if (Subtarget.hasStdExtZbp()) {
771     setTargetDAGCombine(ISD::OR);
772   }
773   if (Subtarget.hasStdExtV()) {
774     setTargetDAGCombine(ISD::FCOPYSIGN);
775     setTargetDAGCombine(ISD::MGATHER);
776     setTargetDAGCombine(ISD::MSCATTER);
777   }
778 }
779 
780 EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
781                                             LLVMContext &Context,
782                                             EVT VT) const {
783   if (!VT.isVector())
784     return getPointerTy(DL);
785   if (Subtarget.hasStdExtV() &&
786       (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
787     return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
788   return VT.changeVectorElementTypeToInteger();
789 }
790 
791 bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
792                                              const CallInst &I,
793                                              MachineFunction &MF,
794                                              unsigned Intrinsic) const {
795   switch (Intrinsic) {
796   default:
797     return false;
798   case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
799   case Intrinsic::riscv_masked_atomicrmw_add_i32:
800   case Intrinsic::riscv_masked_atomicrmw_sub_i32:
801   case Intrinsic::riscv_masked_atomicrmw_nand_i32:
802   case Intrinsic::riscv_masked_atomicrmw_max_i32:
803   case Intrinsic::riscv_masked_atomicrmw_min_i32:
804   case Intrinsic::riscv_masked_atomicrmw_umax_i32:
805   case Intrinsic::riscv_masked_atomicrmw_umin_i32:
806   case Intrinsic::riscv_masked_cmpxchg_i32:
807     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
808     Info.opc = ISD::INTRINSIC_W_CHAIN;
809     Info.memVT = MVT::getVT(PtrTy->getElementType());
810     Info.ptrVal = I.getArgOperand(0);
811     Info.offset = 0;
812     Info.align = Align(4);
813     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
814                  MachineMemOperand::MOVolatile;
815     return true;
816   }
817 }
818 
819 bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
820                                                 const AddrMode &AM, Type *Ty,
821                                                 unsigned AS,
822                                                 Instruction *I) const {
823   // No global is ever allowed as a base.
824   if (AM.BaseGV)
825     return false;
826 
827   // Require a 12-bit signed offset.
828   if (!isInt<12>(AM.BaseOffs))
829     return false;
830 
831   switch (AM.Scale) {
832   case 0: // "r+i" or just "i", depending on HasBaseReg.
833     break;
834   case 1:
835     if (!AM.HasBaseReg) // allow "r+i".
836       break;
837     return false; // disallow "r+r" or "r+r+i".
838   default:
839     return false;
840   }
841 
842   return true;
843 }
844 
845 bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
846   return isInt<12>(Imm);
847 }
848 
849 bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
850   return isInt<12>(Imm);
851 }
852 
853 // On RV32, 64-bit integers are split into their high and low parts and held
854 // in two different registers, so the trunc is free since the low register can
855 // just be used.
856 bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
857   if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
858     return false;
859   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
860   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
861   return (SrcBits == 64 && DestBits == 32);
862 }
863 
864 bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
865   if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
866       !SrcVT.isInteger() || !DstVT.isInteger())
867     return false;
868   unsigned SrcBits = SrcVT.getSizeInBits();
869   unsigned DestBits = DstVT.getSizeInBits();
870   return (SrcBits == 64 && DestBits == 32);
871 }
872 
873 bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
874   // Zexts are free if they can be combined with a load.
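  // For example, an i8 or i16 zero-extending load selects to LBU/LHU (and on
  // RV64 an i32 one to LWU), so no separate zero-extension is needed.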
875   if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
876     EVT MemVT = LD->getMemoryVT();
877     if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
878          (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
879         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
880          LD->getExtensionType() == ISD::ZEXTLOAD))
881       return true;
882   }
883 
884   return TargetLowering::isZExtFree(Val, VT2);
885 }
886 
887 bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
888   return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
889 }
890 
891 bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
892   return Subtarget.hasStdExtZbb();
893 }
894 
895 bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
896   return Subtarget.hasStdExtZbb();
897 }
898 
899 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
900                                        bool ForCodeSize) const {
901   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
902     return false;
903   if (VT == MVT::f32 && !Subtarget.hasStdExtF())
904     return false;
905   if (VT == MVT::f64 && !Subtarget.hasStdExtD())
906     return false;
907   if (Imm.isNegZero())
908     return false;
909   return Imm.isZero();
910 }
911 
912 bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
913   return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
914          (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
915          (VT == MVT::f64 && Subtarget.hasStdExtD());
916 }
917 
918 MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
919                                                       CallingConv::ID CC,
920                                                       EVT VT) const {
921   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
922   // end up using a GPR but that will be decided based on ABI.
923   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
924     return MVT::f32;
925 
926   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
927 }
928 
929 unsigned RISCVTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
930                                                            CallingConv::ID CC,
931                                                            EVT VT) const {
932   // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
933   // end up using a GPR but that will be decided based on ABI.
934   if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
935     return 1;
936 
937   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
938 }
939 
940 // Changes the condition code and swaps operands if necessary, so the SetCC
941 // operation matches one of the comparisons supported directly by branches
942 // in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
943 // with 1/-1.
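// For example, (setgt x, -1) becomes (setge x, 0), and (setugt x, y) becomes
// (setult y, x), both of which map directly onto BGE/BLTU branches.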
944 static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
945                                     ISD::CondCode &CC, SelectionDAG &DAG) {
946   // Convert X > -1 to X >= 0.
947   if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
948     RHS = DAG.getConstant(0, DL, RHS.getValueType());
949     CC = ISD::SETGE;
950     return;
951   }
952   // Convert X < 1 to 0 >= X.
953   if (CC == ISD::SETLT && isOneConstant(RHS)) {
954     RHS = LHS;
955     LHS = DAG.getConstant(0, DL, RHS.getValueType());
956     CC = ISD::SETGE;
957     return;
958   }
959 
960   switch (CC) {
961   default:
962     break;
963   case ISD::SETGT:
964   case ISD::SETLE:
965   case ISD::SETUGT:
966   case ISD::SETULE:
967     CC = ISD::getSetCCSwappedOperands(CC);
968     std::swap(LHS, RHS);
969     break;
970   }
971 }
972 
973 // Return the RISC-V branch opcode that matches the given DAG integer
974 // condition code. The CondCode must be one of those supported by the RISC-V
975 // ISA (see translateSetCCForBranch).
976 static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
977   switch (CC) {
978   default:
979     llvm_unreachable("Unsupported CondCode");
980   case ISD::SETEQ:
981     return RISCV::BEQ;
982   case ISD::SETNE:
983     return RISCV::BNE;
984   case ISD::SETLT:
985     return RISCV::BLT;
986   case ISD::SETGE:
987     return RISCV::BGE;
988   case ISD::SETULT:
989     return RISCV::BLTU;
990   case ISD::SETUGE:
991     return RISCV::BGEU;
992   }
993 }
994 
995 RISCVVLMUL RISCVTargetLowering::getLMUL(MVT VT) {
996   assert(VT.isScalableVector() && "Expecting a scalable vector type");
997   unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
998   if (VT.getVectorElementType() == MVT::i1)
999     KnownSize *= 8;
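  // For example, nxv4i1 has a known minimum size of 4 bits; scaling by 8
  // treats it like nxv4i8 (32 bits), so it maps to LMUL_F2 below.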
1000 
1001   switch (KnownSize) {
1002   default:
1003     llvm_unreachable("Invalid LMUL.");
1004   case 8:
1005     return RISCVVLMUL::LMUL_F8;
1006   case 16:
1007     return RISCVVLMUL::LMUL_F4;
1008   case 32:
1009     return RISCVVLMUL::LMUL_F2;
1010   case 64:
1011     return RISCVVLMUL::LMUL_1;
1012   case 128:
1013     return RISCVVLMUL::LMUL_2;
1014   case 256:
1015     return RISCVVLMUL::LMUL_4;
1016   case 512:
1017     return RISCVVLMUL::LMUL_8;
1018   }
1019 }
1020 
1021 unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVVLMUL LMul) {
1022   switch (LMul) {
1023   default:
1024     llvm_unreachable("Invalid LMUL.");
1025   case RISCVVLMUL::LMUL_F8:
1026   case RISCVVLMUL::LMUL_F4:
1027   case RISCVVLMUL::LMUL_F2:
1028   case RISCVVLMUL::LMUL_1:
1029     return RISCV::VRRegClassID;
1030   case RISCVVLMUL::LMUL_2:
1031     return RISCV::VRM2RegClassID;
1032   case RISCVVLMUL::LMUL_4:
1033     return RISCV::VRM4RegClassID;
1034   case RISCVVLMUL::LMUL_8:
1035     return RISCV::VRM8RegClassID;
1036   }
1037 }
1038 
1039 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
1040   RISCVVLMUL LMUL = getLMUL(VT);
1041   if (LMUL == RISCVVLMUL::LMUL_F8 || LMUL == RISCVVLMUL::LMUL_F4 ||
1042       LMUL == RISCVVLMUL::LMUL_F2 || LMUL == RISCVVLMUL::LMUL_1) {
1043     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
1044                   "Unexpected subreg numbering");
1045     return RISCV::sub_vrm1_0 + Index;
1046   }
1047   if (LMUL == RISCVVLMUL::LMUL_2) {
1048     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
1049                   "Unexpected subreg numbering");
1050     return RISCV::sub_vrm2_0 + Index;
1051   }
1052   if (LMUL == RISCVVLMUL::LMUL_4) {
1053     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
1054                   "Unexpected subreg numbering");
1055     return RISCV::sub_vrm4_0 + Index;
1056   }
1057   llvm_unreachable("Invalid vector type.");
1058 }
1059 
1060 unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
1061   if (VT.getVectorElementType() == MVT::i1)
1062     return RISCV::VRRegClassID;
1063   return getRegClassIDForLMUL(getLMUL(VT));
1064 }
1065 
1066 // Attempt to decompose a subvector insert/extract between VecVT and
1067 // SubVecVT via subregister indices. Returns the subregister index that
1068 // can perform the subvector insert/extract with the given element index, as
1069 // well as the index corresponding to any leftover subvectors that must be
1070 // further inserted/extracted within the register class for SubVecVT.
1071 std::pair<unsigned, unsigned>
1072 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
1073     MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
1074     const RISCVRegisterInfo *TRI) {
1075   static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
1076                  RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
1077                  RISCV::VRM2RegClassID > RISCV::VRRegClassID),
1078                 "Register classes not ordered");
1079   unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
1080   unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
1081   // Try to compose a subregister index that takes us from the incoming
1082   // LMUL>1 register class down to the outgoing one. At each step we halve
1083   // the LMUL:
1084   //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
1085   // Note that this is not guaranteed to find a subregister index, such as
1086   // when we are extracting from one VR type to another.
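  // Worked example for the case above: starting from nxv16i32 (an LMUL=8
  // type) with index 12, the first step halves to nxv8i32; 12 >= 8 selects
  // sub_vrm4_1 and leaves index 4. Halving to nxv4i32, 4 >= 4 selects
  // sub_vrm2_1 and leaves index 0. The final halving to nxv2i32 selects
  // sub_vrm1_0, giving a leftover index of 0.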
1087   unsigned SubRegIdx = RISCV::NoSubRegister;
1088   for (const unsigned RCID :
1089        {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
1090     if (VecRegClassID > RCID && SubRegClassID <= RCID) {
1091       VecVT = VecVT.getHalfNumVectorElementsVT();
1092       bool IsHi =
1093           InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
1094       SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
1095                                             getSubregIndexByMVT(VecVT, IsHi));
1096       if (IsHi)
1097         InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
1098     }
1099   return {SubRegIdx, InsertExtractIdx};
1100 }
1101 
1102 // Return the scalable vector container type to use for the given fixed-length vector VT.
1103 MVT RISCVTargetLowering::getContainerForFixedLengthVector(
1104     const TargetLowering &TLI, MVT VT, const RISCVSubtarget &Subtarget) {
1105   assert(VT.isFixedLengthVector() && TLI.isTypeLegal(VT) &&
1106          "Expected legal fixed length vector!");
1107 
1108   unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
1109   assert(LMul <= 8 && isPowerOf2_32(LMul) && "Unexpected LMUL!");
1110 
1111   MVT EltVT = VT.getVectorElementType();
1112   switch (EltVT.SimpleTy) {
1113   default:
1114     llvm_unreachable("unexpected element type for RVV container");
1115   case MVT::i1: {
1116     // Masks are calculated assuming 8-bit elements since that's when we need
1117     // the most elements.
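    // For example, with RVVBitsPerBlock == 64 an LMUL=1 fixed-length mask
    // gets an nxv8i1 container and an LMUL=4 one gets an nxv32i1 container.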
1118     unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / 8;
1119     return MVT::getScalableVectorVT(MVT::i1, LMul * EltsPerBlock);
1120   }
1121   case MVT::i8:
1122   case MVT::i16:
1123   case MVT::i32:
1124   case MVT::i64:
1125   case MVT::f16:
1126   case MVT::f32:
1127   case MVT::f64: {
1128     unsigned EltsPerBlock = RISCV::RVVBitsPerBlock / EltVT.getSizeInBits();
1129     return MVT::getScalableVectorVT(EltVT, LMul * EltsPerBlock);
1130   }
1131   }
1132 }
1133 
1134 MVT RISCVTargetLowering::getContainerForFixedLengthVector(
1135     SelectionDAG &DAG, MVT VT, const RISCVSubtarget &Subtarget) {
1136   return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
1137                                           Subtarget);
1138 }
1139 
1140 MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
1141   return getContainerForFixedLengthVector(*this, VT, getSubtarget());
1142 }
1143 
1144 // Grow V to consume an entire RVV register.
1145 static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1146                                        const RISCVSubtarget &Subtarget) {
1147   assert(VT.isScalableVector() &&
1148          "Expected to convert into a scalable vector!");
1149   assert(V.getValueType().isFixedLengthVector() &&
1150          "Expected a fixed length vector operand!");
1151   SDLoc DL(V);
1152   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1153   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
1154 }
1155 
1156 // Shrink V so it's just big enough to maintain a VT's worth of data.
1157 static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
1158                                          const RISCVSubtarget &Subtarget) {
1159   assert(VT.isFixedLengthVector() &&
1160          "Expected to convert into a fixed length vector!");
1161   assert(V.getValueType().isScalableVector() &&
1162          "Expected a scalable vector operand!");
1163   SDLoc DL(V);
1164   SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
1165   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
1166 }
1167 
1168 // Gets the two common "VL" operands: an all-ones mask and the vector length.
1169 // VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
1170 // the vector type that it is contained in.
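// For example, a fixed-length v4i32 gets a VL of constant 4, whereas a
// scalable type gets the X0 register, which is interpreted as VLMAX.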
1171 static std::pair<SDValue, SDValue>
1172 getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
1173                 const RISCVSubtarget &Subtarget) {
1174   assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
1175   MVT XLenVT = Subtarget.getXLenVT();
1176   SDValue VL = VecVT.isFixedLengthVector()
1177                    ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
1178                    : DAG.getRegister(RISCV::X0, XLenVT);
1179   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
1180   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
1181   return {Mask, VL};
1182 }
1183 
1184 // As above but assuming the given type is a scalable vector type.
1185 static std::pair<SDValue, SDValue>
1186 getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
1187                         const RISCVSubtarget &Subtarget) {
1188   assert(VecVT.isScalableVector() && "Expecting a scalable vector");
1189   return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
1190 }
1191 
1192 // The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very little
1193 // of either is (currently) supported. This can get us into an infinite loop
1194 // where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
1195 // as a ..., etc.
1196 // Until either (or both) of these can reliably lower any node, reporting that
1197 // we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
1198 // the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
1199 // which is not desirable.
1200 bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
1201     EVT VT, unsigned DefinedValues) const {
1202   return false;
1203 }
1204 
1205 bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
1206   // Only splats are currently supported.
1207   if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
1208     return true;
1209 
1210   return false;
1211 }
1212 
1213 static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
1214                                  const RISCVSubtarget &Subtarget) {
1215   MVT VT = Op.getSimpleValueType();
1216   assert(VT.isFixedLengthVector() && "Unexpected vector!");
1217 
1218   MVT ContainerVT =
1219       RISCVTargetLowering::getContainerForFixedLengthVector(DAG, VT, Subtarget);
1220 
1221   SDLoc DL(Op);
1222   SDValue Mask, VL;
1223   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1224 
1225   MVT XLenVT = Subtarget.getXLenVT();
1226   unsigned NumElts = Op.getNumOperands();
1227 
1228   if (VT.getVectorElementType() == MVT::i1) {
1229     if (ISD::isBuildVectorAllZeros(Op.getNode())) {
1230       SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
1231       return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
1232     }
1233 
1234     if (ISD::isBuildVectorAllOnes(Op.getNode())) {
1235       SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
1236       return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
1237     }
1238 
1239     // Lower constant mask BUILD_VECTORs via an integer vector type, in
1240     // scalar integer chunks whose bit-width depends on the number of mask
1241     // bits and XLEN.
1242     // First, determine the most appropriate scalar integer type to use. This
1243     // is at most XLenVT, but may be shrunk to a smaller vector element type
1244     // according to the size of the final vector - use i8 chunks rather than
1245     // XLenVT if we're producing a v8i1. This results in more consistent
1246     // codegen across RV32 and RV64.
1247     // If we have to use more than one INSERT_VECTOR_ELT then this optimization
1248     // is likely to increase code size; avoid performing it in such a case.
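    // As a sketch: the constant v8i1 mask <1,0,1,1,0,0,0,1> picks 8-bit
    // chunks, accumulates its bits LSB-first into the single value 0x8D,
    // builds a v1i8 with one INSERT_VECTOR_ELT, and bitcasts back to v8i1.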
1249     unsigned NumViaIntegerBits =
1250         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
1251     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1252         (!DAG.shouldOptForSize() || NumElts <= NumViaIntegerBits)) {
1253       // Now we can create our integer vector type. Note that it may be larger
1254       // than the resulting mask type: v4i1 would use v1i8 as its integer type.
1255       MVT IntegerViaVecVT =
1256           MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
1257                            divideCeil(NumElts, NumViaIntegerBits));
1258 
1259       uint64_t Bits = 0;
1260       unsigned BitPos = 0, IntegerEltIdx = 0;
1261       SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);
1262 
1263       for (unsigned I = 0; I < NumElts; I++, BitPos++) {
1264         // Once we accumulate enough bits to fill our scalar type, insert into
1265         // our vector and clear our accumulated data.
1266         if (I != 0 && I % NumViaIntegerBits == 0) {
1267           if (NumViaIntegerBits <= 32)
1268             Bits = SignExtend64(Bits, 32);
1269           SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1270           Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
1271                             Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1272           Bits = 0;
1273           BitPos = 0;
1274           IntegerEltIdx++;
1275         }
1276         SDValue V = Op.getOperand(I);
1277         bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
1278         Bits |= ((uint64_t)BitValue << BitPos);
1279       }
1280 
1281       // Insert the (remaining) scalar value into position in our integer
1282       // vector type.
1283       if (NumViaIntegerBits <= 32)
1284         Bits = SignExtend64(Bits, 32);
1285       SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
1286       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
1287                         DAG.getConstant(IntegerEltIdx, DL, XLenVT));
1288 
1289       if (NumElts < NumViaIntegerBits) {
1290         // If we're producing a smaller vector than our minimum legal integer
1291         // type, bitcast to the equivalent (known-legal) mask type, and extract
1292         // our final mask.
1293         assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
1294         Vec = DAG.getBitcast(MVT::v8i1, Vec);
1295         Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
1296                           DAG.getConstant(0, DL, XLenVT));
1297       } else {
1298         // Else we must have produced an integer type with the same size as the
1299         // mask type; bitcast for the final result.
1300         assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
1301         Vec = DAG.getBitcast(VT, Vec);
1302       }
1303 
1304       return Vec;
1305     }
1306 
1307     return SDValue();
1308   }
1309 
1310   if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
1311     unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
1312                                         : RISCVISD::VMV_V_X_VL;
1313     Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
1314     return convertFromScalableVector(VT, Splat, DAG, Subtarget);
1315   }
1316 
1317   // Try and match an index sequence, which we can lower directly to the vid
1318   // instruction. An all-undef vector is matched by getSplatValue, above.
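  // For example, v4i32 <0, 1, 2, 3> (possibly with some undef elements)
  // lowers to a single vid.v.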
1319   if (VT.isInteger()) {
1320     bool IsVID = true;
1321     for (unsigned I = 0; I < NumElts && IsVID; I++)
1322       IsVID &= Op.getOperand(I).isUndef() ||
1323                (isa<ConstantSDNode>(Op.getOperand(I)) &&
1324                 Op.getConstantOperandVal(I) == I);
1325 
1326     if (IsVID) {
1327       SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
1328       return convertFromScalableVector(VT, VID, DAG, Subtarget);
1329     }
1330   }
1331 
1332   // Attempt to detect "hidden" splats, which only reveal themselves as splats
1333   // when re-interpreted as a vector with a larger element type. For example,
1334   //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
1335   // could be instead splat as
1336   //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
1337   // TODO: This optimization could also work on non-constant splats, but it
1338   // would require bit-manipulation instructions to construct the splat value.
1339   SmallVector<SDValue> Sequence;
1340   unsigned EltBitSize = VT.getScalarSizeInBits();
1341   const auto *BV = cast<BuildVectorSDNode>(Op);
1342   if (VT.isInteger() && EltBitSize < 64 &&
1343       ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
1344       BV->getRepeatedSequence(Sequence) &&
1345       (Sequence.size() * EltBitSize) <= 64) {
1346     unsigned SeqLen = Sequence.size();
1347     MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
1348     MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
1349     assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
1350             ViaIntVT == MVT::i64) &&
1351            "Unexpected sequence type");
1352 
1353     unsigned EltIdx = 0;
1354     uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
1355     uint64_t SplatValue = 0;
1356     // Construct the amalgamated value which can be splatted as this larger
1357     // vector type.
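    // For the <i16 0, i16 1> sequence above this gives
    // SplatValue = 0 | (1 << 16) = 0x00010000.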
1358     for (const auto &SeqV : Sequence) {
1359       if (!SeqV.isUndef())
1360         SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
1361                        << (EltIdx * EltBitSize));
1362       EltIdx++;
1363     }
1364 
1365     // On RV64, sign-extend from 32 to 64 bits where possible in order to
    // achieve better constant materialization.
1367     if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
1368       SplatValue = SignExtend64(SplatValue, 32);
1369 
1370     // Since we can't introduce illegal i64 types at this stage, we can only
1371     // perform an i64 splat on RV32 if it is its own sign-extended value. That
1372     // way we can use RVV instructions to splat.
1373     assert((ViaIntVT.bitsLE(XLenVT) ||
1374             (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
1375            "Unexpected bitcast sequence");
1376     if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
1377       SDValue ViaVL =
1378           DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
1379       MVT ViaContainerVT =
1380           RISCVTargetLowering::getContainerForFixedLengthVector(DAG, ViaVecVT,
1381                                                                 Subtarget);
1382       SDValue Splat =
1383           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
1384                       DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
1385       Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
1386       return DAG.getBitcast(VT, Splat);
1387     }
1388   }
1389 
1390   // Try and optimize BUILD_VECTORs with "dominant values" - these are values
1391   // which constitute a large proportion of the elements. In such cases we can
1392   // splat a vector with the dominant element and make up the shortfall with
1393   // INSERT_VECTOR_ELTs.
1394   // Note that this includes vectors of 2 elements by association. The
1395   // upper-most element is the "dominant" one, allowing us to use a splat to
1396   // "insert" the upper element, and an insert of the lower element at position
1397   // 0, which improves codegen.
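  // For example, v4i32 <3, 3, 7, 3> is lowered as a splat of 3 followed by an
  // insert of 7 at index 2.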
1398   SDValue DominantValue;
1399   unsigned MostCommonCount = 0;
1400   DenseMap<SDValue, unsigned> ValueCounts;
1401   unsigned NumUndefElts =
1402       count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });
1403 
1404   for (SDValue V : Op->op_values()) {
1405     if (V.isUndef())
1406       continue;
1407 
1408     ValueCounts.insert(std::make_pair(V, 0));
1409     unsigned &Count = ValueCounts[V];
1410 
1411     // Is this value dominant? In case of a tie, prefer the highest element as
1412     // it's cheaper to insert near the beginning of a vector than it is at the
1413     // end.
1414     if (++Count >= MostCommonCount) {
1415       DominantValue = V;
1416       MostCommonCount = Count;
1417     }
1418   }
1419 
1420   assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
1421   unsigned NumDefElts = NumElts - NumUndefElts;
1422   unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;
1423 
1424   // Don't perform this optimization when optimizing for size, since
1425   // materializing elements and inserting them tends to cause code bloat.
1426   if (!DAG.shouldOptForSize() &&
1427       ((MostCommonCount > DominantValueCountThreshold) ||
1428        (ValueCounts.size() <= Log2_32(NumDefElts)))) {
1429     // Start by splatting the most common element.
1430     SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);
1431 
1432     DenseSet<SDValue> Processed{DominantValue};
1433     MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
1434     for (const auto &OpIdx : enumerate(Op->ops())) {
1435       const SDValue &V = OpIdx.value();
1436       if (V.isUndef() || !Processed.insert(V).second)
1437         continue;
1438       if (ValueCounts[V] == 1) {
1439         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
1440                           DAG.getConstant(OpIdx.index(), DL, XLenVT));
1441       } else {
1442         // Blend in all instances of this value using a VSELECT, using a
1443         // mask where each bit signals whether that element is the one
1444         // we're after.
1445         SmallVector<SDValue> Ops;
1446         transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
1447           return DAG.getConstant(V == V1, DL, XLenVT);
1448         });
1449         Vec = DAG.getNode(ISD::VSELECT, DL, VT,
1450                           DAG.getBuildVector(SelMaskTy, DL, Ops),
1451                           DAG.getSplatBuildVector(VT, DL, V), Vec);
1452       }
1453     }
1454 
1455     return Vec;
1456   }
1457 
1458   return SDValue();
1459 }
1460 
1461 // Called by type legalization to handle splat of i64 on RV32.
1462 // FIXME: We can optimize this when the type has sign or zero bits in one
1463 // of the halves.
1464 static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
1465                                    SDValue VL, SelectionDAG &DAG,
1466                                    const RISCVSubtarget &Subtarget) {
1467   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1468                            DAG.getConstant(0, DL, MVT::i32));
1469   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
1470                            DAG.getConstant(1, DL, MVT::i32));
1471 
1472   // Fall back to a stack store and stride x0 vector load.
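  // That is: store Lo at FI and Hi at FI+4, then broadcast the combined
  // 64-bit value with a zero-stride (x0) strided load, so every element
  // reads the same 8 bytes from the stack slot.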
1473   MachineFunction &MF = DAG.getMachineFunction();
1474   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
1475 
1476   // We use the same frame index we use for moving two i32s into 64-bit FPR.
1477   // This is an analogous operation.
1478   int FI = FuncInfo->getMoveF64FrameIndex(MF);
1479   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
1480   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1481   SDValue StackSlot =
1482       DAG.getFrameIndex(FI, TLI.getPointerTy(DAG.getDataLayout()));
1483 
1484   SDValue Chain = DAG.getEntryNode();
1485   Lo = DAG.getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));
1486 
1487   SDValue OffsetSlot =
1488       DAG.getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
1489   Hi = DAG.getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4), Align(8));
1490 
1491   Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);
1492 
1493   MVT XLenVT = Subtarget.getXLenVT();
1494   SDVTList VTs = DAG.getVTList({VT, MVT::Other});
1495   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1496   SDValue Ops[] = {Chain, IntID, StackSlot, DAG.getRegister(RISCV::X0, XLenVT),
1497                    VL};
1498 
1499   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MVT::i64,
1500                                  MPI, Align(8), MachineMemOperand::MOLoad);
1501 }
1502 
1503 // This function lowers a splat of a scalar operand Splat with the vector
1504 // length VL. It ensures the final sequence is type legal, which is useful when
1505 // lowering a splat after type legalization.
1506 static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
1507                                 SelectionDAG &DAG,
1508                                 const RISCVSubtarget &Subtarget) {
1509   if (VT.isFloatingPoint())
1510     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);
1511 
1512   MVT XLenVT = Subtarget.getXLenVT();
1513 
1514   // Simplest case is that the operand needs to be promoted to XLenVT.
1515   if (Scalar.getValueType().bitsLE(XLenVT)) {
1516     // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
1519     // FIXME: Should we ignore the upper bits in isel instead?
1520     unsigned ExtOpc =
1521         isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
1522     Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
1523     return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
1524   }
1525 
1526   assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
1527          "Unexpected scalar for splat lowering!");
1528 
1529   // If this is a sign-extended 32-bit constant, we can truncate it and rely
1530   // on the instruction to sign-extend since SEW>XLEN.
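  // For example, splatting the i64 constant -1 on RV32 emits VMV_V_X_VL with
  // the i32 value -1; the instruction sign-extends it to the full 64-bit
  // element width.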
1531   if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar)) {
1532     if (isInt<32>(CVal->getSExtValue()))
1533       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
1534                          DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32),
1535                          VL);
1536   }
1537 
1538   // Otherwise use the more complicated splatting algorithm.
1539   return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG, Subtarget);
1540 }
1541 
1542 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
1543                                    const RISCVSubtarget &Subtarget) {
1544   SDValue V1 = Op.getOperand(0);
1545   SDValue V2 = Op.getOperand(1);
1546   SDLoc DL(Op);
1547   MVT XLenVT = Subtarget.getXLenVT();
1548   MVT VT = Op.getSimpleValueType();
1549   unsigned NumElts = VT.getVectorNumElements();
1550   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
1551 
1552   MVT ContainerVT =
1553       RISCVTargetLowering::getContainerForFixedLengthVector(DAG, VT, Subtarget);
1554 
1555   SDValue TrueMask, VL;
1556   std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1557 
1558   if (SVN->isSplat()) {
1559     const int Lane = SVN->getSplatIndex();
1560     if (Lane >= 0) {
1561       MVT SVT = VT.getVectorElementType();
1562 
1563       // Turn splatted vector load into a strided load with an X0 stride.
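      // For example, splatting element 2 of a v4i32 load becomes a
      // zero-stride load from (base + 8), broadcasting that element to every
      // lane.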
1564       SDValue V = V1;
1565       // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
1566       // with undef.
1567       // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
1568       int Offset = Lane;
1569       if (V.getOpcode() == ISD::CONCAT_VECTORS) {
1570         int OpElements =
1571             V.getOperand(0).getSimpleValueType().getVectorNumElements();
1572         V = V.getOperand(Offset / OpElements);
1573         Offset %= OpElements;
1574       }
1575 
1576       // We need to ensure the load isn't atomic or volatile.
1577       if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
1578         auto *Ld = cast<LoadSDNode>(V);
1579         Offset *= SVT.getStoreSize();
1580         SDValue NewAddr = DAG.getMemBasePlusOffset(Ld->getBasePtr(),
1581                                                    TypeSize::Fixed(Offset), DL);
1582 
1583         SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
1584         SDValue IntID =
1585             DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
1586         SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
1587                          DAG.getRegister(RISCV::X0, XLenVT), VL};
1588         SDValue NewLoad = DAG.getMemIntrinsicNode(
1589             ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
1590             DAG.getMachineFunction().getMachineMemOperand(
1591                 Ld->getMemOperand(), Offset, SVT.getStoreSize()));
1592         DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
1593         return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
1594       }
1595 
1596       V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1597       assert(Lane < (int)NumElts && "Unexpected lane!");
1598       SDValue Gather =
1599           DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
1600                       DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
1601       return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1602     }
1603   }
1604 
1605   // Detect shuffles which can be re-expressed as vector selects; these are
1606   // shuffles in which each element in the destination is taken from an element
  // at the corresponding index in either source vector.
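  // For example, the v4i32 shuffle mask <0, 5, 2, 7> takes element i from
  // either V1 or V2 at index i, so it can be lowered as a vselect.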
1608   bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
1609     int MaskIndex = MaskIdx.value();
1610     return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
1611   });
1612 
1613   assert(!V1.isUndef() && "Unexpected shuffle canonicalization");
1614 
1615   SmallVector<SDValue> MaskVals;
1616   // As a backup, shuffles can be lowered via a vrgather instruction, possibly
1617   // merged with a second vrgather.
1618   SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;
1619 
1620   // By default we preserve the original operand order, and use a mask to
1621   // select LHS as true and RHS as false. However, since RVV vector selects may
1622   // feature splats but only on the LHS, we may choose to invert our mask and
1623   // instead select between RHS and LHS.
1624   bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
1625   bool InvertMask = IsSelect == SwapOps;
1626 
1627   // Now construct the mask that will be used by the vselect or blended
1628   // vrgather operation. For vrgathers, construct the appropriate indices into
1629   // each vector.
1630   for (int MaskIndex : SVN->getMask()) {
1631     bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
1632     MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
1633     if (!IsSelect) {
1634       bool IsLHS = MaskIndex < (int)NumElts;
1635       // For "undef" elements of -1, shuffle in element 0 instead.
1636       GatherIndicesLHS.push_back(
1637           DAG.getConstant(IsLHS ? std::max(MaskIndex, 0) : 0, DL, XLenVT));
1638       // TODO: If we're masking out unused elements anyway, it might produce
1639       // better code if we use the most-common element index instead of 0.
1640       GatherIndicesRHS.push_back(
1641           DAG.getConstant(IsLHS ? 0 : MaskIndex - NumElts, DL, XLenVT));
1642     }
1643   }
1644 
1645   if (SwapOps) {
1646     std::swap(V1, V2);
1647     std::swap(GatherIndicesLHS, GatherIndicesRHS);
1648   }
1649 
1650   assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
1651   MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
1652   SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);
1653 
1654   if (IsSelect)
1655     return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
1656 
1657   if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
1658     // On such a large vector we're unable to use i8 as the index type.
1659     // FIXME: We could promote the index to i16 and use vrgatherei16, but that
1660     // may involve vector splitting if we're already at LMUL=8, or our
1661     // user-supplied maximum fixed-length LMUL.
1662     return SDValue();
1663   }
1664 
1665   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
1666   MVT IndexVT = VT.changeTypeToInteger();
1667   // Since we can't introduce illegal index types at this stage, use i16 and
1668   // vrgatherei16 if the corresponding index type for plain vrgather is greater
1669   // than XLenVT.
1670   if (IndexVT.getScalarType().bitsGT(XLenVT)) {
1671     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
1672     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
1673   }
1674 
1675   MVT IndexContainerVT =
1676       ContainerVT.changeVectorElementType(IndexVT.getScalarType());
1677 
1678   SDValue Gather;
1679   // TODO: This doesn't trigger for i64 vectors on RV32, since there we
1680   // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
1681   if (SDValue SplatValue = DAG.getSplatValue(V1)) {
1682     Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
1683   } else {
1684     SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
1685     LHSIndices =
1686         convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);
1687 
1688     V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
1689     Gather =
1690         DAG.getNode(GatherOpc, DL, ContainerVT, V1, LHSIndices, TrueMask, VL);
1691   }
1692 
1693   // If a second vector operand is used by this shuffle, blend it in with an
1694   // additional vrgather.
1695   if (!V2.isUndef()) {
1696     MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
1697     SelectMask =
1698         convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);
1699 
1700     SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
1701     RHSIndices =
1702         convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
1703 
1704     V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
1705     V2 = DAG.getNode(GatherOpc, DL, ContainerVT, V2, RHSIndices, TrueMask, VL);
1706     Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
1707                          Gather, VL);
1708   }
1709 
1710   return convertFromScalableVector(VT, Gather, DAG, Subtarget);
1711 }
1712 
1713 static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
1714                                      SDLoc DL, SelectionDAG &DAG,
1715                                      const RISCVSubtarget &Subtarget) {
1716   if (VT.isScalableVector())
1717     return DAG.getFPExtendOrRound(Op, DL, VT);
1718   assert(VT.isFixedLengthVector() &&
1719          "Unexpected value type for RVV FP extend/round lowering");
1720   SDValue Mask, VL;
1721   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1722   unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
1723                         ? RISCVISD::FP_EXTEND_VL
1724                         : RISCVISD::FP_ROUND_VL;
1725   return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
1726 }
1727 
1728 SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
1729                                             SelectionDAG &DAG) const {
1730   switch (Op.getOpcode()) {
1731   default:
1732     report_fatal_error("unimplemented operand");
1733   case ISD::GlobalAddress:
1734     return lowerGlobalAddress(Op, DAG);
1735   case ISD::BlockAddress:
1736     return lowerBlockAddress(Op, DAG);
1737   case ISD::ConstantPool:
1738     return lowerConstantPool(Op, DAG);
1739   case ISD::JumpTable:
1740     return lowerJumpTable(Op, DAG);
1741   case ISD::GlobalTLSAddress:
1742     return lowerGlobalTLSAddress(Op, DAG);
1743   case ISD::SELECT:
1744     return lowerSELECT(Op, DAG);
1745   case ISD::BRCOND:
1746     return lowerBRCOND(Op, DAG);
1747   case ISD::VASTART:
1748     return lowerVASTART(Op, DAG);
1749   case ISD::FRAMEADDR:
1750     return lowerFRAMEADDR(Op, DAG);
1751   case ISD::RETURNADDR:
1752     return lowerRETURNADDR(Op, DAG);
1753   case ISD::SHL_PARTS:
1754     return lowerShiftLeftParts(Op, DAG);
1755   case ISD::SRA_PARTS:
1756     return lowerShiftRightParts(Op, DAG, true);
1757   case ISD::SRL_PARTS:
1758     return lowerShiftRightParts(Op, DAG, false);
1759   case ISD::BITCAST: {
1760     SDLoc DL(Op);
1761     EVT VT = Op.getValueType();
1762     SDValue Op0 = Op.getOperand(0);
1763     EVT Op0VT = Op0.getValueType();
1764     MVT XLenVT = Subtarget.getXLenVT();
1765     if (VT.isFixedLengthVector()) {
1766       // We can handle fixed length vector bitcasts with a simple replacement
1767       // in isel.
1768       if (Op0VT.isFixedLengthVector())
1769         return Op;
1770       // When bitcasting from scalar to fixed-length vector, insert the scalar
1771       // into a one-element vector of the result type, and perform a vector
1772       // bitcast.
1773       if (!Op0VT.isVector()) {
1774         auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
1775         return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
1776                                               DAG.getUNDEF(BVT), Op0,
1777                                               DAG.getConstant(0, DL, XLenVT)));
1778       }
1779       return SDValue();
1780     }
1781     // Custom-legalize bitcasts from fixed-length vector types to scalar types
1782     // thus: bitcast the vector to a one-element vector type whose element type
1783     // is the same as the result type, and extract the first element.
1784     if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
1785       LLVMContext &Context = *DAG.getContext();
1786       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
1787       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
1788                          DAG.getConstant(0, DL, XLenVT));
1789     }
1790     if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
1791       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
1792       SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
1793       return FPConv;
1794     }
1795     if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
1796         Subtarget.hasStdExtF()) {
1797       SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
1798       SDValue FPConv =
1799           DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
1800       return FPConv;
1801     }
1802     return SDValue();
1803   }
1804   case ISD::INTRINSIC_WO_CHAIN:
1805     return LowerINTRINSIC_WO_CHAIN(Op, DAG);
1806   case ISD::INTRINSIC_W_CHAIN:
1807     return LowerINTRINSIC_W_CHAIN(Op, DAG);
1808   case ISD::BSWAP:
1809   case ISD::BITREVERSE: {
    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
1811     assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
1812     MVT VT = Op.getSimpleValueType();
1813     SDLoc DL(Op);
1814     // Start with the maximum immediate value which is the bitwidth - 1.
1815     unsigned Imm = VT.getSizeInBits() - 1;
1816     // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
1817     if (Op.getOpcode() == ISD::BSWAP)
1818       Imm &= ~0x7U;
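    // For example, for i32 this yields a GREV with immediate 31 for
    // BITREVERSE and 24 for BSWAP (byte-granularity reversal); for i64,
    // 63 and 56 respectively.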
1819     return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
1820                        DAG.getConstant(Imm, DL, VT));
1821   }
1822   case ISD::FSHL:
1823   case ISD::FSHR: {
1824     MVT VT = Op.getSimpleValueType();
1825     assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
1826     SDLoc DL(Op);
1827     if (Op.getOperand(2).getOpcode() == ISD::Constant)
1828       return Op;
    // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
    // use log2(XLen) bits. Mask the shift amount accordingly.
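    // For example, on RV64 the mask is 63, so only the low 6 bits of the
    // shift amount reach FSL/FSR.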
1831     unsigned ShAmtWidth = Subtarget.getXLen() - 1;
1832     SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
1833                                 DAG.getConstant(ShAmtWidth, DL, VT));
1834     unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
1835     return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
1836   }
1837   case ISD::TRUNCATE: {
1838     SDLoc DL(Op);
1839     MVT VT = Op.getSimpleValueType();
1840     // Only custom-lower vector truncates
1841     if (!VT.isVector())
1842       return Op;
1843 
1844     // Truncates to mask types are handled differently
1845     if (VT.getVectorElementType() == MVT::i1)
1846       return lowerVectorMaskTrunc(Op, DAG);
1847 
1848     // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
1849     // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
1850     // truncate by one power of two at a time.
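    // For example, truncating v4i64 to v4i8 takes three hops:
    // i64 -> i32 -> i16 -> i8.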
1851     MVT DstEltVT = VT.getVectorElementType();
1852 
1853     SDValue Src = Op.getOperand(0);
1854     MVT SrcVT = Src.getSimpleValueType();
1855     MVT SrcEltVT = SrcVT.getVectorElementType();
1856 
1857     assert(DstEltVT.bitsLT(SrcEltVT) &&
1858            isPowerOf2_64(DstEltVT.getSizeInBits()) &&
1859            isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
1860            "Unexpected vector truncate lowering");
1861 
1862     MVT ContainerVT = SrcVT;
1863     if (SrcVT.isFixedLengthVector()) {
1864       ContainerVT = getContainerForFixedLengthVector(SrcVT);
1865       Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
1866     }
1867 
1868     SDValue Result = Src;
1869     SDValue Mask, VL;
1870     std::tie(Mask, VL) =
1871         getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
1872     LLVMContext &Context = *DAG.getContext();
1873     const ElementCount Count = ContainerVT.getVectorElementCount();
1874     do {
1875       SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
1876       EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
1877       Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
1878                            Mask, VL);
1879     } while (SrcEltVT != DstEltVT);
1880 
1881     if (SrcVT.isFixedLengthVector())
1882       Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
1883 
1884     return Result;
1885   }
1886   case ISD::ANY_EXTEND:
1887   case ISD::ZERO_EXTEND:
1888     if (Op.getOperand(0).getValueType().isVector() &&
1889         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
1890       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
1891     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
1892   case ISD::SIGN_EXTEND:
1893     if (Op.getOperand(0).getValueType().isVector() &&
1894         Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
1895       return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
1896     return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
1897   case ISD::SPLAT_VECTOR_PARTS:
1898     return lowerSPLAT_VECTOR_PARTS(Op, DAG);
1899   case ISD::INSERT_VECTOR_ELT:
1900     return lowerINSERT_VECTOR_ELT(Op, DAG);
1901   case ISD::EXTRACT_VECTOR_ELT:
1902     return lowerEXTRACT_VECTOR_ELT(Op, DAG);
1903   case ISD::VSCALE: {
1904     MVT VT = Op.getSimpleValueType();
1905     SDLoc DL(Op);
1906     SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
1907     // We define our scalable vector types for lmul=1 to use a 64 bit known
1908     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
1909     // vscale as VLENB / 8.
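    // For example, with VLEN=128 we have VLENB=16 and vscale = 16 >> 3 = 2.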
1910     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
1911     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
1912                                  DAG.getConstant(3, DL, VT));
1913     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
1914   }
1915   case ISD::FP_EXTEND: {
1916     // RVV can only do fp_extend to types double the size as the source. We
1917     // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
1918     // via f32.
1919     SDLoc DL(Op);
1920     MVT VT = Op.getSimpleValueType();
1921     SDValue Src = Op.getOperand(0);
1922     MVT SrcVT = Src.getSimpleValueType();
1923 
1924     // Prepare any fixed-length vector operands.
1925     MVT ContainerVT = VT;
1926     if (SrcVT.isFixedLengthVector()) {
1927       ContainerVT = getContainerForFixedLengthVector(VT);
1928       MVT SrcContainerVT =
1929           ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
1930       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
1931     }
1932 
1933     if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
1934         SrcVT.getVectorElementType() != MVT::f16) {
1935       // For scalable vectors, we only need to close the gap between
1936       // vXf16->vXf64.
1937       if (!VT.isFixedLengthVector())
1938         return Op;
1939       // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
1940       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
1941       return convertFromScalableVector(VT, Src, DAG, Subtarget);
1942     }
1943 
1944     MVT InterVT = VT.changeVectorElementType(MVT::f32);
1945     MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
1946     SDValue IntermediateExtend = getRVVFPExtendOrRound(
1947         Src, InterVT, InterContainerVT, DL, DAG, Subtarget);
1948 
1949     SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
1950                                            DL, DAG, Subtarget);
1951     if (VT.isFixedLengthVector())
1952       return convertFromScalableVector(VT, Extend, DAG, Subtarget);
1953     return Extend;
1954   }
1955   case ISD::FP_ROUND: {
1956     // RVV can only do fp_round to types half the size as the source. We
1957     // custom-lower f64->f16 rounds via RVV's round-to-odd float
1958     // conversion instruction.
1959     SDLoc DL(Op);
1960     MVT VT = Op.getSimpleValueType();
1961     SDValue Src = Op.getOperand(0);
1962     MVT SrcVT = Src.getSimpleValueType();
1963 
1964     // Prepare any fixed-length vector operands.
1965     MVT ContainerVT = VT;
1966     if (VT.isFixedLengthVector()) {
1967       MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
1968       ContainerVT =
1969           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
1970       Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
1971     }
1972 
1973     if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
1974         SrcVT.getVectorElementType() != MVT::f64) {
1975       // For scalable vectors, we only need to close the gap between
1976       // vXf64<->vXf16.
1977       if (!VT.isFixedLengthVector())
1978         return Op;
1979       // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
1980       Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
1981       return convertFromScalableVector(VT, Src, DAG, Subtarget);
1982     }
1983 
1984     SDValue Mask, VL;
1985     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
1986 
1987     MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
1988     SDValue IntermediateRound =
1989         DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
1990     SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
1991                                           DL, DAG, Subtarget);
1992 
1993     if (VT.isFixedLengthVector())
1994       return convertFromScalableVector(VT, Round, DAG, Subtarget);
1995     return Round;
1996   }
1997   case ISD::FP_TO_SINT:
1998   case ISD::FP_TO_UINT:
1999   case ISD::SINT_TO_FP:
2000   case ISD::UINT_TO_FP: {
2001     // RVV can only do fp<->int conversions to types half/double the size as
2002     // the source. We custom-lower any conversions that do two hops into
2003     // sequences.
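    // For example, v4i8 -> v4f32 (a 4x widening) is lowered as an integer
    // extend to v4i32 followed by an equally-sized i32 -> f32 conversion.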
2004     MVT VT = Op.getSimpleValueType();
2005     if (!VT.isVector())
2006       return Op;
2007     SDLoc DL(Op);
2008     SDValue Src = Op.getOperand(0);
2009     MVT EltVT = VT.getVectorElementType();
2010     MVT SrcVT = Src.getSimpleValueType();
2011     MVT SrcEltVT = SrcVT.getVectorElementType();
2012     unsigned EltSize = EltVT.getSizeInBits();
2013     unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2014     assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2015            "Unexpected vector element types");
2016 
2017     bool IsInt2FP = SrcEltVT.isInteger();
2018     // Widening conversions
2019     if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2020       if (IsInt2FP) {
2021         // Do a regular integer sign/zero extension then convert to float.
2022         MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2023                                       VT.getVectorElementCount());
2024         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2025                                  ? ISD::ZERO_EXTEND
2026                                  : ISD::SIGN_EXTEND;
2027         SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2028         return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2029       }
2030       // FP2Int
2031       assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2032       // Do one doubling fp_extend then complete the operation by converting
2033       // to int.
2034       MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2035       SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2036       return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2037     }
2038 
2039     // Narrowing conversions
2040     if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2041       if (IsInt2FP) {
2042         // One narrowing int_to_fp, then an fp_round.
2043         assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2044         MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2045         SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2046         return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2047       }
2048       // FP2Int
2049       // One narrowing fp_to_int, then truncate the integer. If the float isn't
2050       // representable by the integer, the result is poison.
2051       MVT IVecVT =
2052           MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2053                            VT.getVectorElementCount());
2054       SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2055       return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2056     }
2057 
    // Scalable vectors can exit here; patterns will handle the remaining
    // equally-sized and halving/doubling conversions.
2060     if (!VT.isFixedLengthVector())
2061       return Op;
2062 
2063     // For fixed-length vectors we lower to a custom "VL" node.
2064     unsigned RVVOpc = 0;
2065     switch (Op.getOpcode()) {
2066     default:
2067       llvm_unreachable("Impossible opcode");
2068     case ISD::FP_TO_SINT:
2069       RVVOpc = RISCVISD::FP_TO_SINT_VL;
2070       break;
2071     case ISD::FP_TO_UINT:
2072       RVVOpc = RISCVISD::FP_TO_UINT_VL;
2073       break;
2074     case ISD::SINT_TO_FP:
2075       RVVOpc = RISCVISD::SINT_TO_FP_VL;
2076       break;
2077     case ISD::UINT_TO_FP:
2078       RVVOpc = RISCVISD::UINT_TO_FP_VL;
2079       break;
2080     }
2081 
2082     MVT ContainerVT, SrcContainerVT;
2083     // Derive the reference container type from the larger vector type.
2084     if (SrcEltSize > EltSize) {
2085       SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2086       ContainerVT =
2087           SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2088     } else {
2089       ContainerVT = getContainerForFixedLengthVector(VT);
2090       SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2091     }
2092 
2093     SDValue Mask, VL;
2094     std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2095 
2096     Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2097     Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2098     return convertFromScalableVector(VT, Src, DAG, Subtarget);
2099   }
2100   case ISD::VECREDUCE_ADD:
2101   case ISD::VECREDUCE_UMAX:
2102   case ISD::VECREDUCE_SMAX:
2103   case ISD::VECREDUCE_UMIN:
2104   case ISD::VECREDUCE_SMIN:
2105     return lowerVECREDUCE(Op, DAG);
2106   case ISD::VECREDUCE_AND:
2107   case ISD::VECREDUCE_OR:
2108   case ISD::VECREDUCE_XOR:
2109     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2110       return lowerVectorMaskVECREDUCE(Op, DAG);
2111     return lowerVECREDUCE(Op, DAG);
2112   case ISD::VECREDUCE_FADD:
2113   case ISD::VECREDUCE_SEQ_FADD:
2114     return lowerFPVECREDUCE(Op, DAG);
2115   case ISD::INSERT_SUBVECTOR:
2116     return lowerINSERT_SUBVECTOR(Op, DAG);
2117   case ISD::EXTRACT_SUBVECTOR:
2118     return lowerEXTRACT_SUBVECTOR(Op, DAG);
2119   case ISD::STEP_VECTOR:
2120     return lowerSTEP_VECTOR(Op, DAG);
2121   case ISD::VECTOR_REVERSE:
2122     return lowerVECTOR_REVERSE(Op, DAG);
2123   case ISD::BUILD_VECTOR:
2124     return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2125   case ISD::VECTOR_SHUFFLE:
2126     return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2127   case ISD::CONCAT_VECTORS: {
2128     // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2129     // better than going through the stack, as the default expansion does.
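    // For example, (concat_vectors v4i32:a, v4i32:b) becomes two
    // INSERT_SUBVECTORs of a and b into an undef v8i32, at element offsets 0
    // and 4.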
2130     SDLoc DL(Op);
2131     MVT VT = Op.getSimpleValueType();
2132     unsigned NumOpElts =
2133         Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2134     SDValue Vec = DAG.getUNDEF(VT);
2135     for (const auto &OpIdx : enumerate(Op->ops()))
2136       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2137                         DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2138     return Vec;
2139   }
2140   case ISD::LOAD:
2141     return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2142   case ISD::STORE:
2143     return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2144   case ISD::MLOAD:
2145     return lowerMLOAD(Op, DAG);
2146   case ISD::MSTORE:
2147     return lowerMSTORE(Op, DAG);
2148   case ISD::SETCC:
2149     return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2150   case ISD::ADD:
2151     return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2152   case ISD::SUB:
2153     return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2154   case ISD::MUL:
2155     return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2156   case ISD::MULHS:
2157     return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2158   case ISD::MULHU:
2159     return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2160   case ISD::AND:
2161     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2162                                               RISCVISD::AND_VL);
2163   case ISD::OR:
2164     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2165                                               RISCVISD::OR_VL);
2166   case ISD::XOR:
2167     return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2168                                               RISCVISD::XOR_VL);
2169   case ISD::SDIV:
2170     return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2171   case ISD::SREM:
2172     return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2173   case ISD::UDIV:
2174     return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2175   case ISD::UREM:
2176     return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2177   case ISD::SHL:
2178     return lowerToScalableOp(Op, DAG, RISCVISD::SHL_VL);
2179   case ISD::SRA:
2180     return lowerToScalableOp(Op, DAG, RISCVISD::SRA_VL);
2181   case ISD::SRL:
2182     return lowerToScalableOp(Op, DAG, RISCVISD::SRL_VL);
2183   case ISD::FADD:
2184     return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2185   case ISD::FSUB:
2186     return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2187   case ISD::FMUL:
2188     return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2189   case ISD::FDIV:
2190     return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2191   case ISD::FNEG:
2192     return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2193   case ISD::FABS:
2194     return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2195   case ISD::FSQRT:
2196     return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2197   case ISD::FMA:
2198     return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2199   case ISD::SMIN:
2200     return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2201   case ISD::SMAX:
2202     return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2203   case ISD::UMIN:
2204     return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2205   case ISD::UMAX:
2206     return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2207   case ISD::ABS:
2208     return lowerABS(Op, DAG);
2209   case ISD::VSELECT:
2210     return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2211   case ISD::FCOPYSIGN:
2212     return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2213   case ISD::MGATHER:
2214     return lowerMGATHER(Op, DAG);
2215   case ISD::MSCATTER:
2216     return lowerMSCATTER(Op, DAG);
2217   case ISD::FLT_ROUNDS_:
2218     return lowerGET_ROUNDING(Op, DAG);
2219   case ISD::SET_ROUNDING:
2220     return lowerSET_ROUNDING(Op, DAG);
2221   }
2222 }
2223 
2224 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2225                              SelectionDAG &DAG, unsigned Flags) {
2226   return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2227 }
2228 
2229 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2230                              SelectionDAG &DAG, unsigned Flags) {
2231   return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2232                                    Flags);
2233 }
2234 
2235 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2236                              SelectionDAG &DAG, unsigned Flags) {
2237   return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2238                                    N->getOffset(), Flags);
2239 }
2240 
2241 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2242                              SelectionDAG &DAG, unsigned Flags) {
2243   return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2244 }
2245 
2246 template <class NodeTy>
2247 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2248                                      bool IsLocal) const {
2249   SDLoc DL(N);
2250   EVT Ty = getPointerTy(DAG.getDataLayout());
2251 
2252   if (isPositionIndependent()) {
2253     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2254     if (IsLocal)
2255       // Use PC-relative addressing to access the symbol. This generates the
2256       // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
2257       // %pcrel_lo(auipc)).
2258       return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2259 
2260     // Use PC-relative addressing to access the GOT for this symbol, then load
2261     // the address from the GOT. This generates the pattern (PseudoLA sym),
2262     // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
2263     return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
2264   }
2265 
2266   switch (getTargetMachine().getCodeModel()) {
2267   default:
2268     report_fatal_error("Unsupported code model for lowering");
2269   case CodeModel::Small: {
2270     // Generate a sequence for accessing addresses within the first 2 GiB of
2271     // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
2272     SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
2273     SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
2274     SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2275     return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
2276   }
2277   case CodeModel::Medium: {
2278     // Generate a sequence for accessing addresses within any 2GiB range within
2279     // the address space. This generates the pattern (PseudoLLA sym), which
2280     // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
2281     SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2282     return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2283   }
2284   }
2285 }
2286 
2287 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
2288                                                 SelectionDAG &DAG) const {
2289   SDLoc DL(Op);
2290   EVT Ty = Op.getValueType();
2291   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2292   int64_t Offset = N->getOffset();
2293   MVT XLenVT = Subtarget.getXLenVT();
2294 
2295   const GlobalValue *GV = N->getGlobal();
2296   bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
2297   SDValue Addr = getAddr(N, DAG, IsLocal);
2298 
2299   // In order to maximise the opportunity for common subexpression elimination,
2300   // emit a separate ADD node for the global address offset instead of folding
2301   // it in the global address node. Later peephole optimisations may choose to
2302   // fold it back in when profitable.
2303   if (Offset != 0)
2304     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2305                        DAG.getConstant(Offset, DL, XLenVT));
2306   return Addr;
2307 }
2308 
2309 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
2310                                                SelectionDAG &DAG) const {
2311   BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2312 
2313   return getAddr(N, DAG);
2314 }
2315 
2316 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
2317                                                SelectionDAG &DAG) const {
2318   ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2319 
2320   return getAddr(N, DAG);
2321 }
2322 
2323 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
2324                                             SelectionDAG &DAG) const {
2325   JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2326 
2327   return getAddr(N, DAG);
2328 }
2329 
2330 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
2331                                               SelectionDAG &DAG,
2332                                               bool UseGOT) const {
2333   SDLoc DL(N);
2334   EVT Ty = getPointerTy(DAG.getDataLayout());
2335   const GlobalValue *GV = N->getGlobal();
2336   MVT XLenVT = Subtarget.getXLenVT();
2337 
2338   if (UseGOT) {
2339     // Use PC-relative addressing to access the GOT for this TLS symbol, then
2340     // load the address from the GOT and add the thread pointer. This generates
2341     // the pattern (PseudoLA_TLS_IE sym), which expands to
2342     // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
2343     SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2344     SDValue Load =
2345         SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
2346 
2347     // Add the thread pointer.
2348     SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2349     return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
2350   }
2351 
2352   // Generate a sequence for accessing the address relative to the thread
2353   // pointer, with the appropriate adjustment for the thread pointer offset.
2354   // This generates the pattern
2355   // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
2356   SDValue AddrHi =
2357       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
2358   SDValue AddrAdd =
2359       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
2360   SDValue AddrLo =
2361       DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
2362 
2363   SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2364   SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
2365   SDValue MNAdd = SDValue(
2366       DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
2367       0);
2368   return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
2369 }
2370 
2371 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
2372                                                SelectionDAG &DAG) const {
2373   SDLoc DL(N);
2374   EVT Ty = getPointerTy(DAG.getDataLayout());
2375   IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
2376   const GlobalValue *GV = N->getGlobal();
2377 
2378   // Use a PC-relative addressing mode to access the global dynamic GOT address.
2379   // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
2380   // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
2381   SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
2382   SDValue Load =
2383       SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
2384 
2385   // Prepare argument list to generate call.
2386   ArgListTy Args;
2387   ArgListEntry Entry;
2388   Entry.Node = Load;
2389   Entry.Ty = CallTy;
2390   Args.push_back(Entry);
2391 
2392   // Setup call to __tls_get_addr.
2393   TargetLowering::CallLoweringInfo CLI(DAG);
2394   CLI.setDebugLoc(DL)
2395       .setChain(DAG.getEntryNode())
2396       .setLibCallee(CallingConv::C, CallTy,
2397                     DAG.getExternalSymbol("__tls_get_addr", Ty),
2398                     std::move(Args));
2399 
2400   return LowerCallTo(CLI).first;
2401 }
2402 
2403 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
2404                                                    SelectionDAG &DAG) const {
2405   SDLoc DL(Op);
2406   EVT Ty = Op.getValueType();
2407   GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2408   int64_t Offset = N->getOffset();
2409   MVT XLenVT = Subtarget.getXLenVT();
2410 
2411   TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
2412 
2413   if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2414       CallingConv::GHC)
2415     report_fatal_error("In GHC calling convention TLS is not supported");
2416 
2417   SDValue Addr;
2418   switch (Model) {
2419   case TLSModel::LocalExec:
2420     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
2421     break;
2422   case TLSModel::InitialExec:
2423     Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
2424     break;
2425   case TLSModel::LocalDynamic:
2426   case TLSModel::GeneralDynamic:
2427     Addr = getDynamicTLSAddr(N, DAG);
2428     break;
2429   }
2430 
2431   // In order to maximise the opportunity for common subexpression elimination,
2432   // emit a separate ADD node for the global address offset instead of folding
2433   // it in the global address node. Later peephole optimisations may choose to
2434   // fold it back in when profitable.
2435   if (Offset != 0)
2436     return DAG.getNode(ISD::ADD, DL, Ty, Addr,
2437                        DAG.getConstant(Offset, DL, XLenVT));
2438   return Addr;
2439 }
2440 
2441 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2442   SDValue CondV = Op.getOperand(0);
2443   SDValue TrueV = Op.getOperand(1);
2444   SDValue FalseV = Op.getOperand(2);
2445   SDLoc DL(Op);
2446   MVT XLenVT = Subtarget.getXLenVT();
2447 
2448   // If the result type is XLenVT and CondV is the output of a SETCC node
2449   // which also operated on XLenVT inputs, then merge the SETCC node into the
2450   // lowered RISCVISD::SELECT_CC to take advantage of the integer
2451   // compare+branch instructions. i.e.:
2452   // (select (setcc lhs, rhs, cc), truev, falsev)
2453   // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
2454   if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
2455       CondV.getOperand(0).getSimpleValueType() == XLenVT) {
2456     SDValue LHS = CondV.getOperand(0);
2457     SDValue RHS = CondV.getOperand(1);
2458     auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
2459     ISD::CondCode CCVal = CC->get();
2460 
    // Special case for a select of 2 constants that have a difference of 1.
2462     // Normally this is done by DAGCombine, but if the select is introduced by
2463     // type legalization or op legalization, we miss it. Restricting to SETLT
2464     // case for now because that is what signed saturating add/sub need.
2465     // FIXME: We don't need the condition to be SETLT or even a SETCC,
2466     // but we would probably want to swap the true/false values if the condition
2467     // is SETGE/SETLE to avoid an XORI.
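    // For example, (select (setlt a, b), 5, 4) becomes
    // (add (setlt a, b), 4), since the setcc produces 0 or 1.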
2468     if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
2469         CCVal == ISD::SETLT) {
2470       const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
2471       const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
2472       if (TrueVal - 1 == FalseVal)
2473         return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
2474       if (TrueVal + 1 == FalseVal)
2475         return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
2476     }
2477 
2478     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2479 
2480     SDValue TargetCC = DAG.getTargetConstant(CCVal, DL, XLenVT);
2481     SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
2482     return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2483   }
2484 
2485   // Otherwise:
2486   // (select condv, truev, falsev)
2487   // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
2488   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
2489   SDValue SetNE = DAG.getTargetConstant(ISD::SETNE, DL, XLenVT);
2490 
2491   SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
2492 
2493   return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
2494 }
2495 
2496 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2497   SDValue CondV = Op.getOperand(1);
2498   SDLoc DL(Op);
2499   MVT XLenVT = Subtarget.getXLenVT();
2500 
2501   if (CondV.getOpcode() == ISD::SETCC &&
2502       CondV.getOperand(0).getValueType() == XLenVT) {
2503     SDValue LHS = CondV.getOperand(0);
2504     SDValue RHS = CondV.getOperand(1);
2505     ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
2506 
2507     translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
2508 
2509     SDValue TargetCC = DAG.getCondCode(CCVal);
2510     return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2511                        LHS, RHS, TargetCC, Op.getOperand(2));
2512   }
2513 
2514   return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
2515                      CondV, DAG.getConstant(0, DL, XLenVT),
2516                      DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
2517 }
2518 
2519 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2520   MachineFunction &MF = DAG.getMachineFunction();
2521   RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
2522 
2523   SDLoc DL(Op);
2524   SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2525                                  getPointerTy(MF.getDataLayout()));
2526 
2527   // vastart just stores the address of the VarArgsFrameIndex slot into the
2528   // memory location argument.
2529   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2530   return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2531                       MachinePointerInfo(SV));
2532 }
2533 
2534 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
2535                                             SelectionDAG &DAG) const {
2536   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2537   MachineFunction &MF = DAG.getMachineFunction();
2538   MachineFrameInfo &MFI = MF.getFrameInfo();
2539   MFI.setFrameAddressIsTaken(true);
2540   Register FrameReg = RI.getFrameRegister(MF);
2541   int XLenInBytes = Subtarget.getXLen() / 8;
2542 
2543   EVT VT = Op.getValueType();
2544   SDLoc DL(Op);
2545   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
2546   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2547   while (Depth--) {
2548     int Offset = -(XLenInBytes * 2);
2549     SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
2550                               DAG.getIntPtrConstant(Offset, DL));
2551     FrameAddr =
2552         DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
2553   }
2554   return FrameAddr;
2555 }
2556 
2557 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
2558                                              SelectionDAG &DAG) const {
2559   const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
2560   MachineFunction &MF = DAG.getMachineFunction();
2561   MachineFrameInfo &MFI = MF.getFrameInfo();
2562   MFI.setReturnAddressIsTaken(true);
2563   MVT XLenVT = Subtarget.getXLenVT();
2564   int XLenInBytes = Subtarget.getXLen() / 8;
2565 
2566   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2567     return SDValue();
2568 
2569   EVT VT = Op.getValueType();
2570   SDLoc DL(Op);
2571   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2572   if (Depth) {
2573     int Off = -XLenInBytes;
2574     SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
2575     SDValue Offset = DAG.getConstant(Off, DL, VT);
2576     return DAG.getLoad(VT, DL, DAG.getEntryNode(),
2577                        DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
2578                        MachinePointerInfo());
2579   }
2580 
2581   // Return the value of the return address register, marking it an implicit
2582   // live-in.
2583   Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
2584   return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
2585 }
2586 
2587 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
2588                                                  SelectionDAG &DAG) const {
2589   SDLoc DL(Op);
2590   SDValue Lo = Op.getOperand(0);
2591   SDValue Hi = Op.getOperand(1);
2592   SDValue Shamt = Op.getOperand(2);
2593   EVT VT = Lo.getValueType();
2594 
2595   // if Shamt-XLEN < 0: // Shamt < XLEN
2596   //   Lo = Lo << Shamt
2597   //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
  // else:
  //   Hi = Lo << (Shamt-XLEN)
  //   Lo = 0
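  //
  // For example, on RV32 with Shamt = 40 the else branch applies
  // (Shamt-XLEN = 8), giving Hi = Lo << 8 and Lo = 0.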
2601 
2602   SDValue Zero = DAG.getConstant(0, DL, VT);
2603   SDValue One = DAG.getConstant(1, DL, VT);
2604   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2605   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2606   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2607   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2608 
2609   SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2610   SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
2611   SDValue ShiftRightLo =
2612       DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
2613   SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2614   SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2615   SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
2616 
2617   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2618 
2619   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
2620   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2621 
2622   SDValue Parts[2] = {Lo, Hi};
2623   return DAG.getMergeValues(Parts, DL);
2624 }
2625 
2626 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2627                                                   bool IsSRA) const {
2628   SDLoc DL(Op);
2629   SDValue Lo = Op.getOperand(0);
2630   SDValue Hi = Op.getOperand(1);
2631   SDValue Shamt = Op.getOperand(2);
2632   EVT VT = Lo.getValueType();
2633 
2634   // SRA expansion:
2635   //   if Shamt-XLEN < 0: // Shamt < XLEN
2636   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2637   //     Hi = Hi >>s Shamt
2638   //   else:
  //     Lo = Hi >>s (Shamt-XLEN)
2640   //     Hi = Hi >>s (XLEN-1)
2641   //
2642   // SRL expansion:
2643   //   if Shamt-XLEN < 0: // Shamt < XLEN
2644   //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
2645   //     Hi = Hi >>u Shamt
2646   //   else:
  //     Lo = Hi >>u (Shamt-XLEN)
  //     Hi = 0
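  //
  // For example, an SRL by 40 on RV32 takes the else branch (Shamt-XLEN = 8),
  // giving Lo = Hi >>u 8 and Hi = 0.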
2649 
2650   unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
2651 
2652   SDValue Zero = DAG.getConstant(0, DL, VT);
2653   SDValue One = DAG.getConstant(1, DL, VT);
2654   SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
2655   SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
2656   SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
2657   SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
2658 
2659   SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2660   SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
2661   SDValue ShiftLeftHi =
2662       DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
2663   SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
2664   SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
2665   SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
2666   SDValue HiFalse =
2667       IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
2668 
2669   SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
2670 
2671   Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
2672   Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
2673 
2674   SDValue Parts[2] = {Lo, Hi};
2675   return DAG.getMergeValues(Parts, DL);
2676 }
2677 
2678 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
2679 // illegal (currently only vXi64 RV32).
2680 // FIXME: We could also catch non-constant sign-extended i32 values and lower
2681 // them to SPLAT_VECTOR_I64
2682 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
2683                                                      SelectionDAG &DAG) const {
2684   SDLoc DL(Op);
2685   EVT VecVT = Op.getValueType();
2686   assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
2687          "Unexpected SPLAT_VECTOR_PARTS lowering");
2688 
2689   assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
2690   SDValue Lo = Op.getOperand(0);
2691   SDValue Hi = Op.getOperand(1);
2692 
2693   if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
2694     int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
2695     int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If the Hi constant is just the sign-extension of Lo, lower this as a
    // custom node in order to try and match RVV vector/scalar instructions.
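    // For example, a splat of -2 has LoC = -2 (0xFFFFFFFE) and HiC = -1
    // (0xFFFFFFFF); LoC >> 31 == -1 == HiC, so splatting Lo alone suffices as
    // the scalar is sign-extended to SEW by the RVV vector/scalar forms.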
2698     if ((LoC >> 31) == HiC)
2699       return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2700   }
2701 
  // Detect cases where Hi is (SRA Lo, 31), i.e. Hi is Lo sign-extended.
2703   if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
2704       isa<ConstantSDNode>(Hi.getOperand(1)) &&
2705       Hi.getConstantOperandVal(1) == 31)
2706     return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2707 
2708   // Else, on RV32 we lower an i64-element SPLAT_VECTOR thus, being careful not
2709   // to accidentally sign-extend the 32-bit halves to the e64 SEW:
2710   // vmv.v.x vX, hi
2711   // vsll.vx vX, vX, /*32*/
2712   // vmv.v.x vY, lo
2713   // vsll.vx vY, vY, /*32*/
2714   // vsrl.vx vY, vY, /*32*/
2715   // vor.vv vX, vX, vY
2716   SDValue ThirtyTwoV = DAG.getConstant(32, DL, VecVT);
2717 
2718   Lo = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
2719   Lo = DAG.getNode(ISD::SHL, DL, VecVT, Lo, ThirtyTwoV);
2720   Lo = DAG.getNode(ISD::SRL, DL, VecVT, Lo, ThirtyTwoV);
2721 
2722   if (isNullConstant(Hi))
2723     return Lo;
2724 
2725   Hi = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Hi);
2726   Hi = DAG.getNode(ISD::SHL, DL, VecVT, Hi, ThirtyTwoV);
2727 
2728   return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi);
2729 }
2730 
2731 // Custom-lower extensions from mask vectors by using a vselect either with 1
2732 // for zero/any-extension or -1 for sign-extension:
2733 //   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
2734 // Note that any-extension is lowered identically to zero-extension.
2735 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
2736                                                 int64_t ExtTrueVal) const {
2737   SDLoc DL(Op);
2738   MVT VecVT = Op.getSimpleValueType();
2739   SDValue Src = Op.getOperand(0);
2740   // Only custom-lower extensions from mask types
2741   assert(Src.getValueType().isVector() &&
2742          Src.getValueType().getVectorElementType() == MVT::i1);
2743 
2744   MVT XLenVT = Subtarget.getXLenVT();
2745   SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
2746   SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
2747 
2748   if (VecVT.isScalableVector()) {
2749     // Be careful not to introduce illegal scalar types at this stage, and be
2750     // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
2751     // illegal and must be expanded. Since we know that the constants are
2752     // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
2753     bool IsRV32E64 =
2754         !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
2755 
2756     if (!IsRV32E64) {
2757       SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
2758       SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
2759     } else {
2760       SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
2761       SplatTrueVal =
2762           DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
2763     }
2764 
2765     return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
2766   }
2767 
2768   MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
2769   MVT I1ContainerVT =
2770       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
2771 
2772   SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
2773 
2774   SDValue Mask, VL;
2775   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2776 
2777   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
2778   SplatTrueVal =
2779       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
2780   SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
2781                                SplatTrueVal, SplatZero, VL);
2782 
2783   return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
2784 }
2785 
2786 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
2787     SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
2788   MVT ExtVT = Op.getSimpleValueType();
2789   // Only custom-lower extensions from fixed-length vector types.
2790   if (!ExtVT.isFixedLengthVector())
2791     return Op;
2792   MVT VT = Op.getOperand(0).getSimpleValueType();
2793   // Grab the canonical container type for the extended type. Infer the smaller
2794   // type from that to ensure the same number of vector elements, as we know
2795   // the LMUL will be sufficient to hold the smaller type.
2796   MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
  // Construct the source container type manually so that it has the same
  // number of vector elements as the extended (destination) container type.
2799   MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
2800                                      ContainerExtVT.getVectorElementCount());
2801 
2802   SDValue Op1 =
2803       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
2804 
2805   SDLoc DL(Op);
2806   SDValue Mask, VL;
2807   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2808 
2809   SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
2810 
2811   return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
2812 }
2813 
2814 // Custom-lower truncations from vectors to mask vectors by using a mask and a
2815 // setcc operation:
2816 //   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
2817 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
2818                                                   SelectionDAG &DAG) const {
2819   SDLoc DL(Op);
2820   EVT MaskVT = Op.getValueType();
2821   // Only expect to custom-lower truncations to mask types
2822   assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
2823          "Unexpected type for vector mask lowering");
2824   SDValue Src = Op.getOperand(0);
2825   MVT VecVT = Src.getSimpleValueType();
2826 
2827   // If this is a fixed vector, we need to convert it to a scalable vector.
2828   MVT ContainerVT = VecVT;
2829   if (VecVT.isFixedLengthVector()) {
2830     ContainerVT = getContainerForFixedLengthVector(VecVT);
2831     Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
2832   }
2833 
2834   SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
2835   SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
2836 
2837   SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
2838   SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
2839 
2840   if (VecVT.isScalableVector()) {
2841     SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
2842     return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
2843   }
2844 
2845   SDValue Mask, VL;
2846   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2847 
2848   MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
2849   SDValue Trunc =
2850       DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
2851   Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
2852                       DAG.getCondCode(ISD::SETNE), Mask, VL);
2853   return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
2854 }
2855 
2856 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
2857 // first position of a vector, and that vector is slid up to the insert index.
2858 // By limiting the active vector length to index+1 and merging with the
2859 // original vector (with an undisturbed tail policy for elements >= VL), we
2860 // achieve the desired result of leaving all elements untouched except the one
2861 // at VL-1, which is replaced with the desired value.
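// For example, with a legal scalar in a0 and a non-zero index in a1, this
// lowers to roughly (modulo vsetvli and register allocation details):
//   vmv.s.x     v25, a0     ; place the value at element 0 of a temporary
//   vslideup.vx v8, v25, a1 ; VL = index + 1, so only element 'index' changes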
2862 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
2863                                                     SelectionDAG &DAG) const {
2864   SDLoc DL(Op);
2865   MVT VecVT = Op.getSimpleValueType();
2866   SDValue Vec = Op.getOperand(0);
2867   SDValue Val = Op.getOperand(1);
2868   SDValue Idx = Op.getOperand(2);
2869 
2870   MVT ContainerVT = VecVT;
2871   // If the operand is a fixed-length vector, convert to a scalable one.
2872   if (VecVT.isFixedLengthVector()) {
2873     ContainerVT = getContainerForFixedLengthVector(VecVT);
2874     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2875   }
2876 
2877   MVT XLenVT = Subtarget.getXLenVT();
2878 
2879   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
2880   bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
2881   // Even i64-element vectors on RV32 can be lowered without scalar
2882   // legalization if the most-significant 32 bits of the value are not affected
2883   // by the sign-extension of the lower 32 bits.
2884   // TODO: We could also catch sign extensions of a 32-bit value.
2885   if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
2886     const auto *CVal = cast<ConstantSDNode>(Val);
2887     if (isInt<32>(CVal->getSExtValue())) {
2888       IsLegalInsert = true;
2889       Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
2890     }
2891   }
2892 
2893   SDValue Mask, VL;
2894   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
2895 
2896   SDValue ValInVec;
2897 
2898   if (IsLegalInsert) {
2899     unsigned Opc =
2900         VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
2901     if (isNullConstant(Idx)) {
2902       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
2903       if (!VecVT.isFixedLengthVector())
2904         return Vec;
2905       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
2906     }
2907     ValInVec =
2908         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
2909   } else {
2910     // On RV32, i64-element vectors must be specially handled to place the
2911     // value at element 0, by using two vslide1up instructions in sequence on
2912     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
2913     // this.
2914     SDValue One = DAG.getConstant(1, DL, XLenVT);
2915     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
2916     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
2917     MVT I32ContainerVT =
2918         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
2919     SDValue I32Mask =
2920         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
2921     // Limit the active VL to two.
2922     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
    // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
2924     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
2925     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
2926                            InsertI64VL);
2927     // First slide in the hi value, then the lo in underneath it.
2928     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
2929                            ValHi, I32Mask, InsertI64VL);
2930     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
2931                            ValLo, I32Mask, InsertI64VL);
2932     // Bitcast back to the right container type.
2933     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
2934   }
2935 
2936   // Now that the value is in a vector, slide it into position.
2937   SDValue InsertVL =
2938       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
2939   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
2940                                 ValInVec, Idx, Mask, InsertVL);
2941   if (!VecVT.isFixedLengthVector())
2942     return Slideup;
2943   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
2944 }
2945 
2946 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
2947 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
2948 // types this is done using VMV_X_S to allow us to glean information about the
2949 // sign bits of the result.
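// For example, extracting element 2 of an integer vector is roughly:
//   vslidedown.vi v25, v8, 2
//   vmv.x.s       a0, v25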
2950 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
2951                                                      SelectionDAG &DAG) const {
2952   SDLoc DL(Op);
2953   SDValue Idx = Op.getOperand(1);
2954   SDValue Vec = Op.getOperand(0);
2955   EVT EltVT = Op.getValueType();
2956   MVT VecVT = Vec.getSimpleValueType();
2957   MVT XLenVT = Subtarget.getXLenVT();
2958 
2959   if (VecVT.getVectorElementType() == MVT::i1) {
2960     // FIXME: For now we just promote to an i8 vector and extract from that,
2961     // but this is probably not optimal.
2962     MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
2963     Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
2964     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
2965   }
2966 
2967   // If this is a fixed vector, we need to convert it to a scalable vector.
2968   MVT ContainerVT = VecVT;
2969   if (VecVT.isFixedLengthVector()) {
2970     ContainerVT = getContainerForFixedLengthVector(VecVT);
2971     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
2972   }
2973 
2974   // If the index is 0, the vector is already in the right position.
2975   if (!isNullConstant(Idx)) {
2976     // Use a VL of 1 to avoid processing more elements than we need.
2977     SDValue VL = DAG.getConstant(1, DL, XLenVT);
2978     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
2979     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
2980     Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
2981                       DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
2982   }
2983 
2984   if (!EltVT.isInteger()) {
2985     // Floating-point extracts are handled in TableGen.
2986     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
2987                        DAG.getConstant(0, DL, XLenVT));
2988   }
2989 
2990   SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
2991   return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
2992 }
2993 
2994 // Some RVV intrinsics may claim that they want an integer operand to be
2995 // promoted or expanded.
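// For example, a vadd.vx intrinsic with an i8 scalar operand on RV64 has that
// operand extended to i64 (XLenVT) below, while an i64 scalar operand on RV32
// is either truncated (if it is a sign-extended 32-bit constant) or converted
// into a vector splat.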
2996 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
2997                                           const RISCVSubtarget &Subtarget) {
2998   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2999           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3000          "Unexpected opcode");
3001 
3002   if (!Subtarget.hasStdExtV())
3003     return SDValue();
3004 
3005   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3006   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3007   SDLoc DL(Op);
3008 
3009   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3010       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3011   if (!II || !II->SplatOperand)
3012     return SDValue();
3013 
3014   unsigned SplatOp = II->SplatOperand + HasChain;
3015   assert(SplatOp < Op.getNumOperands());
3016 
3017   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3018   SDValue &ScalarOp = Operands[SplatOp];
3019   MVT OpVT = ScalarOp.getSimpleValueType();
3020   MVT XLenVT = Subtarget.getXLenVT();
3021 
  // If this isn't a scalar, or its type is XLenVT, we're done.
3023   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3024     return SDValue();
3025 
3026   // Simplest case is that the operand needs to be promoted to XLenVT.
3027   if (OpVT.bitsLT(XLenVT)) {
3028     // If the operand is a constant, sign extend to increase our chances
3029     // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
3031     // FIXME: Should we ignore the upper bits in isel instead?
3032     unsigned ExtOpc =
3033         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3034     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3035     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3036   }
3037 
3038   // Use the previous operand to get the vXi64 VT. The result might be a mask
3039   // VT for compares. Using the previous operand assumes that the previous
3040   // operand will never have a smaller element size than a scalar operand and
3041   // that a widening operation never uses SEW=64.
3042   // NOTE: If this fails the below assert, we can probably just find the
3043   // element count from any operand or result and use it to construct the VT.
3044   assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3045   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3046 
3047   // The more complex case is when the scalar is larger than XLenVT.
3048   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3049          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3050 
3051   // If this is a sign-extended 32-bit constant, we can truncate it and rely
3052   // on the instruction to sign-extend since SEW>XLEN.
3053   if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3054     if (isInt<32>(CVal->getSExtValue())) {
3055       ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3056       return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3057     }
3058   }
3059 
3060   // We need to convert the scalar to a splat vector.
3061   // FIXME: Can we implicitly truncate the scalar if it is known to
3062   // be sign extended?
3063   // VL should be the last operand.
3064   SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3065   assert(VL.getValueType() == XLenVT);
3066   ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG, Subtarget);
3067   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3068 }
3069 
3070 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3071                                                      SelectionDAG &DAG) const {
3072   unsigned IntNo = Op.getConstantOperandVal(0);
3073   SDLoc DL(Op);
3074   MVT XLenVT = Subtarget.getXLenVT();
3075 
3076   switch (IntNo) {
3077   default:
3078     break; // Don't custom lower most intrinsics.
3079   case Intrinsic::thread_pointer: {
3080     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3081     return DAG.getRegister(RISCV::X4, PtrVT);
3082   }
3083   case Intrinsic::riscv_orc_b:
3084     // Lower to the GORCI encoding for orc.b.
3085     return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3086                        DAG.getConstant(7, DL, XLenVT));
3087   case Intrinsic::riscv_vmv_x_s:
3088     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3089     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3090                        Op.getOperand(1));
3091   case Intrinsic::riscv_vmv_v_x:
3092     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3093                             Op.getSimpleValueType(), DL, DAG, Subtarget);
3094   case Intrinsic::riscv_vfmv_v_f:
3095     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3096                        Op.getOperand(1), Op.getOperand(2));
3097   case Intrinsic::riscv_vmv_s_x: {
3098     SDValue Scalar = Op.getOperand(2);
3099 
3100     if (Scalar.getValueType().bitsLE(XLenVT)) {
3101       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3102       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3103                          Op.getOperand(1), Scalar, Op.getOperand(3));
3104     }
3105 
3106     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3107 
3108     // This is an i64 value that lives in two scalar registers. We have to
    // insert this in a convoluted way. First we build a vXi64 splat holding
    // the two values, assembled using some bit math. Next we'll use
3111     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
3112     // to merge element 0 from our splat into the source vector.
3113     // FIXME: This is probably not the best way to do this, but it is
3114     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3115     // point.
3116     //   vmv.v.x vX, hi
3117     //   vsll.vx vX, vX, /*32*/
3118     //   vmv.v.x vY, lo
3119     //   vsll.vx vY, vY, /*32*/
3120     //   vsrl.vx vY, vY, /*32*/
3121     //   vor.vv vX, vX, vY
3122     //
3123     //   vid.v      vVid
3124     //   vmseq.vx   mMask, vVid, 0
3125     //   vmerge.vvm vDest, vSrc, vVal, mMask
3126     MVT VT = Op.getSimpleValueType();
3127     SDValue Vec = Op.getOperand(1);
3128     SDValue VL = Op.getOperand(3);
3129 
3130     SDValue SplattedVal =
3131         splatSplitI64WithVL(DL, VT, Scalar, VL, DAG, Subtarget);
3132     SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3133                                       DAG.getConstant(0, DL, MVT::i32), VL);
3134 
3135     MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3136     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3137     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3138     SDValue SelectCond =
3139         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3140                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
3141     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3142                        Vec, VL);
3143   }
3144   case Intrinsic::riscv_vslide1up:
3145   case Intrinsic::riscv_vslide1down:
3146   case Intrinsic::riscv_vslide1up_mask:
3147   case Intrinsic::riscv_vslide1down_mask: {
3148     // We need to special case these when the scalar is larger than XLen.
3149     unsigned NumOps = Op.getNumOperands();
3150     bool IsMasked = NumOps == 6;
3151     unsigned OpOffset = IsMasked ? 1 : 0;
3152     SDValue Scalar = Op.getOperand(2 + OpOffset);
3153     if (Scalar.getValueType().bitsLE(XLenVT))
3154       break;
3155 
3156     // Splatting a sign extended constant is fine.
3157     if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3158       if (isInt<32>(CVal->getSExtValue()))
3159         break;
3160 
3161     MVT VT = Op.getSimpleValueType();
3162     assert(VT.getVectorElementType() == MVT::i64 &&
3163            Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3164 
3165     // Convert the vector source to the equivalent nxvXi32 vector.
3166     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3167     SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3168 
3169     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3170                                    DAG.getConstant(0, DL, XLenVT));
3171     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3172                                    DAG.getConstant(1, DL, XLenVT));
3173 
3174     // Double the VL since we halved SEW.
3175     SDValue VL = Op.getOperand(NumOps - 1);
3176     SDValue I32VL =
3177         DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3178 
3179     MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
3180     SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
3181 
3182     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3183     // instructions.
3184     if (IntNo == Intrinsic::riscv_vslide1up ||
3185         IntNo == Intrinsic::riscv_vslide1up_mask) {
3186       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3187                         I32Mask, I32VL);
3188       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3189                         I32Mask, I32VL);
3190     } else {
3191       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3192                         I32Mask, I32VL);
3193       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3194                         I32Mask, I32VL);
3195     }
3196 
3197     // Convert back to nxvXi64.
3198     Vec = DAG.getBitcast(VT, Vec);
3199 
3200     if (!IsMasked)
3201       return Vec;
3202 
3203     // Apply mask after the operation.
3204     SDValue Mask = Op.getOperand(NumOps - 2);
3205     SDValue MaskedOff = Op.getOperand(1);
3206     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
3207   }
3208   }
3209 
3210   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3211 }
3212 
3213 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
3214                                                     SelectionDAG &DAG) const {
3215   return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3216 }
3217 
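// Return the scalable vector type covering exactly one vector register
// (LMUL=1) for VT's element type, e.g. nxv2i32 for i32 elements when
// RVVBitsPerBlock is 64.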
3218 static MVT getLMUL1VT(MVT VT) {
3219   assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
3220          "Unexpected vector MVT");
3221   return MVT::getScalableVectorVT(
3222       VT.getVectorElementType(),
3223       RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
3224 }
3225 
3226 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
3227   switch (ISDOpcode) {
3228   default:
3229     llvm_unreachable("Unhandled reduction");
3230   case ISD::VECREDUCE_ADD:
3231     return RISCVISD::VECREDUCE_ADD_VL;
3232   case ISD::VECREDUCE_UMAX:
3233     return RISCVISD::VECREDUCE_UMAX_VL;
3234   case ISD::VECREDUCE_SMAX:
3235     return RISCVISD::VECREDUCE_SMAX_VL;
3236   case ISD::VECREDUCE_UMIN:
3237     return RISCVISD::VECREDUCE_UMIN_VL;
3238   case ISD::VECREDUCE_SMIN:
3239     return RISCVISD::VECREDUCE_SMIN_VL;
3240   case ISD::VECREDUCE_AND:
3241     return RISCVISD::VECREDUCE_AND_VL;
3242   case ISD::VECREDUCE_OR:
3243     return RISCVISD::VECREDUCE_OR_VL;
3244   case ISD::VECREDUCE_XOR:
3245     return RISCVISD::VECREDUCE_XOR_VL;
3246   }
3247 }
3248 
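// Lower reductions of i1 (mask) vectors to vpopc-based sequences: AND becomes
// "vpopc of the complemented mask == 0", OR becomes "vpopc != 0", and XOR
// becomes "(vpopc & 1) != 0".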
3249 SDValue RISCVTargetLowering::lowerVectorMaskVECREDUCE(SDValue Op,
3250                                                       SelectionDAG &DAG) const {
3251   SDLoc DL(Op);
3252   SDValue Vec = Op.getOperand(0);
3253   MVT VecVT = Vec.getSimpleValueType();
3254   assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
3255           Op.getOpcode() == ISD::VECREDUCE_OR ||
3256           Op.getOpcode() == ISD::VECREDUCE_XOR) &&
3257          "Unexpected reduction lowering");
3258 
3259   MVT XLenVT = Subtarget.getXLenVT();
3260   assert(Op.getValueType() == XLenVT &&
3261          "Expected reduction output to be legalized to XLenVT");
3262 
3263   MVT ContainerVT = VecVT;
3264   if (VecVT.isFixedLengthVector()) {
3265     ContainerVT = getContainerForFixedLengthVector(VecVT);
3266     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3267   }
3268 
3269   SDValue Mask, VL;
3270   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3271   SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3272 
3273   switch (Op.getOpcode()) {
3274   default:
3275     llvm_unreachable("Unhandled reduction");
3276   case ISD::VECREDUCE_AND:
3277     // vpopc ~x == 0
3278     Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, Mask, VL);
3279     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3280     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETEQ);
3281   case ISD::VECREDUCE_OR:
3282     // vpopc x != 0
3283     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3284     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3285   case ISD::VECREDUCE_XOR: {
3286     // ((vpopc x) & 1) != 0
3287     SDValue One = DAG.getConstant(1, DL, XLenVT);
3288     Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
3289     Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
3290     return DAG.getSetCC(DL, XLenVT, Vec, Zero, ISD::SETNE);
3291   }
3292   }
3293 }
3294 
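// Lower integer VECREDUCE_* by splatting the neutral element into an LMUL=1
// vector, performing the RVV reduction (which leaves its scalar result in
// element 0 of an LMUL=1 register group), and extracting that first element.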
3295 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
3296                                             SelectionDAG &DAG) const {
3297   SDLoc DL(Op);
3298   SDValue Vec = Op.getOperand(0);
3299   EVT VecEVT = Vec.getValueType();
3300 
3301   unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
3302 
3303   // Due to ordering in legalize types we may have a vector type that needs to
3304   // be split. Do that manually so we can get down to a legal type.
3305   while (getTypeAction(*DAG.getContext(), VecEVT) ==
3306          TargetLowering::TypeSplitVector) {
3307     SDValue Lo, Hi;
3308     std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
3309     VecEVT = Lo.getValueType();
3310     Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
3311   }
3312 
3313   // TODO: The type may need to be widened rather than split. Or widened before
3314   // it can be split.
3315   if (!isTypeLegal(VecEVT))
3316     return SDValue();
3317 
3318   MVT VecVT = VecEVT.getSimpleVT();
3319   MVT VecEltVT = VecVT.getVectorElementType();
3320   unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
3321 
3322   MVT ContainerVT = VecVT;
3323   if (VecVT.isFixedLengthVector()) {
3324     ContainerVT = getContainerForFixedLengthVector(VecVT);
3325     Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3326   }
3327 
3328   MVT M1VT = getLMUL1VT(ContainerVT);
3329 
3330   SDValue Mask, VL;
3331   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3332 
3333   // FIXME: This is a VLMAX splat which might be too large and can prevent
3334   // vsetvli removal.
3335   SDValue NeutralElem =
3336       DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
3337   SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
3338   SDValue Reduction =
3339       DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
3340   SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3341                              DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3342   return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
3343 }
3344 
3345 // Given a reduction op, this function returns the matching reduction opcode,
3346 // the vector SDValue and the scalar SDValue required to lower this to a
3347 // RISCVISD node.
3348 static std::tuple<unsigned, SDValue, SDValue>
3349 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
3350   SDLoc DL(Op);
3351   switch (Op.getOpcode()) {
3352   default:
3353     llvm_unreachable("Unhandled reduction");
3354   case ISD::VECREDUCE_FADD:
3355     return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
3356                            DAG.getConstantFP(0.0, DL, EltVT));
3357   case ISD::VECREDUCE_SEQ_FADD:
3358     return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
3359                            Op.getOperand(0));
3360   }
3361 }
3362 
3363 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
3364                                               SelectionDAG &DAG) const {
3365   SDLoc DL(Op);
3366   MVT VecEltVT = Op.getSimpleValueType();
3367 
3368   unsigned RVVOpcode;
3369   SDValue VectorVal, ScalarVal;
3370   std::tie(RVVOpcode, VectorVal, ScalarVal) =
3371       getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
3372   MVT VecVT = VectorVal.getSimpleValueType();
3373 
3374   MVT ContainerVT = VecVT;
3375   if (VecVT.isFixedLengthVector()) {
3376     ContainerVT = getContainerForFixedLengthVector(VecVT);
3377     VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
3378   }
3379 
3380   MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
3381 
3382   SDValue Mask, VL;
3383   std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3384 
3385   // FIXME: This is a VLMAX splat which might be too large and can prevent
3386   // vsetvli removal.
3387   SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
3388   SDValue Reduction =
3389       DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
3390   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
3391                      DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3392 }
3393 
3394 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
3395                                                    SelectionDAG &DAG) const {
3396   SDValue Vec = Op.getOperand(0);
3397   SDValue SubVec = Op.getOperand(1);
3398   MVT VecVT = Vec.getSimpleValueType();
3399   MVT SubVecVT = SubVec.getSimpleValueType();
3400 
3401   SDLoc DL(Op);
3402   MVT XLenVT = Subtarget.getXLenVT();
3403   unsigned OrigIdx = Op.getConstantOperandVal(2);
3404   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3405 
3406   // We don't have the ability to slide mask vectors up indexed by their i1
3407   // elements; the smallest we can do is i8. Often we are able to bitcast to
3408   // equivalent i8 vectors. Note that when inserting a fixed-length vector
3409   // into a scalable one, we might not necessarily have enough scalable
3410   // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
3411   if (SubVecVT.getVectorElementType() == MVT::i1 &&
3412       (OrigIdx != 0 || !Vec.isUndef())) {
3413     if (VecVT.getVectorMinNumElements() >= 8 &&
3414         SubVecVT.getVectorMinNumElements() >= 8) {
3415       assert(OrigIdx % 8 == 0 && "Invalid index");
3416       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3417              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3418              "Unexpected mask vector lowering");
3419       OrigIdx /= 8;
3420       SubVecVT =
3421           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3422                            SubVecVT.isScalableVector());
3423       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3424                                VecVT.isScalableVector());
3425       Vec = DAG.getBitcast(VecVT, Vec);
3426       SubVec = DAG.getBitcast(SubVecVT, SubVec);
3427     } else {
3428       // We can't slide this mask vector up indexed by its i1 elements.
3429       // This poses a problem when we wish to insert a scalable vector which
3430       // can't be re-expressed as a larger type. Just choose the slow path and
3431       // extend to a larger type, then truncate back down.
3432       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3433       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3434       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3435       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
3436       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
3437                         Op.getOperand(2));
3438       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
3439       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
3440     }
3441   }
3442 
  // If the subvector is a fixed-length type, we cannot use subregister
3444   // manipulation to simplify the codegen; we don't know which register of a
3445   // LMUL group contains the specific subvector as we only know the minimum
3446   // register size. Therefore we must slide the vector group up the full
3447   // amount.
3448   if (SubVecVT.isFixedLengthVector()) {
3449     if (OrigIdx == 0 && Vec.isUndef())
3450       return Op;
3451     MVT ContainerVT = VecVT;
3452     if (VecVT.isFixedLengthVector()) {
3453       ContainerVT = getContainerForFixedLengthVector(VecVT);
3454       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3455     }
3456     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
3457                          DAG.getUNDEF(ContainerVT), SubVec,
3458                          DAG.getConstant(0, DL, XLenVT));
3459     SDValue Mask =
3460         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3461     // Set the vector length to only the number of elements we care about. Note
3462     // that for slideup this includes the offset.
3463     SDValue VL =
3464         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
3465     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3466     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3467                                   SubVec, SlideupAmt, Mask, VL);
3468     if (VecVT.isFixedLengthVector())
3469       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3470     return DAG.getBitcast(Op.getValueType(), Slideup);
3471   }
3472 
3473   unsigned SubRegIdx, RemIdx;
3474   std::tie(SubRegIdx, RemIdx) =
3475       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3476           VecVT, SubVecVT, OrigIdx, TRI);
3477 
3478   RISCVVLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
3479   bool IsSubVecPartReg = SubVecLMUL == RISCVVLMUL::LMUL_F2 ||
3480                          SubVecLMUL == RISCVVLMUL::LMUL_F4 ||
3481                          SubVecLMUL == RISCVVLMUL::LMUL_F8;
3482 
3483   // 1. If the Idx has been completely eliminated and this subvector's size is
3484   // a vector register or a multiple thereof, or the surrounding elements are
3485   // undef, then this is a subvector insert which naturally aligns to a vector
3486   // register. These can easily be handled using subregister manipulation.
3487   // 2. If the subvector is smaller than a vector register, then the insertion
3488   // must preserve the undisturbed elements of the register. We do this by
3489   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
3490   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
3491   // subvector within the vector register, and an INSERT_SUBVECTOR of that
3492   // LMUL=1 type back into the larger vector (resolving to another subregister
3493   // operation). See below for how our VSLIDEUP works. We go via a LMUL=1 type
3494   // to avoid allocating a large register group to hold our subvector.
3495   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
3496     return Op;
3497 
  // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, setting
  // elements OFFSET<=i<VL to the "subvector", and applying the tail policy
  // to elements VL<=i<VLMAX (undisturbed here). We can set up an insertion
3501   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
3502   // size of the subvector.
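  // For example, inserting an nxv2i32 subvector at a remainder index of 2
  // within an LMUL=1 register uses SlideupAmt = 2 * vscale and
  // VL = (2 + 2) * vscale.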
3503   MVT InterSubVT = VecVT;
3504   SDValue AlignedExtract = Vec;
3505   unsigned AlignedIdx = OrigIdx - RemIdx;
3506   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3507     InterSubVT = getLMUL1VT(VecVT);
3508     // Extract a subvector equal to the nearest full vector register type. This
3509     // should resolve to a EXTRACT_SUBREG instruction.
3510     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3511                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
3512   }
3513 
3514   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3515   // For scalable vectors this must be further multiplied by vscale.
3516   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
3517 
3518   SDValue Mask, VL;
3519   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
3520 
3521   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
3522   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
3523   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
3524   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
3525 
3526   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
3527                        DAG.getUNDEF(InterSubVT), SubVec,
3528                        DAG.getConstant(0, DL, XLenVT));
3529 
3530   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
3531                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
3532 
3533   // If required, insert this subvector back into the correct vector register.
3534   // This should resolve to an INSERT_SUBREG instruction.
3535   if (VecVT.bitsGT(InterSubVT))
3536     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
3537                           DAG.getConstant(AlignedIdx, DL, XLenVT));
3538 
3539   // We might have bitcast from a mask type: cast back to the original type if
3540   // required.
3541   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
3542 }
3543 
3544 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
3545                                                     SelectionDAG &DAG) const {
3546   SDValue Vec = Op.getOperand(0);
3547   MVT SubVecVT = Op.getSimpleValueType();
3548   MVT VecVT = Vec.getSimpleValueType();
3549 
3550   SDLoc DL(Op);
3551   MVT XLenVT = Subtarget.getXLenVT();
3552   unsigned OrigIdx = Op.getConstantOperandVal(1);
3553   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
3554 
3555   // We don't have the ability to slide mask vectors down indexed by their i1
3556   // elements; the smallest we can do is i8. Often we are able to bitcast to
3557   // equivalent i8 vectors. Note that when extracting a fixed-length vector
3558   // from a scalable one, we might not necessarily have enough scalable
3559   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
3560   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
3561     if (VecVT.getVectorMinNumElements() >= 8 &&
3562         SubVecVT.getVectorMinNumElements() >= 8) {
3563       assert(OrigIdx % 8 == 0 && "Invalid index");
3564       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
3565              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
3566              "Unexpected mask vector lowering");
3567       OrigIdx /= 8;
3568       SubVecVT =
3569           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
3570                            SubVecVT.isScalableVector());
3571       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
3572                                VecVT.isScalableVector());
3573       Vec = DAG.getBitcast(VecVT, Vec);
3574     } else {
3575       // We can't slide this mask vector down, indexed by its i1 elements.
3576       // This poses a problem when we wish to extract a scalable vector which
3577       // can't be re-expressed as a larger type. Just choose the slow path and
3578       // extend to a larger type, then truncate back down.
3579       // TODO: We could probably improve this when extracting certain fixed
3580       // from fixed, where we can extract as i8 and shift the correct element
3581       // right to reach the desired subvector?
3582       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
3583       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
3584       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
3585       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
3586                         Op.getOperand(1));
3587       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
3588       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
3589     }
3590   }
3591 
  // If the subvector is a fixed-length type, we cannot use subregister
3593   // manipulation to simplify the codegen; we don't know which register of a
3594   // LMUL group contains the specific subvector as we only know the minimum
3595   // register size. Therefore we must slide the vector group down the full
3596   // amount.
3597   if (SubVecVT.isFixedLengthVector()) {
    // With an index of 0 this is a cast-like subvector extract, which can be
    // performed with subregister operations.
3600     if (OrigIdx == 0)
3601       return Op;
3602     MVT ContainerVT = VecVT;
3603     if (VecVT.isFixedLengthVector()) {
3604       ContainerVT = getContainerForFixedLengthVector(VecVT);
3605       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3606     }
3607     SDValue Mask =
3608         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
3609     // Set the vector length to only the number of elements we care about. This
3610     // avoids sliding down elements we're going to discard straight away.
3611     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
3612     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
3613     SDValue Slidedown =
3614         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3615                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
3616     // Now we can use a cast-like subvector extract to get the result.
3617     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3618                             DAG.getConstant(0, DL, XLenVT));
3619     return DAG.getBitcast(Op.getValueType(), Slidedown);
3620   }
3621 
3622   unsigned SubRegIdx, RemIdx;
3623   std::tie(SubRegIdx, RemIdx) =
3624       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
3625           VecVT, SubVecVT, OrigIdx, TRI);
3626 
3627   // If the Idx has been completely eliminated then this is a subvector extract
3628   // which naturally aligns to a vector register. These can easily be handled
3629   // using subregister manipulation.
3630   if (RemIdx == 0)
3631     return Op;
3632 
3633   // Else we must shift our vector register directly to extract the subvector.
3634   // Do this using VSLIDEDOWN.
3635 
3636   // If the vector type is an LMUL-group type, extract a subvector equal to the
3637   // nearest full vector register type. This should resolve to a EXTRACT_SUBREG
3638   // instruction.
3639   MVT InterSubVT = VecVT;
3640   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
3641     InterSubVT = getLMUL1VT(VecVT);
3642     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
3643                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
3644   }
3645 
3646   // Slide this vector register down by the desired number of elements in order
3647   // to place the desired subvector starting at element 0.
3648   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
3649   // For scalable vectors this must be further multiplied by vscale.
3650   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
3651 
3652   SDValue Mask, VL;
3653   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
3654   SDValue Slidedown =
3655       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
3656                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
3657 
3658   // Now the vector is in the right position, extract our final subvector. This
3659   // should resolve to a COPY.
3660   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
3661                           DAG.getConstant(0, DL, XLenVT));
3662 
3663   // We might have bitcast from a mask type: cast back to the original type if
3664   // required.
3665   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
3666 }
3667 
// Lower step_vector to the vid instruction.
3669 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
3670                                               SelectionDAG &DAG) const {
3671   SDLoc DL(Op);
3672   assert(Op.getConstantOperandAPInt(0) == 1 && "Unexpected step value");
3673   MVT VT = Op.getSimpleValueType();
3674   SDValue Mask, VL;
3675   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
3676   return DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3677 }
3678 
3679 // Implement vector_reverse using vrgather.vv with indices determined by
3680 // subtracting the id of each element from (VLMAX-1). This will convert
3681 // the indices like so:
3682 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
3683 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
3684 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
3685                                                  SelectionDAG &DAG) const {
3686   SDLoc DL(Op);
3687   MVT VecVT = Op.getSimpleValueType();
3688   unsigned EltSize = VecVT.getScalarSizeInBits();
3689   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
3690 
3691   unsigned MaxVLMAX = 0;
3692   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
3693   if (VectorBitsMax != 0)
3694     MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
3695 
3696   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
3697   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
3698 
3699   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
3700   // to use vrgatherei16.vv.
3701   // TODO: It's also possible to use vrgatherei16.vv for other types to
3702   // decrease register width for the index calculation.
3703   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
    // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
3705     // Reverse each half, then reassemble them in reverse order.
    // NOTE: It's also possible that after splitting, VLMAX no longer requires
    // vrgatherei16.vv.
3708     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
3709       SDValue Lo, Hi;
3710       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
3711       EVT LoVT, HiVT;
3712       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
3713       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
3714       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
3715       // Reassemble the low and high pieces reversed.
3716       // FIXME: This is a CONCAT_VECTORS.
3717       SDValue Res =
3718           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
3719                       DAG.getIntPtrConstant(0, DL));
3720       return DAG.getNode(
3721           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
3722           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
3723     }
3724 
3725     // Just promote the int type to i16 which will double the LMUL.
3726     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
3727     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
3728   }
3729 
3730   MVT XLenVT = Subtarget.getXLenVT();
3731   SDValue Mask, VL;
3732   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
3733 
3734   // Calculate VLMAX-1 for the desired SEW.
3735   unsigned MinElts = VecVT.getVectorMinNumElements();
3736   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
3737                               DAG.getConstant(MinElts, DL, XLenVT));
3738   SDValue VLMinus1 =
3739       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
3740 
3741   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
3742   bool IsRV32E64 =
3743       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
3744   SDValue SplatVL;
3745   if (!IsRV32E64)
3746     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
3747   else
3748     SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
3749 
3750   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
3751   SDValue Indices =
3752       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
3753 
3754   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
3755 }
3756 
3757 SDValue
3758 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
3759                                                      SelectionDAG &DAG) const {
3760   auto *Load = cast<LoadSDNode>(Op);
3761 
3762   SDLoc DL(Op);
3763   MVT VT = Op.getSimpleValueType();
3764   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3765 
3766   SDValue VL =
3767       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
3768 
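  // Emit a VL-predicated load of the scalable container type, using the fixed
  // vector's element count as the AVL, then convert the result back to the
  // original fixed-length type.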
3769   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
3770   SDValue NewLoad = DAG.getMemIntrinsicNode(
3771       RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
3772       Load->getMemoryVT(), Load->getMemOperand());
3773 
3774   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
  return DAG.getMergeValues({Result, NewLoad.getValue(1)}, DL);
3776 }
3777 
3778 SDValue
3779 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
3780                                                       SelectionDAG &DAG) const {
3781   auto *Store = cast<StoreSDNode>(Op);
3782 
3783   SDLoc DL(Op);
3784   SDValue StoreVal = Store->getValue();
3785   MVT VT = StoreVal.getSimpleValueType();
3786 
  // If the size is less than a byte, pad with zeros to make a full byte.
3788   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
3789     VT = MVT::v8i1;
3790     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
3791                            DAG.getConstant(0, DL, VT), StoreVal,
3792                            DAG.getIntPtrConstant(0, DL));
3793   }
3794 
3795   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3796 
3797   SDValue VL =
3798       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
3799 
3800   SDValue NewValue =
3801       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
3802   return DAG.getMemIntrinsicNode(
3803       RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
3804       {Store->getChain(), NewValue, Store->getBasePtr(), VL},
3805       Store->getMemoryVT(), Store->getMemOperand());
3806 }
3807 
3808 SDValue RISCVTargetLowering::lowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
3809   auto *Load = cast<MaskedLoadSDNode>(Op);
3810 
3811   SDLoc DL(Op);
3812   MVT VT = Op.getSimpleValueType();
3813   MVT XLenVT = Subtarget.getXLenVT();
3814 
3815   SDValue Mask = Load->getMask();
3816   SDValue PassThru = Load->getPassThru();
3817   SDValue VL;
3818 
3819   MVT ContainerVT = VT;
3820   if (VT.isFixedLengthVector()) {
3821     ContainerVT = getContainerForFixedLengthVector(VT);
3822     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3823 
3824     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
3825     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
3826     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
3827   } else
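    // For scalable vectors, process all elements: an AVL of X0 requests
    // VLMAX.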
3828     VL = DAG.getRegister(RISCV::X0, XLenVT);
3829 
3830   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
3831   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
3832   SDValue Ops[] = {Load->getChain(),   IntID, PassThru,
3833                    Load->getBasePtr(), Mask,  VL};
3834   SDValue Result =
3835       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
3836                               Load->getMemoryVT(), Load->getMemOperand());
3837   SDValue Chain = Result.getValue(1);
3838 
3839   if (VT.isFixedLengthVector())
3840     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
3841 
3842   return DAG.getMergeValues({Result, Chain}, DL);
3843 }
3844 
3845 SDValue RISCVTargetLowering::lowerMSTORE(SDValue Op, SelectionDAG &DAG) const {
3846   auto *Store = cast<MaskedStoreSDNode>(Op);
3847 
3848   SDLoc DL(Op);
3849   SDValue Val = Store->getValue();
3850   SDValue Mask = Store->getMask();
3851   MVT VT = Val.getSimpleValueType();
3852   MVT XLenVT = Subtarget.getXLenVT();
3853   SDValue VL;
3854 
3855   MVT ContainerVT = VT;
3856   if (VT.isFixedLengthVector()) {
3857     ContainerVT = getContainerForFixedLengthVector(VT);
3858     MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3859 
3860     Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
3861     Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
3862     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
3863   } else
3864     VL = DAG.getRegister(RISCV::X0, XLenVT);
3865 
3866   SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vse_mask, DL, XLenVT);
3867   return DAG.getMemIntrinsicNode(
3868       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
3869       {Store->getChain(), IntID, Val, Store->getBasePtr(), Mask, VL},
3870       Store->getMemoryVT(), Store->getMemOperand());
3871 }
3872 
3873 SDValue
3874 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
3875                                                       SelectionDAG &DAG) const {
3876   MVT InVT = Op.getOperand(0).getSimpleValueType();
3877   MVT ContainerVT = getContainerForFixedLengthVector(InVT);
3878 
3879   MVT VT = Op.getSimpleValueType();
3880 
3881   SDValue Op1 =
3882       convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3883   SDValue Op2 =
3884       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
3885 
3886   SDLoc DL(Op);
3887   SDValue VL =
3888       DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
3889 
3890   MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3891   SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3892 
3893   SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
3894                             Op.getOperand(2), Mask, VL);
3895 
3896   return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
3897 }
3898 
3899 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
3900     SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
3901   MVT VT = Op.getSimpleValueType();
3902 
3903   if (VT.getVectorElementType() == MVT::i1)
3904     return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
3905 
3906   return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
3907 }
3908 
3909 // Lower vector ABS to smax(X, sub(0, X)).
3910 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
3911   SDLoc DL(Op);
3912   MVT VT = Op.getSimpleValueType();
3913   SDValue X = Op.getOperand(0);
3914 
3915   assert(VT.isFixedLengthVector() && "Unexpected type");
3916 
3917   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3918   X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
3919 
3920   SDValue Mask, VL;
3921   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3922 
3923   SDValue SplatZero =
3924       DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
3925                   DAG.getConstant(0, DL, Subtarget.getXLenVT()));
3926   SDValue NegX =
3927       DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
3928   SDValue Max =
3929       DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
3930 
3931   return convertFromScalableVector(VT, Max, DAG, Subtarget);
3932 }
3933 
3934 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
3935     SDValue Op, SelectionDAG &DAG) const {
3936   SDLoc DL(Op);
3937   MVT VT = Op.getSimpleValueType();
3938   SDValue Mag = Op.getOperand(0);
3939   SDValue Sign = Op.getOperand(1);
3940   assert(Mag.getValueType() == Sign.getValueType() &&
3941          "Can only handle COPYSIGN with matching types.");
3942 
3943   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3944   Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
3945   Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
3946 
3947   SDValue Mask, VL;
3948   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3949 
3950   SDValue CopySign =
3951       DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
3952 
3953   return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
3954 }
3955 
3956 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
3957     SDValue Op, SelectionDAG &DAG) const {
3958   MVT VT = Op.getSimpleValueType();
3959   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3960 
3961   MVT I1ContainerVT =
3962       MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3963 
3964   SDValue CC =
3965       convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
3966   SDValue Op1 =
3967       convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
3968   SDValue Op2 =
3969       convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
3970 
3971   SDLoc DL(Op);
3972   SDValue Mask, VL;
3973   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3974 
3975   SDValue Select =
3976       DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
3977 
3978   return convertFromScalableVector(VT, Select, DAG, Subtarget);
3979 }
3980 
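// Lower a fixed-length vector operation to an equivalent operation on the
// scalable container type: convert each vector operand to the container type,
// append the mask (when HasMask is set) and VL operands, emit NewOpc, and
// convert the result back to the original fixed-length type.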
3981 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
3982                                                unsigned NewOpc,
3983                                                bool HasMask) const {
3984   MVT VT = Op.getSimpleValueType();
3985   assert(useRVVForFixedLengthVectorVT(VT) &&
3986          "Only expected to lower fixed length vector operation!");
3987   MVT ContainerVT = getContainerForFixedLengthVector(VT);
3988 
3989   // Create list of operands by converting existing ones to scalable types.
3990   SmallVector<SDValue, 6> Ops;
3991   for (const SDValue &V : Op->op_values()) {
3992     assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
3993 
3994     // Pass through non-vector operands.
3995     if (!V.getValueType().isVector()) {
3996       Ops.push_back(V);
3997       continue;
3998     }
3999 
4000     // "cast" fixed length vector to a scalable vector.
4001     assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
4002            "Only fixed length vectors are supported!");
4003     Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
4004   }
4005 
4006   SDLoc DL(Op);
4007   SDValue Mask, VL;
4008   std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4009   if (HasMask)
4010     Ops.push_back(Mask);
4011   Ops.push_back(VL);
4012 
4013   SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
4014   return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
4015 }
4016 
4017 // Custom lower MGATHER to a legalized form for RVV. It will then be matched to
4018 // a RVV indexed load. The RVV indexed load instructions only support the
4019 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4020 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4021 // indexing is extended to the XLEN value type and scaled accordingly.
4022 SDValue RISCVTargetLowering::lowerMGATHER(SDValue Op, SelectionDAG &DAG) const {
4023   auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
4024   SDLoc DL(Op);
4025 
4026   SDValue Index = MGN->getIndex();
4027   SDValue Mask = MGN->getMask();
4028   SDValue PassThru = MGN->getPassThru();
4029 
4030   MVT VT = Op.getSimpleValueType();
4031   MVT IndexVT = Index.getSimpleValueType();
4032   MVT XLenVT = Subtarget.getXLenVT();
4033 
4034   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4035          "Unexpected VTs!");
4036   assert(MGN->getBasePtr().getSimpleValueType() == XLenVT &&
4037          "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads.
4039   assert(MGN->getExtensionType() == ISD::NON_EXTLOAD &&
4040          "Unexpected extending MGATHER");
4041 
4042   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4043   // the selection of the masked intrinsics doesn't do this for us.
4044   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4045 
4046   SDValue VL;
4047   MVT ContainerVT = VT;
4048   if (VT.isFixedLengthVector()) {
4049     // We need to use the larger of the result and index type to determine the
4050     // scalable type to use so we don't increase LMUL for any operand/result.
4051     if (VT.bitsGE(IndexVT)) {
4052       ContainerVT = getContainerForFixedLengthVector(VT);
4053       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4054                                  ContainerVT.getVectorElementCount());
4055     } else {
4056       IndexVT = getContainerForFixedLengthVector(IndexVT);
4057       ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
4058                                      IndexVT.getVectorElementCount());
4059     }
4060 
4061     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4062 
4063     if (!IsUnmasked) {
4064       MVT MaskVT =
4065           MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4066       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4067       PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4068     }
4069 
4070     VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4071   } else
4072     VL = DAG.getRegister(RISCV::X0, XLenVT);
4073 
4074   unsigned IntID =
4075       IsUnmasked ? Intrinsic::riscv_vloxei : Intrinsic::riscv_vloxei_mask;
4076   SmallVector<SDValue, 8> Ops{MGN->getChain(),
4077                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4078   if (!IsUnmasked)
4079     Ops.push_back(PassThru);
4080   Ops.push_back(MGN->getBasePtr());
4081   Ops.push_back(Index);
4082   if (!IsUnmasked)
4083     Ops.push_back(Mask);
4084   Ops.push_back(VL);
4085 
4086   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4087   SDValue Result =
4088       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4089                               MGN->getMemoryVT(), MGN->getMemOperand());
4090   SDValue Chain = Result.getValue(1);
4091 
4092   if (VT.isFixedLengthVector())
4093     Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4094 
4095   return DAG.getMergeValues({Result, Chain}, DL);
4096 }
4097 
4098 // Custom lower MSCATTER to a legalized form for RVV. It will then be matched to
4099 // a RVV indexed store. The RVV indexed store instructions only support the
4100 // "unsigned unscaled" addressing mode; indices are implicitly zero-extended or
4101 // truncated to XLEN and are treated as byte offsets. Any signed or scaled
4102 // indexing is extended to the XLEN value type and scaled accordingly.
4103 SDValue RISCVTargetLowering::lowerMSCATTER(SDValue Op,
4104                                            SelectionDAG &DAG) const {
4105   auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
4106   SDLoc DL(Op);
4107   SDValue Index = MSN->getIndex();
4108   SDValue Mask = MSN->getMask();
4109   SDValue Val = MSN->getValue();
4110 
4111   MVT VT = Val.getSimpleValueType();
4112   MVT IndexVT = Index.getSimpleValueType();
4113   MVT XLenVT = Subtarget.getXLenVT();
4114 
4115   assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
4116          "Unexpected VTs!");
4117   assert(MSN->getBasePtr().getSimpleValueType() == XLenVT &&
4118          "Unexpected pointer type");
  // Targets have to explicitly opt in to extending vector loads and
  // truncating vector stores.
4121   assert(!MSN->isTruncatingStore() && "Unexpected extending MSCATTER");
4122 
4123   // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4124   // the selection of the masked intrinsics doesn't do this for us.
4125   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4126 
4127   SDValue VL;
  if (VT.isFixedLengthVector()) {
    unsigned NumElts = VT.getVectorNumElements(); // AVL for the fixed type.
4129     // We need to use the larger of the value and index type to determine the
4130     // scalable type to use so we don't increase LMUL for any operand/result.
4131     if (VT.bitsGE(IndexVT)) {
4132       VT = getContainerForFixedLengthVector(VT);
4133       IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
4134                                  VT.getVectorElementCount());
4135     } else {
4136       IndexVT = getContainerForFixedLengthVector(IndexVT);
4137       VT = MVT::getVectorVT(VT.getVectorElementType(),
4138                             IndexVT.getVectorElementCount());
4139     }
4140 
4141     Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
4142     Val = convertToScalableVector(VT, Val, DAG, Subtarget);
4143 
4144     if (!IsUnmasked) {
4145       MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
4146       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4147     }
4148 
    VL = DAG.getConstant(NumElts, DL, XLenVT);
4150   } else
4151     VL = DAG.getRegister(RISCV::X0, XLenVT);
4152 
4153   unsigned IntID =
4154       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
4155   SmallVector<SDValue, 8> Ops{MSN->getChain(),
4156                               DAG.getTargetConstant(IntID, DL, XLenVT)};
4157   Ops.push_back(Val);
4158   Ops.push_back(MSN->getBasePtr());
4159   Ops.push_back(Index);
4160   if (!IsUnmasked)
4161     Ops.push_back(Mask);
4162   Ops.push_back(VL);
4163 
4164   return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, MSN->getVTList(), Ops,
4165                                  MSN->getMemoryVT(), MSN->getMemOperand());
4166 }
4167 
4168 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
4169                                                SelectionDAG &DAG) const {
4170   const MVT XLenVT = Subtarget.getXLenVT();
4171   SDLoc DL(Op);
4172   SDValue Chain = Op->getOperand(0);
4173   SDValue SysRegNo = DAG.getConstant(
4174       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4175   SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
4176   SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
4177 
  // The rounding mode encoding used by RISCV differs from the one used by
  // FLT_ROUNDS. To convert between them, the RISCV rounding mode is used as an
  // index into a table consisting of a sequence of 4-bit fields, each holding
  // the corresponding FLT_ROUNDS mode.
4182   static const int Table =
4183       (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
4184       (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
4185       (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
4186       (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
4187       (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
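  // For example, when FRM holds RISCVFPRndMode::RTZ, the lookup below computes
  // (Table >> (RTZ * 4)) & 7, which yields RoundingMode::TowardZero.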
4188 
4189   SDValue Shift =
4190       DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
4191   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4192                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4193   SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4194                                DAG.getConstant(7, DL, XLenVT));
4195 
4196   return DAG.getMergeValues({Masked, Chain}, DL);
4197 }
4198 
4199 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
4200                                                SelectionDAG &DAG) const {
4201   const MVT XLenVT = Subtarget.getXLenVT();
4202   SDLoc DL(Op);
4203   SDValue Chain = Op->getOperand(0);
4204   SDValue RMValue = Op->getOperand(1);
4205   SDValue SysRegNo = DAG.getConstant(
4206       RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
4207 
  // The rounding mode encoding used by RISCV differs from the one used by
  // FLT_ROUNDS. To convert between them, the C rounding mode is used as an
  // index into a table consisting of a sequence of 4-bit fields, each holding
  // the corresponding RISCV mode.
4212   static const unsigned Table =
4213       (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
4214       (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
4215       (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
4216       (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
4217       (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
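  // For example, RoundingMode::TowardZero selects the 4-bit field holding
  // RISCVFPRndMode::RTZ, which is then written to the FRM register.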
4218 
4219   SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
4220                               DAG.getConstant(2, DL, XLenVT));
4221   SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
4222                                 DAG.getConstant(Table, DL, XLenVT), Shift);
4223   RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
4224                         DAG.getConstant(0x7, DL, XLenVT));
4225   return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
4226                      RMValue);
4227 }
4228 
4229 // Returns the opcode of the target-specific SDNode that implements the 32-bit
4230 // form of the given Opcode.
4231 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
4232   switch (Opcode) {
4233   default:
4234     llvm_unreachable("Unexpected opcode");
4235   case ISD::SHL:
4236     return RISCVISD::SLLW;
4237   case ISD::SRA:
4238     return RISCVISD::SRAW;
4239   case ISD::SRL:
4240     return RISCVISD::SRLW;
4241   case ISD::SDIV:
4242     return RISCVISD::DIVW;
4243   case ISD::UDIV:
4244     return RISCVISD::DIVUW;
4245   case ISD::UREM:
4246     return RISCVISD::REMUW;
4247   case ISD::ROTL:
4248     return RISCVISD::ROLW;
4249   case ISD::ROTR:
4250     return RISCVISD::RORW;
4251   case RISCVISD::GREV:
4252     return RISCVISD::GREVW;
4253   case RISCVISD::GORC:
4254     return RISCVISD::GORCW;
4255   }
4256 }
4257 
4258 // Converts the given 32-bit operation to a target-specific SelectionDAG node.
4259 // Because i32 isn't a legal type for RV64, these operations would otherwise
4260 // be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
// instructions later on, because the fact that the operation was originally
// of type i32 is lost.
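// For example, on RV64 an (i32 (srl x, y)) is rewritten as
//   (i32 (trunc (RISCVISD::SRLW (any_extend x), (any_extend y))))
// so that SRLW can be selected.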
4263 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
4264                                    unsigned ExtOpc = ISD::ANY_EXTEND) {
4265   SDLoc DL(N);
4266   RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4267   SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
4268   SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
4269   SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4270   // ReplaceNodeResults requires we maintain the same type for the return value.
4271   return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
4272 }
4273 
// Converts the given 32-bit operation to an i64 operation with sign-extension
// semantics in order to reduce the number of sign-extension instructions.
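// For example, an i32 ADD becomes
//   (trunc (sext_inreg (add (any_extend a), (any_extend b)), i32))
// which can later be selected as ADDW.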
4276 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
4277   SDLoc DL(N);
4278   SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4279   SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4280   SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
4281   SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
4282                                DAG.getValueType(MVT::i32));
4283   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
4284 }
4285 
4286 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
4287                                              SmallVectorImpl<SDValue> &Results,
4288                                              SelectionDAG &DAG) const {
4289   SDLoc DL(N);
4290   switch (N->getOpcode()) {
4291   default:
4292     llvm_unreachable("Don't know how to custom type legalize this operation!");
4293   case ISD::STRICT_FP_TO_SINT:
4294   case ISD::STRICT_FP_TO_UINT:
4295   case ISD::FP_TO_SINT:
4296   case ISD::FP_TO_UINT: {
4297     bool IsStrict = N->isStrictFPOpcode();
4298     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4299            "Unexpected custom legalisation");
4300     SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
4301     // If the FP type needs to be softened, emit a library call using the 'si'
4302     // version. If we left it to default legalization we'd end up with 'di'. If
4303     // the FP type doesn't need to be softened just let generic type
4304     // legalization promote the result type.
4305     if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
4306         TargetLowering::TypeSoftenFloat)
4307       return;
4308     RTLIB::Libcall LC;
4309     if (N->getOpcode() == ISD::FP_TO_SINT ||
4310         N->getOpcode() == ISD::STRICT_FP_TO_SINT)
4311       LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
4312     else
4313       LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
4314     MakeLibCallOptions CallOptions;
4315     EVT OpVT = Op0.getValueType();
4316     CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
4317     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
4318     SDValue Result;
4319     std::tie(Result, Chain) =
4320         makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
4321     Results.push_back(Result);
4322     if (IsStrict)
4323       Results.push_back(Chain);
4324     break;
4325   }
4326   case ISD::READCYCLECOUNTER: {
4327     assert(!Subtarget.is64Bit() &&
4328            "READCYCLECOUNTER only has custom type legalization on riscv32");
4329 
4330     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
4331     SDValue RCW =
4332         DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
4333 
4334     Results.push_back(
4335         DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
4336     Results.push_back(RCW.getValue(2));
4337     break;
4338   }
4339   case ISD::MUL: {
4340     unsigned Size = N->getSimpleValueType(0).getSizeInBits();
4341     unsigned XLen = Subtarget.getXLen();
    // This multiply needs to be expanded; try to use MULHSU+MUL if possible.
4343     if (Size > XLen) {
4344       assert(Size == (XLen * 2) && "Unexpected custom legalisation");
4345       SDValue LHS = N->getOperand(0);
4346       SDValue RHS = N->getOperand(1);
4347       APInt HighMask = APInt::getHighBitsSet(Size, XLen);
4348 
4349       bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
4350       bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
4351       // We need exactly one side to be unsigned.
4352       if (LHSIsU == RHSIsU)
4353         return;
4354 
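      // Build the 2*XLen-wide product as BUILD_PAIR(MUL(S, U), MULHSU(S, U)),
      // where S is the operand treated as signed and U is the operand whose
      // upper half is known to be zero.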
4355       auto MakeMULPair = [&](SDValue S, SDValue U) {
4356         MVT XLenVT = Subtarget.getXLenVT();
4357         S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
4358         U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
4359         SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
4360         SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
4361         return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
4362       };
4363 
4364       bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
4365       bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
4366 
4367       // The other operand should be signed, but still prefer MULH when
4368       // possible.
4369       if (RHSIsU && LHSIsS && !RHSIsS)
4370         Results.push_back(MakeMULPair(LHS, RHS));
4371       else if (LHSIsU && RHSIsS && !LHSIsS)
4372         Results.push_back(MakeMULPair(RHS, LHS));
4373 
4374       return;
4375     }
4376     LLVM_FALLTHROUGH;
4377   }
4378   case ISD::ADD:
4379   case ISD::SUB:
4380     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4381            "Unexpected custom legalisation");
4382     if (N->getOperand(1).getOpcode() == ISD::Constant)
4383       return;
4384     Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
4385     break;
4386   case ISD::SHL:
4387   case ISD::SRA:
4388   case ISD::SRL:
4389     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4390            "Unexpected custom legalisation");
4391     if (N->getOperand(1).getOpcode() == ISD::Constant)
4392       return;
4393     Results.push_back(customLegalizeToWOp(N, DAG));
4394     break;
4395   case ISD::ROTL:
4396   case ISD::ROTR:
4397     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4398            "Unexpected custom legalisation");
4399     Results.push_back(customLegalizeToWOp(N, DAG));
4400     break;
4401   case ISD::CTTZ:
4402   case ISD::CTTZ_ZERO_UNDEF:
4403   case ISD::CTLZ:
4404   case ISD::CTLZ_ZERO_UNDEF: {
4405     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4406            "Unexpected custom legalisation");
4407 
4408     SDValue NewOp0 =
4409         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4410     bool IsCTZ =
4411         N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
4412     unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
4413     SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
4414     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4415     return;
4416   }
4417   case ISD::SDIV:
4418   case ISD::UDIV:
4419   case ISD::UREM: {
4420     MVT VT = N->getSimpleValueType(0);
4421     assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
4422            Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
4423            "Unexpected custom legalisation");
4424     if (N->getOperand(0).getOpcode() == ISD::Constant ||
4425         N->getOperand(1).getOpcode() == ISD::Constant)
4426       return;
4427 
4428     // If the input is i32, use ANY_EXTEND since the W instructions don't read
4429     // the upper 32 bits. For other types we need to sign or zero extend
4430     // based on the opcode.
4431     unsigned ExtOpc = ISD::ANY_EXTEND;
4432     if (VT != MVT::i32)
4433       ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
4434                                            : ISD::ZERO_EXTEND;
4435 
4436     Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
4437     break;
4438   }
4439   case ISD::UADDO:
4440   case ISD::USUBO: {
4441     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4442            "Unexpected custom legalisation");
4443     bool IsAdd = N->getOpcode() == ISD::UADDO;
4444     // Create an ADDW or SUBW.
4445     SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4446     SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4447     SDValue Res =
4448         DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
4449     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
4450                       DAG.getValueType(MVT::i32));
4451 
4452     // Sign extend the LHS and perform an unsigned compare with the ADDW result.
4453     // Since the inputs are sign extended from i32, this is equivalent to
4454     // comparing the lower 32 bits.
4455     LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4456     SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
4457                                     IsAdd ? ISD::SETULT : ISD::SETUGT);
4458 
4459     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4460     Results.push_back(Overflow);
4461     return;
4462   }
4463   case ISD::UADDSAT:
4464   case ISD::USUBSAT: {
4465     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4466            "Unexpected custom legalisation");
4467     if (Subtarget.hasStdExtZbb()) {
4468       // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
4469       // sign extend allows overflow of the lower 32 bits to be detected on
4470       // the promoted size.
4471       SDValue LHS =
4472           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
4473       SDValue RHS =
4474           DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
4475       SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
4476       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4477       return;
4478     }
4479 
4480     // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
4481     // promotion for UADDO/USUBO.
4482     Results.push_back(expandAddSubSat(N, DAG));
4483     return;
4484   }
4485   case ISD::BITCAST: {
4486     EVT VT = N->getValueType(0);
4487     assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
4488     SDValue Op0 = N->getOperand(0);
4489     EVT Op0VT = Op0.getValueType();
4490     MVT XLenVT = Subtarget.getXLenVT();
4491     if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
4492       SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
4493       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
4494     } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
4495                Subtarget.hasStdExtF()) {
4496       SDValue FPConv =
4497           DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
4498       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
4499     } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
4500                isTypeLegal(Op0VT)) {
4501       // Custom-legalize bitcasts from fixed-length vector types to illegal
4502       // scalar types in order to improve codegen. Bitcast the vector to a
4503       // one-element vector type whose element type is the same as the result
4504       // type, and extract the first element.
4505       LLVMContext &Context = *DAG.getContext();
4506       SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
4507       Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
4508                                     DAG.getConstant(0, DL, XLenVT)));
4509     }
4510     break;
4511   }
4512   case RISCVISD::GREV:
4513   case RISCVISD::GORC: {
4514     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4515            "Unexpected custom legalisation");
4516     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
    // This is handled like customLegalizeToWOp: any-extend both operands (the
    // second is a constant control value), emit the corresponding *W node, and
    // truncate the result back to i32.
4520     RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
4521     SDValue NewOp0 =
4522         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4523     SDValue NewOp1 =
4524         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4525     SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
4526     // ReplaceNodeResults requires we maintain the same type for the return
4527     // value.
4528     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4529     break;
4530   }
4531   case RISCVISD::SHFL: {
4532     // There is no SHFLIW instruction, but we can just promote the operation.
4533     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4534            "Unexpected custom legalisation");
4535     assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
4536     SDValue NewOp0 =
4537         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4538     SDValue NewOp1 =
4539         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4540     SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
4541     // ReplaceNodeResults requires we maintain the same type for the return
4542     // value.
4543     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
4544     break;
4545   }
4546   case ISD::BSWAP:
4547   case ISD::BITREVERSE: {
4548     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4549            Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
4550     SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
4551                                  N->getOperand(0));
4552     unsigned Imm = N->getOpcode() == ISD::BITREVERSE ? 31 : 24;
4553     SDValue GREVIW = DAG.getNode(RISCVISD::GREVW, DL, MVT::i64, NewOp0,
4554                                  DAG.getConstant(Imm, DL, MVT::i64));
4555     // ReplaceNodeResults requires we maintain the same type for the return
4556     // value.
4557     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, GREVIW));
4558     break;
4559   }
4560   case ISD::FSHL:
4561   case ISD::FSHR: {
4562     assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
4563            Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
4564     SDValue NewOp0 =
4565         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
4566     SDValue NewOp1 =
4567         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4568     SDValue NewOp2 =
4569         DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
4570     // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
4571     // Mask the shift amount to 5 bits.
4572     NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
4573                          DAG.getConstant(0x1f, DL, MVT::i64));
4574     unsigned Opc =
4575         N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
4576     SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
4577     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
4578     break;
4579   }
4580   case ISD::EXTRACT_VECTOR_ELT: {
4581     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
4582     // type is illegal (currently only vXi64 RV32).
4583     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
4584     // transferred to the destination register. We issue two of these from the
4585     // upper- and lower- halves of the SEW-bit vector element, slid down to the
4586     // first element.
4587     SDValue Vec = N->getOperand(0);
4588     SDValue Idx = N->getOperand(1);
4589 
4590     // The vector type hasn't been legalized yet so we can't issue target
4591     // specific nodes if it needs legalization.
4592     // FIXME: We would manually legalize if it's important.
4593     if (!isTypeLegal(Vec.getValueType()))
4594       return;
4595 
4596     MVT VecVT = Vec.getSimpleValueType();
4597 
4598     assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
4599            VecVT.getVectorElementType() == MVT::i64 &&
4600            "Unexpected EXTRACT_VECTOR_ELT legalization");
4601 
4602     // If this is a fixed vector, we need to convert it to a scalable vector.
4603     MVT ContainerVT = VecVT;
4604     if (VecVT.isFixedLengthVector()) {
4605       ContainerVT = getContainerForFixedLengthVector(VecVT);
4606       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4607     }
4608 
4609     MVT XLenVT = Subtarget.getXLenVT();
4610 
4611     // Use a VL of 1 to avoid processing more elements than we need.
    MVT MaskVT =
        MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4613     SDValue VL = DAG.getConstant(1, DL, XLenVT);
4614     SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4615 
4616     // Unless the index is known to be 0, we must slide the vector down to get
4617     // the desired element into index 0.
4618     if (!isNullConstant(Idx)) {
4619       Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4620                         DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
4621     }
4622 
4623     // Extract the lower XLEN bits of the correct vector element.
4624     SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4625 
4626     // To extract the upper XLEN bits of the vector element, shift the first
4627     // element right by 32 bits and re-extract the lower XLEN bits.
4628     SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
4629                                      DAG.getConstant(32, DL, XLenVT), VL);
4630     SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
4631                                  ThirtyTwoV, Mask, VL);
4632 
4633     SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
4634 
4635     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
4636     break;
4637   }
4638   case ISD::INTRINSIC_WO_CHAIN: {
4639     unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
4640     switch (IntNo) {
4641     default:
4642       llvm_unreachable(
4643           "Don't know how to custom type legalize this intrinsic!");
4644     case Intrinsic::riscv_orc_b: {
4645       // Lower to the GORCI encoding for orc.b with the operand extended.
4646       SDValue NewOp =
4647           DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
4648       // If Zbp is enabled, use GORCIW which will sign extend the result.
4649       unsigned Opc =
4650           Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
4651       SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
4652                                 DAG.getConstant(7, DL, MVT::i64));
4653       Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
4654       return;
4655     }
4656     case Intrinsic::riscv_vmv_x_s: {
4657       EVT VT = N->getValueType(0);
4658       MVT XLenVT = Subtarget.getXLenVT();
4659       if (VT.bitsLT(XLenVT)) {
4660         // Simple case just extract using vmv.x.s and truncate.
4661         SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
4662                                       Subtarget.getXLenVT(), N->getOperand(1));
4663         Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
4664         return;
4665       }
4666 
4667       assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
4668              "Unexpected custom legalization");
4669 
4670       // We need to do the move in two steps.
4671       SDValue Vec = N->getOperand(1);
4672       MVT VecVT = Vec.getSimpleValueType();
4673 
4674       // First extract the lower XLEN bits of the element.
4675       SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
4676 
4677       // To extract the upper XLEN bits of the vector element, shift the first
4678       // element right by 32 bits and re-extract the lower XLEN bits.
4679       SDValue VL = DAG.getConstant(1, DL, XLenVT);
4680       MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
4681       SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4682       SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
4683                                        DAG.getConstant(32, DL, XLenVT), VL);
4684       SDValue LShr32 =
4685           DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
4686       SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
4687 
4688       Results.push_back(
4689           DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
4690       break;
4691     }
4692     }
4693     break;
4694   }
4695   case ISD::VECREDUCE_ADD:
4696   case ISD::VECREDUCE_AND:
4697   case ISD::VECREDUCE_OR:
4698   case ISD::VECREDUCE_XOR:
4699   case ISD::VECREDUCE_SMAX:
4700   case ISD::VECREDUCE_UMAX:
4701   case ISD::VECREDUCE_SMIN:
4702   case ISD::VECREDUCE_UMIN:
4703     if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
4704       Results.push_back(V);
4705     break;
4706   case ISD::FLT_ROUNDS_: {
4707     SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
4708     SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
4709     Results.push_back(Res.getValue(0));
4710     Results.push_back(Res.getValue(1));
4711     break;
4712   }
4713   }
4714 }
4715 
4716 // A structure to hold one of the bit-manipulation patterns below. Together, a
4717 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
4718 //   (or (and (shl x, 1), 0xAAAAAAAA),
4719 //       (and (srl x, 1), 0x55555555))
4720 struct RISCVBitmanipPat {
4721   SDValue Op;
4722   unsigned ShAmt;
4723   bool IsSHL;
4724 
4725   bool formsPairWith(const RISCVBitmanipPat &Other) const {
4726     return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
4727   }
4728 };
4729 
4730 // Matches patterns of the form
4731 //   (and (shl x, C2), (C1 << C2))
4732 //   (and (srl x, C2), C1)
4733 //   (shl (and x, C1), C2)
4734 //   (srl (and x, (C1 << C2)), C2)
4735 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
4736 // The expected masks for each shift amount are specified in BitmanipMasks where
4737 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
4738 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether
4739 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible
4740 // XLen is 64.
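// For example, with the GREVI masks used by matchGREVIPat below, the 32-bit
// expression (and (srl x, 4), 0x0F0F0F0F) matches as
//   {Op = x, ShAmt = 4, IsSHL = false}.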
4741 static Optional<RISCVBitmanipPat>
4742 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
4743   assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
4744          "Unexpected number of masks");
4745   Optional<uint64_t> Mask;
4746   // Optionally consume a mask around the shift operation.
4747   if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
4748     Mask = Op.getConstantOperandVal(1);
4749     Op = Op.getOperand(0);
4750   }
4751   if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
4752     return None;
4753   bool IsSHL = Op.getOpcode() == ISD::SHL;
4754 
4755   if (!isa<ConstantSDNode>(Op.getOperand(1)))
4756     return None;
4757   uint64_t ShAmt = Op.getConstantOperandVal(1);
4758 
4759   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
  if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
4761     return None;
4762   // If we don't have enough masks for 64 bit, then we must be trying to
4763   // match SHFL so we're only allowed to shift 1/4 of the width.
4764   if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
4765     return None;
4766 
4767   SDValue Src = Op.getOperand(0);
4768 
4769   // The expected mask is shifted left when the AND is found around SHL
4770   // patterns.
4771   //   ((x >> 1) & 0x55555555)
4772   //   ((x << 1) & 0xAAAAAAAA)
4773   bool SHLExpMask = IsSHL;
4774 
4775   if (!Mask) {
4776     // Sometimes LLVM keeps the mask as an operand of the shift, typically when
4777     // the mask is all ones: consume that now.
4778     if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
4779       Mask = Src.getConstantOperandVal(1);
4780       Src = Src.getOperand(0);
4781       // The expected mask is now in fact shifted left for SRL, so reverse the
4782       // decision.
4783       //   ((x & 0xAAAAAAAA) >> 1)
4784       //   ((x & 0x55555555) << 1)
4785       SHLExpMask = !SHLExpMask;
4786     } else {
4787       // Use a default shifted mask of all-ones if there's no AND, truncated
4788       // down to the expected width. This simplifies the logic later on.
4789       Mask = maskTrailingOnes<uint64_t>(Width);
4790       *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
4791     }
4792   }
4793 
4794   unsigned MaskIdx = Log2_32(ShAmt);
4795   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
4796 
4797   if (SHLExpMask)
4798     ExpMask <<= ShAmt;
4799 
4800   if (Mask != ExpMask)
4801     return None;
4802 
4803   return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
4804 }
4805 
4806 // Matches any of the following bit-manipulation patterns:
4807 //   (and (shl x, 1), (0x55555555 << 1))
4808 //   (and (srl x, 1), 0x55555555)
4809 //   (shl (and x, 0x55555555), 1)
4810 //   (srl (and x, (0x55555555 << 1)), 1)
4811 // where the shift amount and mask may vary thus:
4812 //   [1]  = 0x55555555 / 0xAAAAAAAA
4813 //   [2]  = 0x33333333 / 0xCCCCCCCC
4814 //   [4]  = 0x0F0F0F0F / 0xF0F0F0F0
4815 //   [8]  = 0x00FF00FF / 0xFF00FF00
//   [16] = 0x0000FFFF / 0xFFFF0000
4817 //   [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
4818 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
4819   // These are the unshifted masks which we use to match bit-manipulation
4820   // patterns. They may be shifted left in certain circumstances.
4821   static const uint64_t BitmanipMasks[] = {
4822       0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
4823       0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
4824 
4825   return matchRISCVBitmanipPat(Op, BitmanipMasks);
4826 }
4827 
4828 // Match the following pattern as a GREVI(W) operation
4829 //   (or (BITMANIP_SHL x), (BITMANIP_SRL x))
4830 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
4831                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
4833   EVT VT = Op.getValueType();
4834 
4835   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
4836     auto LHS = matchGREVIPat(Op.getOperand(0));
4837     auto RHS = matchGREVIPat(Op.getOperand(1));
4838     if (LHS && RHS && LHS->formsPairWith(*RHS)) {
4839       SDLoc DL(Op);
4840       return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
4841                          DAG.getConstant(LHS->ShAmt, DL, VT));
4842     }
4843   }
4844   return SDValue();
4845 }
4846 
// Matches any of the following patterns as a GORCI(W) operation
4848 // 1.  (or (GREVI x, shamt), x) if shamt is a power of 2
4849 // 2.  (or x, (GREVI x, shamt)) if shamt is a power of 2
4850 // 3.  (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
4851 // Note that with the variant of 3.,
4852 //     (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
4853 // the inner pattern will first be matched as GREVI and then the outer
4854 // pattern will be matched to GORC via the first rule above.
4855 // 4.  (or (rotl/rotr x, bitwidth/2), x)
4856 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
4857                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
4859   EVT VT = Op.getValueType();
4860 
4861   if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
4862     SDLoc DL(Op);
4863     SDValue Op0 = Op.getOperand(0);
4864     SDValue Op1 = Op.getOperand(1);
4865 
4866     auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
4867       if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
4868           isa<ConstantSDNode>(Reverse.getOperand(1)) &&
4869           isPowerOf2_32(Reverse.getConstantOperandVal(1)))
4870         return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
4871       // We can also form GORCI from ROTL/ROTR by half the bitwidth.
4872       if ((Reverse.getOpcode() == ISD::ROTL ||
4873            Reverse.getOpcode() == ISD::ROTR) &&
4874           Reverse.getOperand(0) == X &&
4875           isa<ConstantSDNode>(Reverse.getOperand(1))) {
4876         uint64_t RotAmt = Reverse.getConstantOperandVal(1);
4877         if (RotAmt == (VT.getSizeInBits() / 2))
4878           return DAG.getNode(RISCVISD::GORC, DL, VT, X,
4879                              DAG.getConstant(RotAmt, DL, VT));
4880       }
4881       return SDValue();
4882     };
4883 
4884     // Check for either commutable permutation of (or (GREVI x, shamt), x)
4885     if (SDValue V = MatchOROfReverse(Op0, Op1))
4886       return V;
4887     if (SDValue V = MatchOROfReverse(Op1, Op0))
4888       return V;
4889 
4890     // OR is commutable so canonicalize its OR operand to the left
4891     if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
4892       std::swap(Op0, Op1);
4893     if (Op0.getOpcode() != ISD::OR)
4894       return SDValue();
4895     SDValue OrOp0 = Op0.getOperand(0);
4896     SDValue OrOp1 = Op0.getOperand(1);
4897     auto LHS = matchGREVIPat(OrOp0);
4898     // OR is commutable so swap the operands and try again: x might have been
4899     // on the left
4900     if (!LHS) {
4901       std::swap(OrOp0, OrOp1);
4902       LHS = matchGREVIPat(OrOp0);
4903     }
4904     auto RHS = matchGREVIPat(Op1);
4905     if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
4906       return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
4907                          DAG.getConstant(LHS->ShAmt, DL, VT));
4908     }
4909   }
4910   return SDValue();
4911 }
4912 
4913 // Matches any of the following bit-manipulation patterns:
4914 //   (and (shl x, 1), (0x22222222 << 1))
4915 //   (and (srl x, 1), 0x22222222)
4916 //   (shl (and x, 0x22222222), 1)
4917 //   (srl (and x, (0x22222222 << 1)), 1)
4918 // where the shift amount and mask may vary thus:
4919 //   [1]  = 0x22222222 / 0x44444444
//   [2]  = 0x0C0C0C0C / 0x30303030
4921 //   [4]  = 0x00F000F0 / 0x0F000F00
4922 //   [8]  = 0x0000FF00 / 0x00FF0000
4923 //   [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
4924 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
4925   // These are the unshifted masks which we use to match bit-manipulation
4926   // patterns. They may be shifted left in certain circumstances.
4927   static const uint64_t BitmanipMasks[] = {
4928       0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
4929       0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
4930 
4931   return matchRISCVBitmanipPat(Op, BitmanipMasks);
4932 }
4933 
// Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
4935 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
4936                                const RISCVSubtarget &Subtarget) {
  assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
4938   EVT VT = Op.getValueType();
4939 
4940   if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
4941     return SDValue();
4942 
4943   SDValue Op0 = Op.getOperand(0);
4944   SDValue Op1 = Op.getOperand(1);
4945 
4946   // Or is commutable so canonicalize the second OR to the LHS.
4947   if (Op0.getOpcode() != ISD::OR)
4948     std::swap(Op0, Op1);
4949   if (Op0.getOpcode() != ISD::OR)
4950     return SDValue();
4951 
4952   // We found an inner OR, so our operands are the operands of the inner OR
4953   // and the other operand of the outer OR.
4954   SDValue A = Op0.getOperand(0);
4955   SDValue B = Op0.getOperand(1);
4956   SDValue C = Op1;
4957 
4958   auto Match1 = matchSHFLPat(A);
4959   auto Match2 = matchSHFLPat(B);
4960 
4961   // If neither matched, we failed.
4962   if (!Match1 && !Match2)
4963     return SDValue();
4964 
  // We had at least one match. If one failed, try the remaining C operand.
4966   if (!Match1) {
4967     std::swap(A, C);
4968     Match1 = matchSHFLPat(A);
4969     if (!Match1)
4970       return SDValue();
4971   } else if (!Match2) {
4972     std::swap(B, C);
4973     Match2 = matchSHFLPat(B);
4974     if (!Match2)
4975       return SDValue();
4976   }
4977   assert(Match1 && Match2);
4978 
4979   // Make sure our matches pair up.
4980   if (!Match1->formsPairWith(*Match2))
4981     return SDValue();
4982 
  // All that remains is to make sure C is an AND with the same input that
  // masks out the bits that are being shuffled.
4985   if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
4986       C.getOperand(0) != Match1->Op)
4987     return SDValue();
4988 
4989   uint64_t Mask = C.getConstantOperandVal(1);
4990 
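  // These are the expected masks for the AND operand C: the complement of the
  // union of the shifted and unshifted SHFL masks, i.e. the bits that are left
  // in place by the shuffle.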
4991   static const uint64_t BitmanipMasks[] = {
4992       0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
4993       0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
4994   };
4995 
4996   unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
4997   unsigned MaskIdx = Log2_32(Match1->ShAmt);
4998   uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
4999 
5000   if (Mask != ExpMask)
5001     return SDValue();
5002 
5003   SDLoc DL(Op);
5004   return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
5005                      DAG.getConstant(Match1->ShAmt, DL, VT));
5006 }
5007 
5008 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
5009 // non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated stage does
// not undo itself, but it is redundant.
5012 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
5013   SDValue Src = N->getOperand(0);
5014 
5015   if (Src.getOpcode() != N->getOpcode())
5016     return SDValue();
5017 
5018   if (!isa<ConstantSDNode>(N->getOperand(1)) ||
5019       !isa<ConstantSDNode>(Src.getOperand(1)))
5020     return SDValue();
5021 
5022   unsigned ShAmt1 = N->getConstantOperandVal(1);
5023   unsigned ShAmt2 = Src.getConstantOperandVal(1);
5024   Src = Src.getOperand(0);
5025 
5026   unsigned CombinedShAmt;
5027   if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
5028     CombinedShAmt = ShAmt1 | ShAmt2;
5029   else
5030     CombinedShAmt = ShAmt1 ^ ShAmt2;
5031 
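  // A combined control of zero means the stages cancel out (GREV) or were
  // no-ops (GORC), so the node reduces to the original source value.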
5032   if (CombinedShAmt == 0)
5033     return Src;
5034 
5035   SDLoc DL(N);
5036   return DAG.getNode(
5037       N->getOpcode(), DL, N->getValueType(0), Src,
5038       DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
5039 }
5040 
5041 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
5042                                                DAGCombinerInfo &DCI) const {
5043   SelectionDAG &DAG = DCI.DAG;
5044 
5045   switch (N->getOpcode()) {
5046   default:
5047     break;
5048   case RISCVISD::SplitF64: {
5049     SDValue Op0 = N->getOperand(0);
5050     // If the input to SplitF64 is just BuildPairF64 then the operation is
5051     // redundant. Instead, use BuildPairF64's operands directly.
5052     if (Op0->getOpcode() == RISCVISD::BuildPairF64)
5053       return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
5054 
5055     SDLoc DL(N);
5056 
5057     // It's cheaper to materialise two 32-bit integers than to load a double
5058     // from the constant pool and transfer it to integer registers through the
5059     // stack.
5060     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
5061       APInt V = C->getValueAPF().bitcastToAPInt();
5062       SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
5063       SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
5064       return DCI.CombineTo(N, Lo, Hi);
5065     }
5066 
5067     // This is a target-specific version of a DAGCombine performed in
5068     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5069     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5070     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5071     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5072         !Op0.getNode()->hasOneUse())
5073       break;
5074     SDValue NewSplitF64 =
5075         DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
5076                     Op0.getOperand(0));
5077     SDValue Lo = NewSplitF64.getValue(0);
5078     SDValue Hi = NewSplitF64.getValue(1);
5079     APInt SignBit = APInt::getSignMask(32);
5080     if (Op0.getOpcode() == ISD::FNEG) {
5081       SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
5082                                   DAG.getConstant(SignBit, DL, MVT::i32));
5083       return DCI.CombineTo(N, Lo, NewHi);
5084     }
5085     assert(Op0.getOpcode() == ISD::FABS);
5086     SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
5087                                 DAG.getConstant(~SignBit, DL, MVT::i32));
5088     return DCI.CombineTo(N, Lo, NewHi);
5089   }
5090   case RISCVISD::SLLW:
5091   case RISCVISD::SRAW:
5092   case RISCVISD::SRLW:
5093   case RISCVISD::ROLW:
5094   case RISCVISD::RORW: {
5095     // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
5096     SDValue LHS = N->getOperand(0);
5097     SDValue RHS = N->getOperand(1);
5098     APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
5099     APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
5100     if (SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI) ||
5101         SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)) {
5102       if (N->getOpcode() != ISD::DELETED_NODE)
5103         DCI.AddToWorklist(N);
5104       return SDValue(N, 0);
5105     }
5106     break;
5107   }
5108   case RISCVISD::CLZW:
5109   case RISCVISD::CTZW: {
5110     // Only the lower 32 bits of the first operand are read
5111     SDValue Op0 = N->getOperand(0);
5112     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5113     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
5114       if (N->getOpcode() != ISD::DELETED_NODE)
5115         DCI.AddToWorklist(N);
5116       return SDValue(N, 0);
5117     }
5118     break;
5119   }
5120   case RISCVISD::FSL:
5121   case RISCVISD::FSR: {
    // Only the lower log2(BitWidth)+1 bits of the shift amount are read.
5123     SDValue ShAmt = N->getOperand(2);
5124     unsigned BitWidth = ShAmt.getValueSizeInBits();
5125     assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
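    // For example, a 32-bit FSL/FSR uses shift amounts 0-63, so only the low
    // 6 bits of the shift amount are demanded.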
5126     APInt ShAmtMask(BitWidth, (BitWidth * 2) - 1);
5127     if (SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5128       if (N->getOpcode() != ISD::DELETED_NODE)
5129         DCI.AddToWorklist(N);
5130       return SDValue(N, 0);
5131     }
5132     break;
5133   }
5134   case RISCVISD::FSLW:
5135   case RISCVISD::FSRW: {
5136     // Only the lower 32 bits of Values and lower 6 bits of shift amount are
5137     // read.
5138     SDValue Op0 = N->getOperand(0);
5139     SDValue Op1 = N->getOperand(1);
5140     SDValue ShAmt = N->getOperand(2);
5141     APInt OpMask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5142     APInt ShAmtMask = APInt::getLowBitsSet(ShAmt.getValueSizeInBits(), 6);
5143     if (SimplifyDemandedBits(Op0, OpMask, DCI) ||
5144         SimplifyDemandedBits(Op1, OpMask, DCI) ||
5145         SimplifyDemandedBits(ShAmt, ShAmtMask, DCI)) {
5146       if (N->getOpcode() != ISD::DELETED_NODE)
5147         DCI.AddToWorklist(N);
5148       return SDValue(N, 0);
5149     }
5150     break;
5151   }
5152   case RISCVISD::GREVW:
5153   case RISCVISD::GORCW: {
5154     // Only the lower 32 bits of the first operand are read
5155     SDValue Op0 = N->getOperand(0);
5156     APInt Mask = APInt::getLowBitsSet(Op0.getValueSizeInBits(), 32);
5157     if (SimplifyDemandedBits(Op0, Mask, DCI)) {
5158       if (N->getOpcode() != ISD::DELETED_NODE)
5159         DCI.AddToWorklist(N);
5160       return SDValue(N, 0);
5161     }
5162 
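    // Stacked GREVW/GORCW nodes combine the same way as their non-W forms.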
5163     return combineGREVI_GORCI(N, DCI.DAG);
5164   }
5165   case RISCVISD::FMV_X_ANYEXTW_RV64: {
5166     SDLoc DL(N);
5167     SDValue Op0 = N->getOperand(0);
5168     // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
5169     // conversion is unnecessary and can be replaced with an ANY_EXTEND
5170     // of the FMV_W_X_RV64 operand.
5171     if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
5172       assert(Op0.getOperand(0).getValueType() == MVT::i64 &&
5173              "Unexpected value type!");
5174       return Op0.getOperand(0);
5175     }
5176 
5177     // This is a target-specific version of a DAGCombine performed in
5178     // DAGCombiner::visitBITCAST. It performs the equivalent of:
5179     // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
5180     // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
5181     if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
5182         !Op0.getNode()->hasOneUse())
5183       break;
5184     SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
5185                                  Op0.getOperand(0));
5186     APInt SignBit = APInt::getSignMask(32).sext(64);
5187     if (Op0.getOpcode() == ISD::FNEG)
5188       return DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
5189                          DAG.getConstant(SignBit, DL, MVT::i64));
5190 
5191     assert(Op0.getOpcode() == ISD::FABS);
5192     return DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
5193                        DAG.getConstant(~SignBit, DL, MVT::i64));
5194   }
5195   case RISCVISD::GREV:
5196   case RISCVISD::GORC:
5197     return combineGREVI_GORCI(N, DCI.DAG);
5198   case ISD::OR:
5199     if (auto GREV = combineORToGREV(SDValue(N, 0), DCI.DAG, Subtarget))
5200       return GREV;
5201     if (auto GORC = combineORToGORC(SDValue(N, 0), DCI.DAG, Subtarget))
5202       return GORC;
5203     if (auto SHFL = combineORToSHFL(SDValue(N, 0), DCI.DAG, Subtarget))
5204       return SHFL;
5205     break;
5206   case RISCVISD::SELECT_CC: {
    // Try to fold the comparison operands of the SELECT_CC into a simpler form.
5208     SDValue LHS = N->getOperand(0);
5209     SDValue RHS = N->getOperand(1);
5210     auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
5211     if (!ISD::isIntEqualitySetCC(CCVal))
5212       break;
5213 
5214     // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
5215     //      (select_cc X, Y, lt, trueV, falseV)
5216     // Sometimes the setcc is introduced after select_cc has been formed.
5217     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5218         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5219       // If we're looking for eq 0 instead of ne 0, we need to invert the
5220       // condition.
5221       bool Invert = CCVal == ISD::SETEQ;
5222       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5223       if (Invert)
5224         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5225 
5226       SDLoc DL(N);
5227       RHS = LHS.getOperand(1);
5228       LHS = LHS.getOperand(0);
5229       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5230 
5231       SDValue TargetCC =
5232           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5233       return DAG.getNode(
5234           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5235           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5236     }
5237 
5238     // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
5239     //      (select_cc X, Y, eq/ne, trueV, falseV)
5240     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5241       return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
5242                          {LHS.getOperand(0), LHS.getOperand(1),
5243                           N->getOperand(2), N->getOperand(3),
5244                           N->getOperand(4)});
5245     // (select_cc X, 1, setne, trueV, falseV) ->
5246     // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
5247     // This can occur when legalizing some floating point comparisons.
5248     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5249     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5250       SDLoc DL(N);
5251       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5252       SDValue TargetCC =
5253           DAG.getTargetConstant(CCVal, DL, Subtarget.getXLenVT());
5254       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5255       return DAG.getNode(
5256           RISCVISD::SELECT_CC, DL, N->getValueType(0),
5257           {LHS, RHS, TargetCC, N->getOperand(3), N->getOperand(4)});
5258     }
5259 
5260     break;
5261   }
5262   case RISCVISD::BR_CC: {
5263     SDValue LHS = N->getOperand(1);
5264     SDValue RHS = N->getOperand(2);
5265     ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
5266     if (!ISD::isIntEqualitySetCC(CCVal))
5267       break;
5268 
5269     // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
5270     //      (br_cc X, Y, lt, dest)
5271     // Sometimes the setcc is introduced after br_cc has been formed.
5272     if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
5273         LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
5274       // If we're looking for eq 0 instead of ne 0, we need to invert the
5275       // condition.
5276       bool Invert = CCVal == ISD::SETEQ;
5277       CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
5278       if (Invert)
5279         CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5280 
5281       SDLoc DL(N);
5282       RHS = LHS.getOperand(1);
5283       LHS = LHS.getOperand(0);
5284       translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
5285 
5286       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5287                          N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
5288                          N->getOperand(4));
5289     }
5290 
5291     // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
    //      (br_cc X, Y, eq/ne, dest)
5293     if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
5294       return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
5295                          N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
5296                          N->getOperand(3), N->getOperand(4));
5297 
    // (br_cc X, 1, setne, dest) ->
    // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
5300     // This can occur when legalizing some floating point comparisons.
5301     APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
5302     if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
5303       SDLoc DL(N);
5304       CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
5305       SDValue TargetCC = DAG.getCondCode(CCVal);
5306       RHS = DAG.getConstant(0, DL, LHS.getValueType());
5307       return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
5308                          N->getOperand(0), LHS, RHS, TargetCC,
5309                          N->getOperand(4));
5310     }
5311     break;
5312   }
5313   case ISD::FCOPYSIGN: {
5314     EVT VT = N->getValueType(0);
5315     if (!VT.isVector())
5316       break;
    // There is a form of VFSGNJ which injects the negated sign of its second
    // operand. Try to bubble any FNEG up after the extend/round to produce
    // this optimized pattern. Avoid modifying cases where the FP_ROUND is a
    // truncating round (TRUNC=1).
5321     SDValue In2 = N->getOperand(1);
5322     // Avoid cases where the extend/round has multiple uses, as duplicating
5323     // those is typically more expensive than removing a fneg.
5324     if (!In2.hasOneUse())
5325       break;
5326     if (In2.getOpcode() != ISD::FP_EXTEND &&
5327         (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
5328       break;
5329     In2 = In2.getOperand(0);
5330     if (In2.getOpcode() != ISD::FNEG)
5331       break;
5332     SDLoc DL(N);
5333     SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
5334     return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
5335                        DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
5336   }
5337   case ISD::MGATHER:
5338   case ISD::MSCATTER: {
5339     if (!DCI.isBeforeLegalize())
5340       break;
5341     MaskedGatherScatterSDNode *MGSN = cast<MaskedGatherScatterSDNode>(N);
5342     SDValue Index = MGSN->getIndex();
5343     EVT IndexVT = Index.getValueType();
5344     MVT XLenVT = Subtarget.getXLenVT();
    // RISC-V indexed loads and stores only support the "unsigned unscaled"
    // addressing mode, so anything else must be manually legalized.
5347     bool NeedsIdxLegalization = MGSN->isIndexScaled() ||
5348                                 (MGSN->isIndexSigned() &&
5349                                  IndexVT.getVectorElementType().bitsLT(XLenVT));
5350     if (!NeedsIdxLegalization)
5351       break;
5352 
5353     SDLoc DL(N);
5354 
5355     // Any index legalization should first promote to XLenVT, so we don't lose
5356     // bits when scaling. This may create an illegal index type so we let
5357     // LLVM's legalization take care of the splitting.
5358     if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
5359       IndexVT = IndexVT.changeVectorElementType(XLenVT);
5360       Index = DAG.getNode(MGSN->isIndexSigned() ? ISD::SIGN_EXTEND
5361                                                 : ISD::ZERO_EXTEND,
5362                           DL, IndexVT, Index);
5363     }
5364 
5365     unsigned Scale = N->getConstantOperandVal(5);
5366     if (MGSN->isIndexScaled() && Scale != 1) {
5367       // Manually scale the indices by the element size.
5368       // TODO: Sanitize the scale operand here?
5369       assert(isPowerOf2_32(Scale) && "Expecting power-of-two types");
5370       SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
5371       Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
5372     }
5373 
5374     ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
5375     if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N)) {
5376       return DAG.getMaskedGather(
5377           N->getVTList(), MGSN->getMemoryVT(), DL,
5378           {MGSN->getChain(), MGN->getPassThru(), MGSN->getMask(),
5379            MGSN->getBasePtr(), Index, MGN->getScale()},
5380           MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
5381     }
5382     const auto *MSN = cast<MaskedScatterSDNode>(N);
5383     return DAG.getMaskedScatter(
5384         N->getVTList(), MGSN->getMemoryVT(), DL,
5385         {MGSN->getChain(), MSN->getValue(), MGSN->getMask(), MGSN->getBasePtr(),
5386          Index, MGSN->getScale()},
5387         MGSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
5388   }
5389   }
5390 
5391   return SDValue();
5392 }
5393 
5394 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
5395     const SDNode *N, CombineLevel Level) const {
5396   // The following folds are only desirable if `(OP _, c1 << c2)` can be
5397   // materialised in fewer instructions than `(OP _, c1)`:
5398   //
5399   //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
5400   //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
5401   SDValue N0 = N->getOperand(0);
5402   EVT Ty = N0.getValueType();
5403   if (Ty.isScalarInteger() &&
5404       (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
5405     auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
5406     auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
5407     if (C1 && C2) {
5408       const APInt &C1Int = C1->getAPIntValue();
5409       APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
5410 
5411       // We can materialise `c1 << c2` into an add immediate, so it's "free",
5412       // and the combine should happen, to potentially allow further combines
5413       // later.
5414       if (ShiftedC1Int.getMinSignedBits() <= 64 &&
5415           isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
5416         return true;
5417 
5418       // We can materialise `c1` in an add immediate, so it's "free", and the
5419       // combine should be prevented.
5420       if (C1Int.getMinSignedBits() <= 64 &&
5421           isLegalAddImmediate(C1Int.getSExtValue()))
5422         return false;
5423 
5424       // Neither constant will fit into an immediate, so find materialisation
5425       // costs.
5426       int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
5427                                               Subtarget.is64Bit());
5428       int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
5429           ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
5430 
5431       // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
5432       // combine should be prevented.
5433       if (C1Cost < ShiftedC1Cost)
5434         return false;
5435     }
5436   }
5437   return true;
5438 }
5439 
5440 bool RISCVTargetLowering::targetShrinkDemandedConstant(
5441     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
5442     TargetLoweringOpt &TLO) const {
5443   // Delay this optimization as late as possible.
5444   if (!TLO.LegalOps)
5445     return false;
5446 
5447   EVT VT = Op.getValueType();
5448   if (VT.isVector())
5449     return false;
5450 
5451   // Only handle AND for now.
5452   if (Op.getOpcode() != ISD::AND)
5453     return false;
5454 
5455   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
5456   if (!C)
5457     return false;
5458 
5459   const APInt &Mask = C->getAPIntValue();
5460 
5461   // Clear all non-demanded bits initially.
5462   APInt ShrunkMask = Mask & DemandedBits;
5463 
5464   // Try to make a smaller immediate by setting undemanded bits.
5465 
5466   APInt ExpandedMask = Mask | ~DemandedBits;
5467 
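  // A replacement mask is legal if it keeps every demanded bit of the original
  // mask and only adds bits that are not demanded; UseMask then rewrites the
  // AND with the new constant.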
5468   auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
5469     return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
5470   };
5471   auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
5472     if (NewMask == Mask)
5473       return true;
5474     SDLoc DL(Op);
5475     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
5476     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
5477     return TLO.CombineTo(Op, NewOp);
5478   };
5479 
5480   // If the shrunk mask fits in sign extended 12 bits, let the target
5481   // independent code apply it.
5482   if (ShrunkMask.isSignedIntN(12))
5483     return false;
5484 
5485   // Preserve (and X, 0xffff) when zext.h is supported.
5486   if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
5487     APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
5488     if (IsLegalMask(NewMask))
5489       return UseMask(NewMask);
5490   }
5491 
5492   // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
5493   if (VT == MVT::i64) {
5494     APInt NewMask = APInt(64, 0xffffffff);
5495     if (IsLegalMask(NewMask))
5496       return UseMask(NewMask);
5497   }
5498 
5499   // For the remaining optimizations, we need to be able to make a negative
5500   // number through a combination of mask and undemanded bits.
5501   if (!ExpandedMask.isNegative())
5502     return false;
5503 
  // Compute the fewest number of bits we need to represent the negative
  // number.
5505   unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
5506 
5507   // Try to make a 12 bit negative immediate. If that fails try to make a 32
5508   // bit negative immediate unless the shrunk immediate already fits in 32 bits.
5509   APInt NewMask = ShrunkMask;
5510   if (MinSignedBits <= 12)
5511     NewMask.setBitsFrom(11);
5512   else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
5513     NewMask.setBitsFrom(31);
5514   else
5515     return false;
5516 
5517   // Sanity check that our new mask is a subset of the demanded mask.
5518   assert(IsLegalMask(NewMask));
5519   return UseMask(NewMask);
5520 }
5521 
5522 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
5523                                                         KnownBits &Known,
5524                                                         const APInt &DemandedElts,
5525                                                         const SelectionDAG &DAG,
5526                                                         unsigned Depth) const {
5527   unsigned BitWidth = Known.getBitWidth();
5528   unsigned Opc = Op.getOpcode();
5529   assert((Opc >= ISD::BUILTIN_OP_END ||
5530           Opc == ISD::INTRINSIC_WO_CHAIN ||
5531           Opc == ISD::INTRINSIC_W_CHAIN ||
5532           Opc == ISD::INTRINSIC_VOID) &&
5533          "Should use MaskedValueIsZero if you don't know whether Op"
5534          " is a target node!");
5535 
5536   Known.resetAll();
5537   switch (Opc) {
5538   default: break;
5539   case RISCVISD::SELECT_CC: {
5540     Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
5541     // If we don't know any bits, early out.
5542     if (Known.isUnknown())
5543       break;
5544     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
5545 
5546     // Only known if known in both the LHS and RHS.
5547     Known = KnownBits::commonBits(Known, Known2);
5548     break;
5549   }
5550   case RISCVISD::REMUW: {
5551     KnownBits Known2;
5552     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
5553     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
5554     // We only care about the lower 32 bits.
5555     Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
5556     // Restore the original width by sign extending.
5557     Known = Known.sext(BitWidth);
5558     break;
5559   }
5560   case RISCVISD::DIVUW: {
5561     KnownBits Known2;
5562     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
5563     Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
5564     // We only care about the lower 32 bits.
5565     Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
5566     // Restore the original width by sign extending.
5567     Known = Known.sext(BitWidth);
5568     break;
5569   }
5570   case RISCVISD::CTZW: {
5571     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
5572     unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
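    // The result is at most PossibleTZ, so it fits in Log2_32(PossibleTZ) + 1
    // bits; every higher bit of the result is known zero.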
5573     unsigned LowBits = Log2_32(PossibleTZ) + 1;
5574     Known.Zero.setBitsFrom(LowBits);
5575     break;
5576   }
5577   case RISCVISD::CLZW: {
5578     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
5579     unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
5580     unsigned LowBits = Log2_32(PossibleLZ) + 1;
5581     Known.Zero.setBitsFrom(LowBits);
5582     break;
5583   }
5584   case RISCVISD::READ_VLENB:
    // We assume VLENB is a power of two that is at least 16 bytes, so the
    // low 4 bits of the result are always zero.
5586     Known.Zero.setLowBits(4);
5587     break;
5588   }
5589 }
5590 
5591 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
5592     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
5593     unsigned Depth) const {
5594   switch (Op.getOpcode()) {
5595   default:
5596     break;
5597   case RISCVISD::SLLW:
5598   case RISCVISD::SRAW:
5599   case RISCVISD::SRLW:
5600   case RISCVISD::DIVW:
5601   case RISCVISD::DIVUW:
5602   case RISCVISD::REMUW:
5603   case RISCVISD::ROLW:
5604   case RISCVISD::RORW:
5605   case RISCVISD::GREVW:
5606   case RISCVISD::GORCW:
5607   case RISCVISD::FSLW:
5608   case RISCVISD::FSRW:
5609     // TODO: As the result is sign-extended, this is conservatively correct. A
5610     // more precise answer could be calculated for SRAW depending on known
5611     // bits in the shift amount.
5612     return 33;
5613   case RISCVISD::SHFL: {
    // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word
5615     // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
5616     // will stay within the upper 32 bits. If there were more than 32 sign bits
5617     // before there will be at least 33 sign bits after.
5618     if (Op.getValueType() == MVT::i64 &&
5619         isa<ConstantSDNode>(Op.getOperand(1)) &&
5620         (Op.getConstantOperandVal(1) & 0x10) == 0) {
5621       unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
5622       if (Tmp > 32)
5623         return 33;
5624     }
5625     break;
5626   }
5627   case RISCVISD::VMV_X_S:
5628     // The number of sign bits of the scalar result is computed by obtaining the
5629     // element type of the input vector operand, subtracting its width from the
5630     // XLEN, and then adding one (sign bit within the element type). If the
5631     // element type is wider than XLen, the least-significant XLEN bits are
5632     // taken.
5633     if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
5634       return 1;
5635     return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
5636   }
5637 
5638   return 1;
5639 }
5640 
5641 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
5642                                                   MachineBasicBlock *BB) {
5643   assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
5644 
5645   // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
5646   // Should the count have wrapped while it was being read, we need to try
5647   // again.
5648   // ...
5649   // read:
5650   // rdcycleh x3 # load high word of cycle
5651   // rdcycle  x2 # load low word of cycle
5652   // rdcycleh x4 # load high word of cycle
5653   // bne x3, x4, read # check if high word reads match, otherwise try again
5654   // ...
5655 
5656   MachineFunction &MF = *BB->getParent();
5657   const BasicBlock *LLVM_BB = BB->getBasicBlock();
5658   MachineFunction::iterator It = ++BB->getIterator();
5659 
5660   MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
5661   MF.insert(It, LoopMBB);
5662 
5663   MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
5664   MF.insert(It, DoneMBB);
5665 
5666   // Transfer the remainder of BB and its successor edges to DoneMBB.
5667   DoneMBB->splice(DoneMBB->begin(), BB,
5668                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
5669   DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
5670 
5671   BB->addSuccessor(LoopMBB);
5672 
5673   MachineRegisterInfo &RegInfo = MF.getRegInfo();
5674   Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
5675   Register LoReg = MI.getOperand(0).getReg();
5676   Register HiReg = MI.getOperand(1).getReg();
5677   DebugLoc DL = MI.getDebugLoc();
5678 
5679   const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
5680   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
5681       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
5682       .addReg(RISCV::X0);
5683   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
5684       .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
5685       .addReg(RISCV::X0);
5686   BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
5687       .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
5688       .addReg(RISCV::X0);
5689 
5690   BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
5691       .addReg(HiReg)
5692       .addReg(ReadAgainReg)
5693       .addMBB(LoopMBB);
5694 
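  // Branch back to the start of the loop if the two high-word reads disagree;
  // otherwise fall through to DoneMBB.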
5695   LoopMBB->addSuccessor(LoopMBB);
5696   LoopMBB->addSuccessor(DoneMBB);
5697 
5698   MI.eraseFromParent();
5699 
5700   return DoneMBB;
5701 }
5702 
5703 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
5704                                              MachineBasicBlock *BB) {
5705   assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
5706 
5707   MachineFunction &MF = *BB->getParent();
5708   DebugLoc DL = MI.getDebugLoc();
5709   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
5710   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
5711   Register LoReg = MI.getOperand(0).getReg();
5712   Register HiReg = MI.getOperand(1).getReg();
5713   Register SrcReg = MI.getOperand(2).getReg();
5714   const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
5715   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
5716 
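  // Split the f64 by spilling it to a stack slot and reloading the low and
  // high halves into the two GPRs as 32-bit words.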
5717   TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
5718                           RI);
5719   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
5720   MachineMemOperand *MMOLo =
5721       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
5722   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
5723       MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
5724   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
5725       .addFrameIndex(FI)
5726       .addImm(0)
5727       .addMemOperand(MMOLo);
5728   BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
5729       .addFrameIndex(FI)
5730       .addImm(4)
5731       .addMemOperand(MMOHi);
5732   MI.eraseFromParent(); // The pseudo instruction is gone now.
5733   return BB;
5734 }
5735 
5736 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
5737                                                  MachineBasicBlock *BB) {
5738   assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
5739          "Unexpected instruction");
5740 
5741   MachineFunction &MF = *BB->getParent();
5742   DebugLoc DL = MI.getDebugLoc();
5743   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
5744   const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
5745   Register DstReg = MI.getOperand(0).getReg();
5746   Register LoReg = MI.getOperand(1).getReg();
5747   Register HiReg = MI.getOperand(2).getReg();
5748   const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
5749   int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
5750 
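  // Build the f64 by storing the two GPR halves to a stack slot as 32-bit
  // words and reloading the whole slot into the FPR64 destination.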
5751   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
5752   MachineMemOperand *MMOLo =
5753       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
5754   MachineMemOperand *MMOHi = MF.getMachineMemOperand(
5755       MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
5756   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
5757       .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
5758       .addFrameIndex(FI)
5759       .addImm(0)
5760       .addMemOperand(MMOLo);
5761   BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
5762       .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
5763       .addFrameIndex(FI)
5764       .addImm(4)
5765       .addMemOperand(MMOHi);
5766   TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
5767   MI.eraseFromParent(); // The pseudo instruction is gone now.
5768   return BB;
5769 }
5770 
5771 static bool isSelectPseudo(MachineInstr &MI) {
5772   switch (MI.getOpcode()) {
5773   default:
5774     return false;
5775   case RISCV::Select_GPR_Using_CC_GPR:
5776   case RISCV::Select_FPR16_Using_CC_GPR:
5777   case RISCV::Select_FPR32_Using_CC_GPR:
5778   case RISCV::Select_FPR64_Using_CC_GPR:
5779     return true;
5780   }
5781 }
5782 
5783 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
5784                                            MachineBasicBlock *BB) {
5785   // To "insert" Select_* instructions, we actually have to insert the triangle
5786   // control-flow pattern.  The incoming instructions know the destination vreg
5787   // to set, the condition code register to branch on, the true/false values to
5788   // select between, and the condcode to use to select the appropriate branch.
5789   //
5790   // We produce the following control flow:
5791   //     HeadMBB
5792   //     |  \
5793   //     |  IfFalseMBB
5794   //     | /
5795   //    TailMBB
5796   //
5797   // When we find a sequence of selects we attempt to optimize their emission
5798   // by sharing the control flow. Currently we only handle cases where we have
5799   // multiple selects with the exact same condition (same LHS, RHS and CC).
5800   // The selects may be interleaved with other instructions if the other
5801   // instructions meet some requirements we deem safe:
5802   // - They are debug instructions. Otherwise,
5803   // - They do not have side-effects, do not access memory and their inputs do
5804   //   not depend on the results of the select pseudo-instructions.
5805   // The TrueV/FalseV operands of the selects cannot depend on the result of
5806   // previous selects in the sequence.
5807   // These conditions could be further relaxed. See the X86 target for a
5808   // related approach and more information.
5809   Register LHS = MI.getOperand(1).getReg();
5810   Register RHS = MI.getOperand(2).getReg();
5811   auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
5812 
5813   SmallVector<MachineInstr *, 4> SelectDebugValues;
5814   SmallSet<Register, 4> SelectDests;
5815   SelectDests.insert(MI.getOperand(0).getReg());
5816 
5817   MachineInstr *LastSelectPseudo = &MI;
5818 
5819   for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
5820        SequenceMBBI != E; ++SequenceMBBI) {
5821     if (SequenceMBBI->isDebugInstr())
5822       continue;
5823     else if (isSelectPseudo(*SequenceMBBI)) {
5824       if (SequenceMBBI->getOperand(1).getReg() != LHS ||
5825           SequenceMBBI->getOperand(2).getReg() != RHS ||
5826           SequenceMBBI->getOperand(3).getImm() != CC ||
5827           SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
5828           SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
5829         break;
5830       LastSelectPseudo = &*SequenceMBBI;
5831       SequenceMBBI->collectDebugValues(SelectDebugValues);
5832       SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
5833     } else {
5834       if (SequenceMBBI->hasUnmodeledSideEffects() ||
5835           SequenceMBBI->mayLoadOrStore())
5836         break;
5837       if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
5838             return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
5839           }))
5840         break;
5841     }
5842   }
5843 
5844   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
5845   const BasicBlock *LLVM_BB = BB->getBasicBlock();
5846   DebugLoc DL = MI.getDebugLoc();
5847   MachineFunction::iterator I = ++BB->getIterator();
5848 
5849   MachineBasicBlock *HeadMBB = BB;
5850   MachineFunction *F = BB->getParent();
5851   MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
5852   MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
5853 
5854   F->insert(I, IfFalseMBB);
5855   F->insert(I, TailMBB);
5856 
5857   // Transfer debug instructions associated with the selects to TailMBB.
5858   for (MachineInstr *DebugInstr : SelectDebugValues) {
5859     TailMBB->push_back(DebugInstr->removeFromParent());
5860   }
5861 
5862   // Move all instructions after the sequence to TailMBB.
5863   TailMBB->splice(TailMBB->end(), HeadMBB,
5864                   std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
5865   // Update machine-CFG edges by transferring all successors of the current
5866   // block to the new block which will contain the Phi nodes for the selects.
5867   TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
5868   // Set the successors for HeadMBB.
5869   HeadMBB->addSuccessor(IfFalseMBB);
5870   HeadMBB->addSuccessor(TailMBB);
5871 
5872   // Insert appropriate branch.
5873   unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
5874 
5875   BuildMI(HeadMBB, DL, TII.get(Opcode))
5876     .addReg(LHS)
5877     .addReg(RHS)
5878     .addMBB(TailMBB);
5879 
5880   // IfFalseMBB just falls through to TailMBB.
5881   IfFalseMBB->addSuccessor(TailMBB);
5882 
5883   // Create PHIs for all of the select pseudo-instructions.
5884   auto SelectMBBI = MI.getIterator();
5885   auto SelectEnd = std::next(LastSelectPseudo->getIterator());
5886   auto InsertionPoint = TailMBB->begin();
5887   while (SelectMBBI != SelectEnd) {
5888     auto Next = std::next(SelectMBBI);
5889     if (isSelectPseudo(*SelectMBBI)) {
5890       // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
5891       BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
5892               TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
5893           .addReg(SelectMBBI->getOperand(4).getReg())
5894           .addMBB(HeadMBB)
5895           .addReg(SelectMBBI->getOperand(5).getReg())
5896           .addMBB(IfFalseMBB);
5897       SelectMBBI->eraseFromParent();
5898     }
5899     SelectMBBI = Next;
5900   }
5901 
5902   F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
5903   return TailMBB;
5904 }
5905 
5906 static MachineInstr *elideCopies(MachineInstr *MI,
5907                                  const MachineRegisterInfo &MRI) {
5908   while (true) {
5909     if (!MI->isFullCopy())
5910       return MI;
5911     if (!Register::isVirtualRegister(MI->getOperand(1).getReg()))
5912       return nullptr;
5913     MI = MRI.getVRegDef(MI->getOperand(1).getReg());
5914     if (!MI)
5915       return nullptr;
5916   }
5917 }
5918 
5919 static MachineBasicBlock *addVSetVL(MachineInstr &MI, MachineBasicBlock *BB,
5920                                     int VLIndex, unsigned SEWIndex,
5921                                     RISCVVLMUL VLMul, bool ForceTailAgnostic) {
5922   MachineFunction &MF = *BB->getParent();
5923   DebugLoc DL = MI.getDebugLoc();
5924   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
5925 
5926   unsigned SEW = MI.getOperand(SEWIndex).getImm();
5927   assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
5928   RISCVVSEW ElementWidth = static_cast<RISCVVSEW>(Log2_32(SEW / 8));
5929 
5930   MachineRegisterInfo &MRI = MF.getRegInfo();
5931 
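  // Emit the appropriate VSETVLI form: VSETIVLI when VL is a small immediate,
  // VSETVLI with a register VL otherwise, or a VL-preserving VSETVLI
  // (rd = rs1 = X0) when the pseudo has no VL operand.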
5932   auto BuildVSETVLI = [&]() {
5933     if (VLIndex >= 0) {
5934       Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
5935       Register VLReg = MI.getOperand(VLIndex).getReg();
5936 
5937       // VL might be a compile time constant, but isel would have to put it
5938       // in a register. See if VL comes from an ADDI X0, imm.
5939       if (VLReg.isVirtual()) {
5940         MachineInstr *Def = MRI.getVRegDef(VLReg);
5941         if (Def && Def->getOpcode() == RISCV::ADDI &&
5942             Def->getOperand(1).getReg() == RISCV::X0 &&
5943             Def->getOperand(2).isImm()) {
5944           uint64_t Imm = Def->getOperand(2).getImm();
5945           // VSETIVLI allows a 5-bit zero extended immediate.
5946           if (isUInt<5>(Imm))
5947             return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETIVLI))
5948                 .addReg(DestReg, RegState::Define | RegState::Dead)
5949                 .addImm(Imm);
5950         }
5951       }
5952 
5953       return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
5954           .addReg(DestReg, RegState::Define | RegState::Dead)
5955           .addReg(VLReg);
5956     }
5957 
    // With no VL operand in the pseudo, do not modify VL (rd = X0, rs1 = X0).
5959     return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
5960         .addReg(RISCV::X0, RegState::Define | RegState::Dead)
5961         .addReg(RISCV::X0, RegState::Kill);
5962   };
5963 
5964   MachineInstrBuilder MIB = BuildVSETVLI();
5965 
5966   // Default to tail agnostic unless the destination is tied to a source. In
5967   // that case the user would have some control over the tail values. The tail
  // policy is also ignored on instructions that only update element 0, like
  // vmv.s.x or reductions, so use agnostic there to match the common case.
5970   // FIXME: This is conservatively correct, but we might want to detect that
5971   // the input is undefined.
5972   bool TailAgnostic = true;
5973   unsigned UseOpIdx;
5974   if (!ForceTailAgnostic && MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
5975     TailAgnostic = false;
5976     // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic.
5977     const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
5978     MachineInstr *UseMI = MRI.getVRegDef(UseMO.getReg());
5979     if (UseMI) {
5980       UseMI = elideCopies(UseMI, MRI);
5981       if (UseMI && UseMI->isImplicitDef())
5982         TailAgnostic = true;
5983     }
5984   }
5985 
5986   // For simplicity we reuse the vtype representation here.
5987   MIB.addImm(RISCVVType::encodeVTYPE(VLMul, ElementWidth,
5988                                      /*TailAgnostic*/ TailAgnostic,
5989                                      /*MaskAgnostic*/ false));
5990 
5991   // Remove (now) redundant operands from pseudo
5992   if (VLIndex >= 0) {
5993     MI.getOperand(VLIndex).setReg(RISCV::NoRegister);
5994     MI.getOperand(VLIndex).setIsKill(false);
5995   }
5996 
5997   return BB;
5998 }
5999 
6000 MachineBasicBlock *
6001 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
6002                                                  MachineBasicBlock *BB) const {
6003   uint64_t TSFlags = MI.getDesc().TSFlags;
6004 
6005   if (TSFlags & RISCVII::HasSEWOpMask) {
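    // Vector pseudos encode SEW as their last explicit operand; when a VL
    // operand is present it is the second-to-last explicit operand.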
6006     unsigned NumOperands = MI.getNumExplicitOperands();
6007     int VLIndex = (TSFlags & RISCVII::HasVLOpMask) ? NumOperands - 2 : -1;
6008     unsigned SEWIndex = NumOperands - 1;
6009     bool ForceTailAgnostic = TSFlags & RISCVII::ForceTailAgnosticMask;
6010 
6011     RISCVVLMUL VLMul = static_cast<RISCVVLMUL>((TSFlags & RISCVII::VLMulMask) >>
6012                                                RISCVII::VLMulShift);
6013     return addVSetVL(MI, BB, VLIndex, SEWIndex, VLMul, ForceTailAgnostic);
6014   }
6015 
6016   switch (MI.getOpcode()) {
6017   default:
6018     llvm_unreachable("Unexpected instr type to insert");
6019   case RISCV::ReadCycleWide:
6020     assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
6022     return emitReadCycleWidePseudo(MI, BB);
6023   case RISCV::Select_GPR_Using_CC_GPR:
6024   case RISCV::Select_FPR16_Using_CC_GPR:
6025   case RISCV::Select_FPR32_Using_CC_GPR:
6026   case RISCV::Select_FPR64_Using_CC_GPR:
6027     return emitSelectPseudo(MI, BB);
6028   case RISCV::BuildPairF64Pseudo:
6029     return emitBuildPairF64Pseudo(MI, BB);
6030   case RISCV::SplitF64Pseudo:
6031     return emitSplitF64Pseudo(MI, BB);
6032   }
6033 }
6034 
6035 // Calling Convention Implementation.
6036 // The expectations for frontend ABI lowering vary from target to target.
6037 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
6038 // details, but this is a longer term goal. For now, we simply try to keep the
6039 // role of the frontend as simple and well-defined as possible. The rules can
6040 // be summarised as:
6041 // * Never split up large scalar arguments. We handle them here.
6042 // * If a hardfloat calling convention is being used, and the struct may be
6043 // passed in a pair of registers (fp+fp, int+fp), and both registers are
6044 // available, then pass as two separate arguments. If either the GPRs or FPRs
6045 // are exhausted, then pass according to the rule below.
6046 // * If a struct could never be passed in registers or directly in a stack
6047 // slot (as it is larger than 2*XLEN and the floating point rules don't
6048 // apply), then pass it using a pointer with the byval attribute.
6049 // * If a struct is less than 2*XLEN, then coerce to either a two-element
6050 // word-sized array or a 2*XLEN scalar (depending on alignment).
6051 // * The frontend can determine whether a struct is returned by reference or
6052 // not based on its size and fields. If it will be returned by reference, the
6053 // frontend must modify the prototype so a pointer with the sret annotation is
6054 // passed as the first argument. This is not necessary for large scalar
6055 // returns.
6056 // * Struct return values and varargs should be coerced to structs containing
6057 // register-size fields in the same situations they would be for fixed
6058 // arguments.
6059 
6060 static const MCPhysReg ArgGPRs[] = {
6061   RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
6062   RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
6063 };
6064 static const MCPhysReg ArgFPR16s[] = {
6065   RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
6066   RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
6067 };
6068 static const MCPhysReg ArgFPR32s[] = {
6069   RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
6070   RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
6071 };
6072 static const MCPhysReg ArgFPR64s[] = {
6073   RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
6074   RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
6075 };
6076 // This is an interim calling convention and it may be changed in the future.
6077 static const MCPhysReg ArgVRs[] = {
6078     RISCV::V8,  RISCV::V9,  RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
6079     RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
6080     RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
6081 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2,  RISCV::V10M2, RISCV::V12M2,
6082                                      RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
6083                                      RISCV::V20M2, RISCV::V22M2};
6084 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
6085                                      RISCV::V20M4};
6086 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
6087 
6088 // Pass a 2*XLEN argument that has been split into two XLEN values through
6089 // registers or the stack as necessary.
6090 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
6091                                 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
6092                                 MVT ValVT2, MVT LocVT2,
6093                                 ISD::ArgFlagsTy ArgFlags2) {
6094   unsigned XLenInBytes = XLen / 8;
6095   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6096     // At least one half can be passed via register.
6097     State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
6098                                      VA1.getLocVT(), CCValAssign::Full));
6099   } else {
6100     // Both halves must be passed on the stack, with proper alignment.
6101     Align StackAlign =
6102         std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
6103     State.addLoc(
6104         CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
6105                             State.AllocateStack(XLenInBytes, StackAlign),
6106                             VA1.getLocVT(), CCValAssign::Full));
6107     State.addLoc(CCValAssign::getMem(
6108         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6109         LocVT2, CCValAssign::Full));
6110     return false;
6111   }
6112 
6113   if (Register Reg = State.AllocateReg(ArgGPRs)) {
6114     // The second half can also be passed via register.
6115     State.addLoc(
6116         CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
6117   } else {
6118     // The second half is passed via the stack, without additional alignment.
6119     State.addLoc(CCValAssign::getMem(
6120         ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
6121         LocVT2, CCValAssign::Full));
6122   }
6123 
6124   return false;
6125 }
6126 
6127 // Implements the RISC-V calling convention. Returns true upon failure.
6128 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
6129                      MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
6130                      ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
6131                      bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
6132                      Optional<unsigned> FirstMaskArgument) {
6133   unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
6134   assert(XLen == 32 || XLen == 64);
6135   MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
6136 
  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
6139   if (!LocVT.isVector() && IsRet && ValNo > 1)
6140     return true;
6141 
  // UseGPRForF16_F32 is true if targeting one of the soft-float ABIs, if
  // passing a variadic argument, or if no F16/F32 argument registers are
  // available.
6144   bool UseGPRForF16_F32 = true;
  // UseGPRForF64 is true if targeting soft-float ABIs or an FLEN=32 ABI, if
  // passing a variadic argument, or if no F64 argument registers are
  // available.
6147   bool UseGPRForF64 = true;
6148 
6149   switch (ABI) {
6150   default:
6151     llvm_unreachable("Unexpected ABI");
6152   case RISCVABI::ABI_ILP32:
6153   case RISCVABI::ABI_LP64:
6154     break;
6155   case RISCVABI::ABI_ILP32F:
6156   case RISCVABI::ABI_LP64F:
6157     UseGPRForF16_F32 = !IsFixed;
6158     break;
6159   case RISCVABI::ABI_ILP32D:
6160   case RISCVABI::ABI_LP64D:
6161     UseGPRForF16_F32 = !IsFixed;
6162     UseGPRForF64 = !IsFixed;
6163     break;
6164   }
6165 
6166   // FPR16, FPR32, and FPR64 alias each other.
6167   if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
6168     UseGPRForF16_F32 = true;
6169     UseGPRForF64 = true;
6170   }
6171 
6172   // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
6173   // similar local variables rather than directly checking against the target
6174   // ABI.
6175 
6176   if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
6177     LocVT = XLenVT;
6178     LocInfo = CCValAssign::BCvt;
6179   } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
6180     LocVT = MVT::i64;
6181     LocInfo = CCValAssign::BCvt;
6182   }
6183 
6184   // If this is a variadic argument, the RISC-V calling convention requires
6185   // that it is assigned an 'even' or 'aligned' register if it has 8-byte
6186   // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
6187   // be used regardless of whether the original argument was split during
6188   // legalisation or not. The argument will not be passed by registers if the
6189   // original type is larger than 2*XLEN, so the register alignment rule does
6190   // not apply.
6191   unsigned TwoXLenInBytes = (2 * XLen) / 8;
6192   if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
6193       DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
6194     unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
6195     // Skip 'odd' register if necessary.
6196     if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
6197       State.AllocateReg(ArgGPRs);
6198   }
6199 
6200   SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
6201   SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
6202       State.getPendingArgFlags();
6203 
6204   assert(PendingLocs.size() == PendingArgFlags.size() &&
6205          "PendingLocs and PendingArgFlags out of sync");
6206 
6207   // Handle passing f64 on RV32D with a soft float ABI or when floating point
6208   // registers are exhausted.
6209   if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
6210     assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
6211            "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
6213     // GPRs, split between a GPR and the stack, or passed completely on the
6214     // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
6215     // cases.
6216     Register Reg = State.AllocateReg(ArgGPRs);
6217     LocVT = MVT::i32;
6218     if (!Reg) {
6219       unsigned StackOffset = State.AllocateStack(8, Align(8));
6220       State.addLoc(
6221           CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
6222       return false;
6223     }
6224     if (!State.AllocateReg(ArgGPRs))
6225       State.AllocateStack(4, Align(4));
6226     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6227     return false;
6228   }
6229 
6230   // Fixed-length vectors are located in the corresponding scalable-vector
6231   // container types.
6232   if (ValVT.isFixedLengthVector())
6233     LocVT = TLI.getContainerForFixedLengthVector(LocVT);
6234 
6235   // Split arguments might be passed indirectly, so keep track of the pending
6236   // values. Split vectors are passed via a mix of registers and indirectly, so
6237   // treat them as we would any other argument.
6238   if (!LocVT.isVector() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
6239     LocVT = XLenVT;
6240     LocInfo = CCValAssign::Indirect;
6241     PendingLocs.push_back(
6242         CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
6243     PendingArgFlags.push_back(ArgFlags);
6244     if (!ArgFlags.isSplitEnd()) {
6245       return false;
6246     }
6247   }
6248 
6249   // If the split argument only had two elements, it should be passed directly
6250   // in registers or on the stack.
6251   if (!LocVT.isVector() && ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
6252     assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
6253     // Apply the normal calling convention rules to the first half of the
6254     // split argument.
6255     CCValAssign VA = PendingLocs[0];
6256     ISD::ArgFlagsTy AF = PendingArgFlags[0];
6257     PendingLocs.clear();
6258     PendingArgFlags.clear();
6259     return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
6260                                ArgFlags);
6261   }
6262 
6263   // Allocate to a register if possible, or else a stack slot.
6264   Register Reg;
6265   if (ValVT == MVT::f16 && !UseGPRForF16_F32)
6266     Reg = State.AllocateReg(ArgFPR16s);
6267   else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
6268     Reg = State.AllocateReg(ArgFPR32s);
6269   else if (ValVT == MVT::f64 && !UseGPRForF64)
6270     Reg = State.AllocateReg(ArgFPR64s);
6271   else if (ValVT.isVector()) {
6272     const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
6273     if (RC == &RISCV::VRRegClass) {
6274       // Assign the first mask argument to V0.
6275       // This is an interim calling convention and it may be changed in the
6276       // future.
6277       if (FirstMaskArgument.hasValue() &&
6278           ValNo == FirstMaskArgument.getValue()) {
6279         Reg = State.AllocateReg(RISCV::V0);
6280       } else {
6281         Reg = State.AllocateReg(ArgVRs);
6282       }
6283     } else if (RC == &RISCV::VRM2RegClass) {
6284       Reg = State.AllocateReg(ArgVRM2s);
6285     } else if (RC == &RISCV::VRM4RegClass) {
6286       Reg = State.AllocateReg(ArgVRM4s);
6287     } else if (RC == &RISCV::VRM8RegClass) {
6288       Reg = State.AllocateReg(ArgVRM8s);
6289     } else {
6290       llvm_unreachable("Unhandled register class for ValueType");
6291     }
6292     if (!Reg) {
6293       // For return values, the vector must be passed fully via registers or
6294       // via the stack.
6295       // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
6296       // but we're using all of them.
6297       if (IsRet)
6298         return true;
6299       LocInfo = CCValAssign::Indirect;
6300       // Try using a GPR to pass the address
6301       Reg = State.AllocateReg(ArgGPRs);
6302       LocVT = XLenVT;
6303     }
6304   } else
6305     Reg = State.AllocateReg(ArgGPRs);
6306   unsigned StackOffset =
6307       Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
6308 
6309   // If we reach this point and PendingLocs is non-empty, we must be at the
6310   // end of a split argument that must be passed indirectly.
6311   if (!PendingLocs.empty()) {
6312     assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
6313     assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
6314 
6315     for (auto &It : PendingLocs) {
6316       if (Reg)
6317         It.convertToReg(Reg);
6318       else
6319         It.convertToMem(StackOffset);
6320       State.addLoc(It);
6321     }
6322     PendingLocs.clear();
6323     PendingArgFlags.clear();
6324     return false;
6325   }
6326 
6327   assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
6328           (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
6329          "Expected an XLenVT or vector types at this stage");
6330 
6331   if (Reg) {
6332     State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6333     return false;
6334   }
6335 
6336   // When a floating-point value is passed on the stack, no bit-conversion is
6337   // needed.
6338   if (ValVT.isFloatingPoint()) {
6339     LocVT = ValVT;
6340     LocInfo = CCValAssign::Full;
6341   }
6342   State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
6343   return false;
6344 }
6345 
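// Return the index of the first vector mask argument (a vector with i1
// elements), if any; CC_RISCV pre-assigns that argument to V0. For example, a
// signature of (nxv2i32, nxv2i1, nxv2i1) yields index 1, so only the first
// mask argument lands in V0.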
6346 template <typename ArgTy>
6347 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
6348   for (const auto &ArgIdx : enumerate(Args)) {
6349     MVT ArgVT = ArgIdx.value().VT;
6350     if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
6351       return ArgIdx.index();
6352   }
6353   return None;
6354 }
6355 
6356 void RISCVTargetLowering::analyzeInputArgs(
6357     MachineFunction &MF, CCState &CCInfo,
6358     const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
6359   unsigned NumArgs = Ins.size();
6360   FunctionType *FType = MF.getFunction().getFunctionType();
6361 
6362   Optional<unsigned> FirstMaskArgument;
6363   if (Subtarget.hasStdExtV())
6364     FirstMaskArgument = preAssignMask(Ins);
6365 
6366   for (unsigned i = 0; i != NumArgs; ++i) {
6367     MVT ArgVT = Ins[i].VT;
6368     ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
6369 
6370     Type *ArgTy = nullptr;
6371     if (IsRet)
6372       ArgTy = FType->getReturnType();
6373     else if (Ins[i].isOrigArg())
6374       ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
6375 
6376     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
6377     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
6378                  ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
6379                  FirstMaskArgument)) {
6380       LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
6381                         << EVT(ArgVT).getEVTString() << '\n');
6382       llvm_unreachable(nullptr);
6383     }
6384   }
6385 }
6386 
6387 void RISCVTargetLowering::analyzeOutputArgs(
6388     MachineFunction &MF, CCState &CCInfo,
6389     const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
6390     CallLoweringInfo *CLI) const {
6391   unsigned NumArgs = Outs.size();
6392 
6393   Optional<unsigned> FirstMaskArgument;
6394   if (Subtarget.hasStdExtV())
6395     FirstMaskArgument = preAssignMask(Outs);
6396 
6397   for (unsigned i = 0; i != NumArgs; i++) {
6398     MVT ArgVT = Outs[i].VT;
6399     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
6400     Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
6401 
6402     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
6403     if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
6404                  ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
6405                  FirstMaskArgument)) {
6406       LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
6407                         << EVT(ArgVT).getEVTString() << "\n");
6408       llvm_unreachable(nullptr);
6409     }
6410   }
6411 }
6412 
6413 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
6414 // values.
6415 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
6416                                    const CCValAssign &VA, const SDLoc &DL,
6417                                    const RISCVSubtarget &Subtarget) {
6418   switch (VA.getLocInfo()) {
6419   default:
6420     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6421   case CCValAssign::Full:
6422     if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
6423       Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
6424     break;
6425   case CCValAssign::BCvt:
6426     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
6427       Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
6428     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
6429       Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
6430     else
6431       Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
6432     break;
6433   }
6434   return Val;
6435 }
6436 
6437 // The caller is responsible for loading the full value if the argument is
6438 // passed with CCValAssign::Indirect.
6439 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
6440                                 const CCValAssign &VA, const SDLoc &DL,
6441                                 const RISCVTargetLowering &TLI) {
6442   MachineFunction &MF = DAG.getMachineFunction();
6443   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6444   EVT LocVT = VA.getLocVT();
6445   SDValue Val;
6446   const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
6447   Register VReg = RegInfo.createVirtualRegister(RC);
6448   RegInfo.addLiveIn(VA.getLocReg(), VReg);
6449   Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
6450 
6451   if (VA.getLocInfo() == CCValAssign::Indirect)
6452     return Val;
6453 
6454   return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
6455 }
6456 
6457 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
6458                                    const CCValAssign &VA, const SDLoc &DL,
6459                                    const RISCVSubtarget &Subtarget) {
6460   EVT LocVT = VA.getLocVT();
6461 
6462   switch (VA.getLocInfo()) {
6463   default:
6464     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6465   case CCValAssign::Full:
6466     if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
6467       Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
6468     break;
6469   case CCValAssign::BCvt:
6470     if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
6471       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
6472     else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
6473       Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
6474     else
6475       Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
6476     break;
6477   }
6478   return Val;
6479 }
6480 
6481 // The caller is responsible for loading the full value if the argument is
6482 // passed with CCValAssign::Indirect.
6483 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
6484                                 const CCValAssign &VA, const SDLoc &DL) {
6485   MachineFunction &MF = DAG.getMachineFunction();
6486   MachineFrameInfo &MFI = MF.getFrameInfo();
6487   EVT LocVT = VA.getLocVT();
6488   EVT ValVT = VA.getValVT();
6489   EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
6490   int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
6491                                  VA.getLocMemOffset(), /*Immutable=*/true);
6492   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6493   SDValue Val;
6494 
6495   ISD::LoadExtType ExtType;
6496   switch (VA.getLocInfo()) {
6497   default:
6498     llvm_unreachable("Unexpected CCValAssign::LocInfo");
6499   case CCValAssign::Full:
6500   case CCValAssign::Indirect:
6501   case CCValAssign::BCvt:
6502     ExtType = ISD::NON_EXTLOAD;
6503     break;
6504   }
6505   Val = DAG.getExtLoad(
6506       ExtType, DL, LocVT, Chain, FIN,
6507       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
6508   return Val;
6509 }
6510 
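// Reassemble an f64 that was passed under the RV32 soft-float f64 rules:
// either entirely on the stack, split between a7 and the stack, or in a pair
// of GPRs (with the low half in the lower-numbered register).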
6511 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
6512                                        const CCValAssign &VA, const SDLoc &DL) {
6513   assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
6514          "Unexpected VA");
6515   MachineFunction &MF = DAG.getMachineFunction();
6516   MachineFrameInfo &MFI = MF.getFrameInfo();
6517   MachineRegisterInfo &RegInfo = MF.getRegInfo();
6518 
6519   if (VA.isMemLoc()) {
6520     // f64 is passed on the stack.
6521     int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
6522     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
6523     return DAG.getLoad(MVT::f64, DL, Chain, FIN,
6524                        MachinePointerInfo::getFixedStack(MF, FI));
6525   }
6526 
6527   assert(VA.isRegLoc() && "Expected register VA assignment");
6528 
6529   Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6530   RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
6531   SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
6532   SDValue Hi;
6533   if (VA.getLocReg() == RISCV::X17) {
6534     // Second half of f64 is passed on the stack.
6535     int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
6536     SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
6537     Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
6538                      MachinePointerInfo::getFixedStack(MF, FI));
6539   } else {
6540     // Second half of f64 is passed in another GPR.
6541     Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
6542     RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
6543     Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
6544   }
6545   return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
6546 }
6547 
6548 // FastCC shows less than a 1% performance improvement on some particular
6549 // benchmarks, but it may theoretically still be beneficial in some cases.
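// FastCC allocates arguments to the standard argument registers first and then
// to caller-saved temporaries (t2-t6 and ft0-ft11), falling back to the stack
// once those are exhausted.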
6550 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
6551                             CCValAssign::LocInfo LocInfo,
6552                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
6553 
6554   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
6555     // X5 and X6 might be used by the save-restore libcalls.
6556     static const MCPhysReg GPRList[] = {
6557         RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
6558         RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7,  RISCV::X28,
6559         RISCV::X29, RISCV::X30, RISCV::X31};
6560     if (unsigned Reg = State.AllocateReg(GPRList)) {
6561       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6562       return false;
6563     }
6564   }
6565 
6566   if (LocVT == MVT::f16) {
6567     static const MCPhysReg FPR16List[] = {
6568         RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
6569         RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H,  RISCV::F1_H,
6570         RISCV::F2_H,  RISCV::F3_H,  RISCV::F4_H,  RISCV::F5_H,  RISCV::F6_H,
6571         RISCV::F7_H,  RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
6572     if (unsigned Reg = State.AllocateReg(FPR16List)) {
6573       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6574       return false;
6575     }
6576   }
6577 
6578   if (LocVT == MVT::f32) {
6579     static const MCPhysReg FPR32List[] = {
6580         RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
6581         RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F,  RISCV::F1_F,
6582         RISCV::F2_F,  RISCV::F3_F,  RISCV::F4_F,  RISCV::F5_F,  RISCV::F6_F,
6583         RISCV::F7_F,  RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
6584     if (unsigned Reg = State.AllocateReg(FPR32List)) {
6585       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6586       return false;
6587     }
6588   }
6589 
6590   if (LocVT == MVT::f64) {
6591     static const MCPhysReg FPR64List[] = {
6592         RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
6593         RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D,  RISCV::F1_D,
6594         RISCV::F2_D,  RISCV::F3_D,  RISCV::F4_D,  RISCV::F5_D,  RISCV::F6_D,
6595         RISCV::F7_D,  RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
6596     if (unsigned Reg = State.AllocateReg(FPR64List)) {
6597       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6598       return false;
6599     }
6600   }
6601 
6602   if (LocVT == MVT::i32 || LocVT == MVT::f32) {
6603     unsigned Offset4 = State.AllocateStack(4, Align(4));
6604     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
6605     return false;
6606   }
6607 
6608   if (LocVT == MVT::i64 || LocVT == MVT::f64) {
6609     unsigned Offset5 = State.AllocateStack(8, Align(8));
6610     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
6611     return false;
6612   }
6613 
6614   return true; // CC didn't match.
6615 }
6616 
6617 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
6618                          CCValAssign::LocInfo LocInfo,
6619                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
6620 
6621   if (LocVT == MVT::i32 || LocVT == MVT::i64) {
6622     // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
6623     //                        s1    s2  s3  s4  s5  s6  s7  s8  s9  s10 s11
6624     static const MCPhysReg GPRList[] = {
6625         RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
6626         RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
6627     if (unsigned Reg = State.AllocateReg(GPRList)) {
6628       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6629       return false;
6630     }
6631   }
6632 
6633   if (LocVT == MVT::f32) {
6634     // Pass in STG registers: F1, ..., F6
6635     //                        fs0 ... fs5
6636     static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
6637                                           RISCV::F18_F, RISCV::F19_F,
6638                                           RISCV::F20_F, RISCV::F21_F};
6639     if (unsigned Reg = State.AllocateReg(FPR32List)) {
6640       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6641       return false;
6642     }
6643   }
6644 
6645   if (LocVT == MVT::f64) {
6646     // Pass in STG registers: D1, ..., D6
6647     //                        fs6 ... fs11
6648     static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
6649                                           RISCV::F24_D, RISCV::F25_D,
6650                                           RISCV::F26_D, RISCV::F27_D};
6651     if (unsigned Reg = State.AllocateReg(FPR64List)) {
6652       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6653       return false;
6654     }
6655   }
6656 
6657   report_fatal_error("No registers left in GHC calling convention");
6658   return true;
6659 }
6660 
6661 // Transform physical registers into virtual registers.
6662 SDValue RISCVTargetLowering::LowerFormalArguments(
6663     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
6664     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
6665     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
6666 
6667   MachineFunction &MF = DAG.getMachineFunction();
6668 
6669   switch (CallConv) {
6670   default:
6671     report_fatal_error("Unsupported calling convention");
6672   case CallingConv::C:
6673   case CallingConv::Fast:
6674     break;
6675   case CallingConv::GHC:
6676     if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
6677         !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
6678       report_fatal_error(
6679         "GHC calling convention requires the F and D instruction set extensions");
6680   }
6681 
6682   const Function &Func = MF.getFunction();
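  // Interrupt handlers (e.g. a function declared in C as
  //   void __attribute__((interrupt("machine"))) handler(void);
  // assuming Clang's RISC-V interrupt attribute) must take no arguments and
  // must name a supported kind: "user", "supervisor" or "machine".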
6683   if (Func.hasFnAttribute("interrupt")) {
6684     if (!Func.arg_empty())
6685       report_fatal_error(
6686         "Functions with the interrupt attribute cannot have arguments!");
6687 
6688     StringRef Kind =
6689       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
6690 
6691     if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
6692       report_fatal_error(
6693         "Function interrupt attribute argument not supported!");
6694   }
6695 
6696   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6697   MVT XLenVT = Subtarget.getXLenVT();
6698   unsigned XLenInBytes = Subtarget.getXLen() / 8;
6699   // Used with varargs to accumulate store chains.
6700   std::vector<SDValue> OutChains;
6701 
6702   // Assign locations to all of the incoming arguments.
6703   SmallVector<CCValAssign, 16> ArgLocs;
6704   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
6705 
6706   if (CallConv == CallingConv::Fast)
6707     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
6708   else if (CallConv == CallingConv::GHC)
6709     CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
6710   else
6711     analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
6712 
6713   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
6714     CCValAssign &VA = ArgLocs[i];
6715     SDValue ArgValue;
6716     // Passing f64 on RV32D with a soft float ABI must be handled as a special
6717     // case.
6718     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
6719       ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
6720     else if (VA.isRegLoc())
6721       ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
6722     else
6723       ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
6724 
6725     if (VA.getLocInfo() == CCValAssign::Indirect) {
6726       // If the original argument was split and passed by reference (e.g. i128
6727       // on RV32), we need to load all parts of it here (using the same
6728       // address). Vectors may be partly split to registers and partly to the
6729       // stack, in which case the base address is partly offset and subsequent
6730       // stores are relative to that.
6731       InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
6732                                    MachinePointerInfo()));
6733       unsigned ArgIndex = Ins[i].OrigArgIndex;
6734       unsigned ArgPartOffset = Ins[i].PartOffset;
6735       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
6736       while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
6737         CCValAssign &PartVA = ArgLocs[i + 1];
6738         unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
6739         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
6740                                       DAG.getIntPtrConstant(PartOffset, DL));
6741         InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
6742                                      MachinePointerInfo()));
6743         ++i;
6744       }
6745       continue;
6746     }
6747     InVals.push_back(ArgValue);
6748   }
6749 
6750   if (IsVarArg) {
6751     ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
6752     unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
6753     const TargetRegisterClass *RC = &RISCV::GPRRegClass;
6754     MachineFrameInfo &MFI = MF.getFrameInfo();
6755     MachineRegisterInfo &RegInfo = MF.getRegInfo();
6756     RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
6757 
6758     // Offset of the first variable argument from stack pointer, and size of
6759     // the vararg save area. For now, the varargs save area is either zero or
6760     // large enough to hold a0-a7.
6761     int VaArgOffset, VarArgsSaveSize;
6762 
6763     // If all registers are allocated, then all varargs must be passed on the
6764     // stack and we don't need to save any argregs.
6765     if (ArgRegs.size() == Idx) {
6766       VaArgOffset = CCInfo.getNextStackOffset();
6767       VarArgsSaveSize = 0;
6768     } else {
6769       VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
6770       VaArgOffset = -VarArgsSaveSize;
6771     }
6772 
6773     // Record the frame index of the first variable argument,
6774     // which is a value needed by VASTART.
6775     int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
6776     RVFI->setVarArgsFrameIndex(FI);
6777 
6778     // If saving an odd number of registers then create an extra stack slot to
6779     // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
6780     // offsets to even-numbered registers remain 2*XLEN-aligned.
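    // For example, int f(int fixed, ...) on RV32 uses a0 for the fixed
    // argument (Idx == 1), so a1-a7 are saved (7 * 4 bytes) and one extra
    // 4-byte slot is created below them, bringing the save area to 32 bytes.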
6781     if (Idx % 2) {
6782       MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
6783       VarArgsSaveSize += XLenInBytes;
6784     }
6785 
6786     // Copy the integer registers that may have been used for passing varargs
6787     // to the vararg save area.
6788     for (unsigned I = Idx; I < ArgRegs.size();
6789          ++I, VaArgOffset += XLenInBytes) {
6790       const Register Reg = RegInfo.createVirtualRegister(RC);
6791       RegInfo.addLiveIn(ArgRegs[I], Reg);
6792       SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
6793       FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
6794       SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
6795       SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
6796                                    MachinePointerInfo::getFixedStack(MF, FI));
6797       cast<StoreSDNode>(Store.getNode())
6798           ->getMemOperand()
6799           ->setValue((Value *)nullptr);
6800       OutChains.push_back(Store);
6801     }
6802     RVFI->setVarArgsSaveSize(VarArgsSaveSize);
6803   }
6804 
6805   // All stores are grouped in one node to allow the matching between
6806   // the size of Ins and InVals. This only happens for vararg functions.
6807   if (!OutChains.empty()) {
6808     OutChains.push_back(Chain);
6809     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
6810   }
6811 
6812   return Chain;
6813 }
6814 
6815 /// isEligibleForTailCallOptimization - Check whether the call is eligible
6816 /// for tail call optimization.
6817 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
6818 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
6819     CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
6820     const SmallVector<CCValAssign, 16> &ArgLocs) const {
6821 
6822   auto &Callee = CLI.Callee;
6823   auto CalleeCC = CLI.CallConv;
6824   auto &Outs = CLI.Outs;
6825   auto &Caller = MF.getFunction();
6826   auto CallerCC = Caller.getCallingConv();
6827 
6828   // Exception-handling functions need a special set of instructions to
6829   // indicate a return to the hardware. Tail-calling another function would
6830   // probably break this.
6831   // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
6832   // should be expanded as new function attributes are introduced.
6833   if (Caller.hasFnAttribute("interrupt"))
6834     return false;
6835 
6836   // Do not tail call opt if the stack is used to pass parameters.
6837   if (CCInfo.getNextStackOffset() != 0)
6838     return false;
6839 
6840   // Do not tail call opt if any parameters need to be passed indirectly.
6841   // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
6842   // passed indirectly. So the address of the value will be passed in a
6843   // register, or if not available, then the address is put on the stack.
6844   // Passing indirectly usually requires allocating stack space to store the
6845   // value, so the CCInfo.getNextStackOffset() != 0 check alone is not enough;
6846   // we also need to check whether any of the CCValAssigns in ArgLocs use
6847   // CCValAssign::Indirect.
6848   for (auto &VA : ArgLocs)
6849     if (VA.getLocInfo() == CCValAssign::Indirect)
6850       return false;
6851 
6852   // Do not tail call opt if either caller or callee uses struct return
6853   // semantics.
6854   auto IsCallerStructRet = Caller.hasStructRetAttr();
6855   auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
6856   if (IsCallerStructRet || IsCalleeStructRet)
6857     return false;
6858 
6859   // Externally-defined functions with weak linkage should not be
6860   // tail-called. The behaviour of branch instructions in this situation (as
6861   // used for tail calls) is implementation-defined, so we cannot rely on the
6862   // linker replacing the tail call with a return.
6863   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
6864     const GlobalValue *GV = G->getGlobal();
6865     if (GV->hasExternalWeakLinkage())
6866       return false;
6867   }
6868 
6869   // The callee has to preserve all registers the caller needs to preserve.
6870   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
6871   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
6872   if (CalleeCC != CallerCC) {
6873     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
6874     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
6875       return false;
6876   }
6877 
6878   // Byval parameters hand the function a pointer directly into the stack area
6879   // we want to reuse during a tail call. Working around this *is* possible
6880   // but less efficient and uglier in LowerCall.
6881   for (auto &Arg : Outs)
6882     if (Arg.Flags.isByVal())
6883       return false;
6884 
6885   return true;
6886 }
6887 
6888 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
6889 // and output parameter nodes.
6890 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
6891                                        SmallVectorImpl<SDValue> &InVals) const {
6892   SelectionDAG &DAG = CLI.DAG;
6893   SDLoc &DL = CLI.DL;
6894   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
6895   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
6896   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
6897   SDValue Chain = CLI.Chain;
6898   SDValue Callee = CLI.Callee;
6899   bool &IsTailCall = CLI.IsTailCall;
6900   CallingConv::ID CallConv = CLI.CallConv;
6901   bool IsVarArg = CLI.IsVarArg;
6902   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6903   MVT XLenVT = Subtarget.getXLenVT();
6904 
6905   MachineFunction &MF = DAG.getMachineFunction();
6906 
6907   // Analyze the operands of the call, assigning locations to each operand.
6908   SmallVector<CCValAssign, 16> ArgLocs;
6909   CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
6910 
6911   if (CallConv == CallingConv::Fast)
6912     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
6913   else if (CallConv == CallingConv::GHC)
6914     ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
6915   else
6916     analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
6917 
6918   // Check if it's really possible to do a tail call.
6919   if (IsTailCall)
6920     IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
6921 
6922   if (IsTailCall)
6923     ++NumTailCalls;
6924   else if (CLI.CB && CLI.CB->isMustTailCall())
6925     report_fatal_error("failed to perform tail call elimination on a call "
6926                        "site marked musttail");
6927 
6928   // Get a count of how many bytes are to be pushed on the stack.
6929   unsigned NumBytes = ArgCCInfo.getNextStackOffset();
6930 
6931   // Create local copies for byval args
6932   SmallVector<SDValue, 8> ByValArgs;
6933   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
6934     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6935     if (!Flags.isByVal())
6936       continue;
6937 
6938     SDValue Arg = OutVals[i];
6939     unsigned Size = Flags.getByValSize();
6940     Align Alignment = Flags.getNonZeroByValAlign();
6941 
6942     int FI =
6943         MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
6944     SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
6945     SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
6946 
6947     Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
6948                           /*IsVolatile=*/false,
6949                           /*AlwaysInline=*/false, IsTailCall,
6950                           MachinePointerInfo(), MachinePointerInfo());
6951     ByValArgs.push_back(FIPtr);
6952   }
6953 
6954   if (!IsTailCall)
6955     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
6956 
6957   // Copy argument values to their designated locations.
6958   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
6959   SmallVector<SDValue, 8> MemOpChains;
6960   SDValue StackPtr;
6961   for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
6962     CCValAssign &VA = ArgLocs[i];
6963     SDValue ArgValue = OutVals[i];
6964     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6965 
6966     // Handle passing f64 on RV32D with a soft float ABI as a special case.
6967     bool IsF64OnRV32DSoftABI =
6968         VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
6969     if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
6970       SDValue SplitF64 = DAG.getNode(
6971           RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
6972       SDValue Lo = SplitF64.getValue(0);
6973       SDValue Hi = SplitF64.getValue(1);
6974 
6975       Register RegLo = VA.getLocReg();
6976       RegsToPass.push_back(std::make_pair(RegLo, Lo));
6977 
6978       if (RegLo == RISCV::X17) {
6979         // Second half of f64 is passed on the stack.
6980         // Work out the address of the stack slot.
6981         if (!StackPtr.getNode())
6982           StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
6983         // Emit the store.
6984         MemOpChains.push_back(
6985             DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
6986       } else {
6987         // Second half of f64 is passed in another GPR.
6988         assert(RegLo < RISCV::X31 && "Invalid register pair");
6989         Register RegHigh = RegLo + 1;
6990         RegsToPass.push_back(std::make_pair(RegHigh, Hi));
6991       }
6992       continue;
6993     }
6994 
6995     // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
6996     // as any other MemLoc.
6997 
6998     // Promote the value if needed.
6999     // For now, only handle fully promoted and indirect arguments.
7000     if (VA.getLocInfo() == CCValAssign::Indirect) {
7001       // Store the argument in a stack slot and pass its address.
7002       SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
7003       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
7004       MemOpChains.push_back(
7005           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
7006                        MachinePointerInfo::getFixedStack(MF, FI)));
7007       // If the original argument was split (e.g. i128), we need
7008       // to store the required parts of it here (and pass just one address).
7009       // Vectors may be partly split to registers and partly to the stack, in
7010       // which case the base address is partly offset and subsequent stores are
7011       // relative to that.
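      // For example, an i128 argument on RV32 is larger than 2*XLEN and is
      // passed indirectly: its four i32 parts are stored at offsets 0, 4, 8
      // and 12 from SpillSlot, and only the slot's address is passed on.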
7012       unsigned ArgIndex = Outs[i].OrigArgIndex;
7013       unsigned ArgPartOffset = Outs[i].PartOffset;
7014       assert(VA.getValVT().isVector() || ArgPartOffset == 0);
7015       while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
7016         SDValue PartValue = OutVals[i + 1];
7017         unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
7018         SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
7019                                       DAG.getIntPtrConstant(PartOffset, DL));
7020         MemOpChains.push_back(
7021             DAG.getStore(Chain, DL, PartValue, Address,
7022                          MachinePointerInfo::getFixedStack(MF, FI)));
7023         ++i;
7024       }
7025       ArgValue = SpillSlot;
7026     } else {
7027       ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
7028     }
7029 
7030     // Use local copy if it is a byval arg.
7031     if (Flags.isByVal())
7032       ArgValue = ByValArgs[j++];
7033 
7034     if (VA.isRegLoc()) {
7035       // Queue up the argument copies and emit them at the end.
7036       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
7037     } else {
7038       assert(VA.isMemLoc() && "Argument not register or memory");
7039       assert(!IsTailCall && "Tail call not allowed if stack is used "
7040                             "for passing parameters");
7041 
7042       // Work out the address of the stack slot.
7043       if (!StackPtr.getNode())
7044         StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
7045       SDValue Address =
7046           DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
7047                       DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
7048 
7049       // Emit the store.
7050       MemOpChains.push_back(
7051           DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
7052     }
7053   }
7054 
7055   // Join the stores, which are independent of one another.
7056   if (!MemOpChains.empty())
7057     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
7058 
7059   SDValue Glue;
7060 
7061   // Build a sequence of copy-to-reg nodes, chained and glued together.
7062   for (auto &Reg : RegsToPass) {
7063     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
7064     Glue = Chain.getValue(1);
7065   }
7066 
7067   // Validate that none of the argument registers have been marked as
7068   // reserved; if so, report an error. Do the same for the return address if
7069   // this is not a tail call.
7070   validateCCReservedRegs(RegsToPass, MF);
7071   if (!IsTailCall &&
7072       MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
7073     MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7074         MF.getFunction(),
7075         "Return address register required, but has been reserved."});
7076 
7077   // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
7078   // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
7079   // split it, and so that the direct call can be matched by PseudoCALL.
7080   if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
7081     const GlobalValue *GV = S->getGlobal();
7082 
7083     unsigned OpFlags = RISCVII::MO_CALL;
7084     if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
7085       OpFlags = RISCVII::MO_PLT;
7086 
7087     Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
7088   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
7089     unsigned OpFlags = RISCVII::MO_CALL;
7090 
7091     if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
7092                                                  nullptr))
7093       OpFlags = RISCVII::MO_PLT;
7094 
7095     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
7096   }
7097 
7098   // The first call operand is the chain and the second is the target address.
7099   SmallVector<SDValue, 8> Ops;
7100   Ops.push_back(Chain);
7101   Ops.push_back(Callee);
7102 
7103   // Add argument registers to the end of the list so that they are
7104   // known live into the call.
7105   for (auto &Reg : RegsToPass)
7106     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
7107 
7108   if (!IsTailCall) {
7109     // Add a register mask operand representing the call-preserved registers.
7110     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
7111     const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
7112     assert(Mask && "Missing call preserved mask for calling convention");
7113     Ops.push_back(DAG.getRegisterMask(Mask));
7114   }
7115 
7116   // Glue the call to the argument copies, if any.
7117   if (Glue.getNode())
7118     Ops.push_back(Glue);
7119 
7120   // Emit the call.
7121   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7122 
7123   if (IsTailCall) {
7124     MF.getFrameInfo().setHasTailCall();
7125     return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
7126   }
7127 
7128   Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
7129   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
7130   Glue = Chain.getValue(1);
7131 
7132   // Mark the end of the call, which is glued to the call itself.
7133   Chain = DAG.getCALLSEQ_END(Chain,
7134                              DAG.getConstant(NumBytes, DL, PtrVT, true),
7135                              DAG.getConstant(0, DL, PtrVT, true),
7136                              Glue, DL);
7137   Glue = Chain.getValue(1);
7138 
7139   // Assign locations to each value returned by this call.
7140   SmallVector<CCValAssign, 16> RVLocs;
7141   CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
7142   analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
7143 
7144   // Copy all of the result registers out of their specified physreg.
7145   for (auto &VA : RVLocs) {
7146     // Copy the value out
7147     SDValue RetValue =
7148         DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
7149     // Glue the RetValue to the end of the call sequence
7150     Chain = RetValue.getValue(1);
7151     Glue = RetValue.getValue(2);
7152 
7153     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
7154       assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
7155       SDValue RetValue2 =
7156           DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
7157       Chain = RetValue2.getValue(1);
7158       Glue = RetValue2.getValue(2);
7159       RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
7160                              RetValue2);
7161     }
7162 
7163     RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
7164 
7165     InVals.push_back(RetValue);
7166   }
7167 
7168   return Chain;
7169 }
7170 
7171 bool RISCVTargetLowering::CanLowerReturn(
7172     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
7173     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
7174   SmallVector<CCValAssign, 16> RVLocs;
7175   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
7176 
7177   Optional<unsigned> FirstMaskArgument;
7178   if (Subtarget.hasStdExtV())
7179     FirstMaskArgument = preAssignMask(Outs);
7180 
7181   for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
7182     MVT VT = Outs[i].VT;
7183     ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
7184     RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
7185     if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
7186                  ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
7187                  *this, FirstMaskArgument))
7188       return false;
7189   }
7190   return true;
7191 }
7192 
7193 SDValue
7194 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7195                                  bool IsVarArg,
7196                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
7197                                  const SmallVectorImpl<SDValue> &OutVals,
7198                                  const SDLoc &DL, SelectionDAG &DAG) const {
7199   const MachineFunction &MF = DAG.getMachineFunction();
7200   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
7201 
7202   // Stores the assignment of the return value to a location.
7203   SmallVector<CCValAssign, 16> RVLocs;
7204 
7205   // Info about the registers and stack slot.
7206   CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
7207                  *DAG.getContext());
7208 
7209   analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
7210                     nullptr);
7211 
7212   if (CallConv == CallingConv::GHC && !RVLocs.empty())
7213     report_fatal_error("GHC functions return void only");
7214 
7215   SDValue Glue;
7216   SmallVector<SDValue, 4> RetOps(1, Chain);
7217 
7218   // Copy the result values into the output registers.
7219   for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
7220     SDValue Val = OutVals[i];
7221     CCValAssign &VA = RVLocs[i];
7222     assert(VA.isRegLoc() && "Can only return in registers!");
7223 
7224     if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
7225       // Handle returning f64 on RV32D with a soft float ABI.
7226       assert(VA.isRegLoc() && "Expected return via registers");
7227       SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
7228                                      DAG.getVTList(MVT::i32, MVT::i32), Val);
7229       SDValue Lo = SplitF64.getValue(0);
7230       SDValue Hi = SplitF64.getValue(1);
7231       Register RegLo = VA.getLocReg();
7232       assert(RegLo < RISCV::X31 && "Invalid register pair");
7233       Register RegHi = RegLo + 1;
7234 
7235       if (STI.isRegisterReservedByUser(RegLo) ||
7236           STI.isRegisterReservedByUser(RegHi))
7237         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7238             MF.getFunction(),
7239             "Return value register required, but has been reserved."});
7240 
7241       Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
7242       Glue = Chain.getValue(1);
7243       RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
7244       Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
7245       Glue = Chain.getValue(1);
7246       RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
7247     } else {
7248       // Handle a 'normal' return.
7249       Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
7250       Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
7251 
7252       if (STI.isRegisterReservedByUser(VA.getLocReg()))
7253         MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
7254             MF.getFunction(),
7255             "Return value register required, but has been reserved."});
7256 
7257       // Guarantee that all emitted copies are stuck together.
7258       Glue = Chain.getValue(1);
7259       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7260     }
7261   }
7262 
7263   RetOps[0] = Chain; // Update chain.
7264 
7265   // Add the glue node if we have it.
7266   if (Glue.getNode()) {
7267     RetOps.push_back(Glue);
7268   }
7269 
7270   // Interrupt service routines use different return instructions.
7271   const Function &Func = DAG.getMachineFunction().getFunction();
7272   if (Func.hasFnAttribute("interrupt")) {
7273     if (!Func.getReturnType()->isVoidTy())
7274       report_fatal_error(
7275           "Functions with the interrupt attribute must have void return type!");
7276 
7277     MachineFunction &MF = DAG.getMachineFunction();
7278     StringRef Kind =
7279       MF.getFunction().getFnAttribute("interrupt").getValueAsString();
7280 
7281     unsigned RetOpc;
7282     if (Kind == "user")
7283       RetOpc = RISCVISD::URET_FLAG;
7284     else if (Kind == "supervisor")
7285       RetOpc = RISCVISD::SRET_FLAG;
7286     else
7287       RetOpc = RISCVISD::MRET_FLAG;
7288 
7289     return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
7290   }
7291 
7292   return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
7293 }
7294 
7295 void RISCVTargetLowering::validateCCReservedRegs(
7296     const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
7297     MachineFunction &MF) const {
7298   const Function &F = MF.getFunction();
7299   const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
7300 
7301   if (llvm::any_of(Regs, [&STI](auto Reg) {
7302         return STI.isRegisterReservedByUser(Reg.first);
7303       }))
7304     F.getContext().diagnose(DiagnosticInfoUnsupported{
7305         F, "Argument register required, but has been reserved."});
7306 }
7307 
7308 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
7309   return CI->isTailCall();
7310 }
7311 
7312 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
7313 #define NODE_NAME_CASE(NODE)                                                   \
7314   case RISCVISD::NODE:                                                         \
7315     return "RISCVISD::" #NODE;
7316   // clang-format off
7317   switch ((RISCVISD::NodeType)Opcode) {
7318   case RISCVISD::FIRST_NUMBER:
7319     break;
7320   NODE_NAME_CASE(RET_FLAG)
7321   NODE_NAME_CASE(URET_FLAG)
7322   NODE_NAME_CASE(SRET_FLAG)
7323   NODE_NAME_CASE(MRET_FLAG)
7324   NODE_NAME_CASE(CALL)
7325   NODE_NAME_CASE(SELECT_CC)
7326   NODE_NAME_CASE(BR_CC)
7327   NODE_NAME_CASE(BuildPairF64)
7328   NODE_NAME_CASE(SplitF64)
7329   NODE_NAME_CASE(TAIL)
7330   NODE_NAME_CASE(MULHSU)
7331   NODE_NAME_CASE(SLLW)
7332   NODE_NAME_CASE(SRAW)
7333   NODE_NAME_CASE(SRLW)
7334   NODE_NAME_CASE(DIVW)
7335   NODE_NAME_CASE(DIVUW)
7336   NODE_NAME_CASE(REMUW)
7337   NODE_NAME_CASE(ROLW)
7338   NODE_NAME_CASE(RORW)
7339   NODE_NAME_CASE(CLZW)
7340   NODE_NAME_CASE(CTZW)
7341   NODE_NAME_CASE(FSLW)
7342   NODE_NAME_CASE(FSRW)
7343   NODE_NAME_CASE(FSL)
7344   NODE_NAME_CASE(FSR)
7345   NODE_NAME_CASE(FMV_H_X)
7346   NODE_NAME_CASE(FMV_X_ANYEXTH)
7347   NODE_NAME_CASE(FMV_W_X_RV64)
7348   NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
7349   NODE_NAME_CASE(READ_CYCLE_WIDE)
7350   NODE_NAME_CASE(GREV)
7351   NODE_NAME_CASE(GREVW)
7352   NODE_NAME_CASE(GORC)
7353   NODE_NAME_CASE(GORCW)
7354   NODE_NAME_CASE(SHFL)
7355   NODE_NAME_CASE(VMV_V_X_VL)
7356   NODE_NAME_CASE(VFMV_V_F_VL)
7357   NODE_NAME_CASE(VMV_X_S)
7358   NODE_NAME_CASE(VMV_S_X_VL)
7359   NODE_NAME_CASE(VFMV_S_F_VL)
7360   NODE_NAME_CASE(SPLAT_VECTOR_I64)
7361   NODE_NAME_CASE(READ_VLENB)
7362   NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
7363   NODE_NAME_CASE(VLEFF)
7364   NODE_NAME_CASE(VLEFF_MASK)
7365   NODE_NAME_CASE(VSLIDEUP_VL)
7366   NODE_NAME_CASE(VSLIDE1UP_VL)
7367   NODE_NAME_CASE(VSLIDEDOWN_VL)
7368   NODE_NAME_CASE(VSLIDE1DOWN_VL)
7369   NODE_NAME_CASE(VID_VL)
7370   NODE_NAME_CASE(VFNCVT_ROD_VL)
7371   NODE_NAME_CASE(VECREDUCE_ADD_VL)
7372   NODE_NAME_CASE(VECREDUCE_UMAX_VL)
7373   NODE_NAME_CASE(VECREDUCE_SMAX_VL)
7374   NODE_NAME_CASE(VECREDUCE_UMIN_VL)
7375   NODE_NAME_CASE(VECREDUCE_SMIN_VL)
7376   NODE_NAME_CASE(VECREDUCE_AND_VL)
7377   NODE_NAME_CASE(VECREDUCE_OR_VL)
7378   NODE_NAME_CASE(VECREDUCE_XOR_VL)
7379   NODE_NAME_CASE(VECREDUCE_FADD_VL)
7380   NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
7381   NODE_NAME_CASE(ADD_VL)
7382   NODE_NAME_CASE(AND_VL)
7383   NODE_NAME_CASE(MUL_VL)
7384   NODE_NAME_CASE(OR_VL)
7385   NODE_NAME_CASE(SDIV_VL)
7386   NODE_NAME_CASE(SHL_VL)
7387   NODE_NAME_CASE(SREM_VL)
7388   NODE_NAME_CASE(SRA_VL)
7389   NODE_NAME_CASE(SRL_VL)
7390   NODE_NAME_CASE(SUB_VL)
7391   NODE_NAME_CASE(UDIV_VL)
7392   NODE_NAME_CASE(UREM_VL)
7393   NODE_NAME_CASE(XOR_VL)
7394   NODE_NAME_CASE(FADD_VL)
7395   NODE_NAME_CASE(FSUB_VL)
7396   NODE_NAME_CASE(FMUL_VL)
7397   NODE_NAME_CASE(FDIV_VL)
7398   NODE_NAME_CASE(FNEG_VL)
7399   NODE_NAME_CASE(FABS_VL)
7400   NODE_NAME_CASE(FSQRT_VL)
7401   NODE_NAME_CASE(FMA_VL)
7402   NODE_NAME_CASE(FCOPYSIGN_VL)
7403   NODE_NAME_CASE(SMIN_VL)
7404   NODE_NAME_CASE(SMAX_VL)
7405   NODE_NAME_CASE(UMIN_VL)
7406   NODE_NAME_CASE(UMAX_VL)
7407   NODE_NAME_CASE(MULHS_VL)
7408   NODE_NAME_CASE(MULHU_VL)
7409   NODE_NAME_CASE(FP_TO_SINT_VL)
7410   NODE_NAME_CASE(FP_TO_UINT_VL)
7411   NODE_NAME_CASE(SINT_TO_FP_VL)
7412   NODE_NAME_CASE(UINT_TO_FP_VL)
7413   NODE_NAME_CASE(FP_EXTEND_VL)
7414   NODE_NAME_CASE(FP_ROUND_VL)
7415   NODE_NAME_CASE(SETCC_VL)
7416   NODE_NAME_CASE(VSELECT_VL)
7417   NODE_NAME_CASE(VMAND_VL)
7418   NODE_NAME_CASE(VMOR_VL)
7419   NODE_NAME_CASE(VMXOR_VL)
7420   NODE_NAME_CASE(VMCLR_VL)
7421   NODE_NAME_CASE(VMSET_VL)
7422   NODE_NAME_CASE(VRGATHER_VX_VL)
7423   NODE_NAME_CASE(VRGATHER_VV_VL)
7424   NODE_NAME_CASE(VRGATHEREI16_VV_VL)
7425   NODE_NAME_CASE(VSEXT_VL)
7426   NODE_NAME_CASE(VZEXT_VL)
7427   NODE_NAME_CASE(VPOPC_VL)
7428   NODE_NAME_CASE(VLE_VL)
7429   NODE_NAME_CASE(VSE_VL)
7430   NODE_NAME_CASE(READ_CSR)
7431   NODE_NAME_CASE(WRITE_CSR)
7432   NODE_NAME_CASE(SWAP_CSR)
7433   }
7434   // clang-format on
7435   return nullptr;
7436 #undef NODE_NAME_CASE
7437 }
7438 
7439 /// getConstraintType - Given a constraint letter, return the type of
7440 /// constraint it is for this target.
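/// For RISC-V this currently recognises 'f' and 'v' as register-class
/// constraints, 'I', 'J' and 'K' as immediate constraints and 'A' as a memory
/// operand constraint; anything else is deferred to the generic
/// implementation.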
7441 RISCVTargetLowering::ConstraintType
7442 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
7443   if (Constraint.size() == 1) {
7444     switch (Constraint[0]) {
7445     default:
7446       break;
7447     case 'f':
7448     case 'v':
7449       return C_RegisterClass;
7450     case 'I':
7451     case 'J':
7452     case 'K':
7453       return C_Immediate;
7454     case 'A':
7455       return C_Memory;
7456     }
7457   }
7458   return TargetLowering::getConstraintType(Constraint);
7459 }
7460 
7461 std::pair<unsigned, const TargetRegisterClass *>
7462 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
7463                                                   StringRef Constraint,
7464                                                   MVT VT) const {
7465   // First, see if this is a constraint that directly corresponds to a
7466   // RISCV register class.
7467   if (Constraint.size() == 1) {
7468     switch (Constraint[0]) {
7469     case 'r':
7470       return std::make_pair(0U, &RISCV::GPRRegClass);
7471     case 'f':
7472       if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
7473         return std::make_pair(0U, &RISCV::FPR16RegClass);
7474       if (Subtarget.hasStdExtF() && VT == MVT::f32)
7475         return std::make_pair(0U, &RISCV::FPR32RegClass);
7476       if (Subtarget.hasStdExtD() && VT == MVT::f64)
7477         return std::make_pair(0U, &RISCV::FPR64RegClass);
7478       break;
7479     case 'v':
7480       for (const auto *RC :
7481            {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
7482             &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
7483         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
7484           return std::make_pair(0U, RC);
7485       }
7486       break;
7487     default:
7488       break;
7489     }
7490   }
7491 
7492   // Clang will correctly decode the usage of register name aliases into their
7493   // official names. However, other frontends like `rustc` do not. This allows
7494   // users of these frontends to use the ABI names for registers in LLVM-style
7495   // register constraints.
7496   unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
7497                                .Case("{zero}", RISCV::X0)
7498                                .Case("{ra}", RISCV::X1)
7499                                .Case("{sp}", RISCV::X2)
7500                                .Case("{gp}", RISCV::X3)
7501                                .Case("{tp}", RISCV::X4)
7502                                .Case("{t0}", RISCV::X5)
7503                                .Case("{t1}", RISCV::X6)
7504                                .Case("{t2}", RISCV::X7)
7505                                .Cases("{s0}", "{fp}", RISCV::X8)
7506                                .Case("{s1}", RISCV::X9)
7507                                .Case("{a0}", RISCV::X10)
7508                                .Case("{a1}", RISCV::X11)
7509                                .Case("{a2}", RISCV::X12)
7510                                .Case("{a3}", RISCV::X13)
7511                                .Case("{a4}", RISCV::X14)
7512                                .Case("{a5}", RISCV::X15)
7513                                .Case("{a6}", RISCV::X16)
7514                                .Case("{a7}", RISCV::X17)
7515                                .Case("{s2}", RISCV::X18)
7516                                .Case("{s3}", RISCV::X19)
7517                                .Case("{s4}", RISCV::X20)
7518                                .Case("{s5}", RISCV::X21)
7519                                .Case("{s6}", RISCV::X22)
7520                                .Case("{s7}", RISCV::X23)
7521                                .Case("{s8}", RISCV::X24)
7522                                .Case("{s9}", RISCV::X25)
7523                                .Case("{s10}", RISCV::X26)
7524                                .Case("{s11}", RISCV::X27)
7525                                .Case("{t3}", RISCV::X28)
7526                                .Case("{t4}", RISCV::X29)
7527                                .Case("{t5}", RISCV::X30)
7528                                .Case("{t6}", RISCV::X31)
7529                                .Default(RISCV::NoRegister);
7530   if (XRegFromAlias != RISCV::NoRegister)
7531     return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
7532 
7533   // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
7534   // TableGen record rather than the AsmName to choose registers for InlineAsm
7535   // constraints, and because we want to match those names to the widest
7536   // floating point register type available, manually select FP registers here.
7537   //
7538   // The second case is the ABI name of the register, so that frontends can also
7539   // use the ABI names in register constraint lists.
7540   if (Subtarget.hasStdExtF()) {
7541     unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
7542                         .Cases("{f0}", "{ft0}", RISCV::F0_F)
7543                         .Cases("{f1}", "{ft1}", RISCV::F1_F)
7544                         .Cases("{f2}", "{ft2}", RISCV::F2_F)
7545                         .Cases("{f3}", "{ft3}", RISCV::F3_F)
7546                         .Cases("{f4}", "{ft4}", RISCV::F4_F)
7547                         .Cases("{f5}", "{ft5}", RISCV::F5_F)
7548                         .Cases("{f6}", "{ft6}", RISCV::F6_F)
7549                         .Cases("{f7}", "{ft7}", RISCV::F7_F)
7550                         .Cases("{f8}", "{fs0}", RISCV::F8_F)
7551                         .Cases("{f9}", "{fs1}", RISCV::F9_F)
7552                         .Cases("{f10}", "{fa0}", RISCV::F10_F)
7553                         .Cases("{f11}", "{fa1}", RISCV::F11_F)
7554                         .Cases("{f12}", "{fa2}", RISCV::F12_F)
7555                         .Cases("{f13}", "{fa3}", RISCV::F13_F)
7556                         .Cases("{f14}", "{fa4}", RISCV::F14_F)
7557                         .Cases("{f15}", "{fa5}", RISCV::F15_F)
7558                         .Cases("{f16}", "{fa6}", RISCV::F16_F)
7559                         .Cases("{f17}", "{fa7}", RISCV::F17_F)
7560                         .Cases("{f18}", "{fs2}", RISCV::F18_F)
7561                         .Cases("{f19}", "{fs3}", RISCV::F19_F)
7562                         .Cases("{f20}", "{fs4}", RISCV::F20_F)
7563                         .Cases("{f21}", "{fs5}", RISCV::F21_F)
7564                         .Cases("{f22}", "{fs6}", RISCV::F22_F)
7565                         .Cases("{f23}", "{fs7}", RISCV::F23_F)
7566                         .Cases("{f24}", "{fs8}", RISCV::F24_F)
7567                         .Cases("{f25}", "{fs9}", RISCV::F25_F)
7568                         .Cases("{f26}", "{fs10}", RISCV::F26_F)
7569                         .Cases("{f27}", "{fs11}", RISCV::F27_F)
7570                         .Cases("{f28}", "{ft8}", RISCV::F28_F)
7571                         .Cases("{f29}", "{ft9}", RISCV::F29_F)
7572                         .Cases("{f30}", "{ft10}", RISCV::F30_F)
7573                         .Cases("{f31}", "{ft11}", RISCV::F31_F)
7574                         .Default(RISCV::NoRegister);
7575     if (FReg != RISCV::NoRegister) {
7576       assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
7577       if (Subtarget.hasStdExtD()) {
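        // The matcher above returns the 32-bit view (F*_F). The F0_F..F31_F
        // and F0_D..F31_D enums are laid out in parallel order, so adding the
        // same index to F0_D yields the 64-bit view of the matched register
        // (e.g. F10_F -> F10_D).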
7578         unsigned RegNo = FReg - RISCV::F0_F;
7579         unsigned DReg = RISCV::F0_D + RegNo;
7580         return std::make_pair(DReg, &RISCV::FPR64RegClass);
7581       }
7582       return std::make_pair(FReg, &RISCV::FPR32RegClass);
7583     }
7584   }
7585 
7586   if (Subtarget.hasStdExtV()) {
7587     Register VReg = StringSwitch<Register>(Constraint.lower())
7588                         .Case("{v0}", RISCV::V0)
7589                         .Case("{v1}", RISCV::V1)
7590                         .Case("{v2}", RISCV::V2)
7591                         .Case("{v3}", RISCV::V3)
7592                         .Case("{v4}", RISCV::V4)
7593                         .Case("{v5}", RISCV::V5)
7594                         .Case("{v6}", RISCV::V6)
7595                         .Case("{v7}", RISCV::V7)
7596                         .Case("{v8}", RISCV::V8)
7597                         .Case("{v9}", RISCV::V9)
7598                         .Case("{v10}", RISCV::V10)
7599                         .Case("{v11}", RISCV::V11)
7600                         .Case("{v12}", RISCV::V12)
7601                         .Case("{v13}", RISCV::V13)
7602                         .Case("{v14}", RISCV::V14)
7603                         .Case("{v15}", RISCV::V15)
7604                         .Case("{v16}", RISCV::V16)
7605                         .Case("{v17}", RISCV::V17)
7606                         .Case("{v18}", RISCV::V18)
7607                         .Case("{v19}", RISCV::V19)
7608                         .Case("{v20}", RISCV::V20)
7609                         .Case("{v21}", RISCV::V21)
7610                         .Case("{v22}", RISCV::V22)
7611                         .Case("{v23}", RISCV::V23)
7612                         .Case("{v24}", RISCV::V24)
7613                         .Case("{v25}", RISCV::V25)
7614                         .Case("{v26}", RISCV::V26)
7615                         .Case("{v27}", RISCV::V27)
7616                         .Case("{v28}", RISCV::V28)
7617                         .Case("{v29}", RISCV::V29)
7618                         .Case("{v30}", RISCV::V30)
7619                         .Case("{v31}", RISCV::V31)
7620                         .Default(RISCV::NoRegister);
7621     if (VReg != RISCV::NoRegister) {
7622       if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
7623         return std::make_pair(VReg, &RISCV::VMRegClass);
7624       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
7625         return std::make_pair(VReg, &RISCV::VRRegClass);
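      // For register groups (LMUL > 1), the matched register only names the
      // first VLEN bits; convert it to the group starting at that register
      // (e.g. v8 with an LMUL=2 type becomes the V8M2 group).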
7626       for (const auto *RC :
7627            {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
7628         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
7629           VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
7630           return std::make_pair(VReg, RC);
7631         }
7632       }
7633     }
7634   }
7635 
7636   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
7637 }
7638 
7639 unsigned
7640 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
7641   // Currently only support length 1 constraints.
7642   if (ConstraintCode.size() == 1) {
7643     switch (ConstraintCode[0]) {
7644     case 'A':
7645       return InlineAsm::Constraint_A;
7646     default:
7647       break;
7648     }
7649   }
7650 
7651   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
7652 }
7653 
7654 void RISCVTargetLowering::LowerAsmOperandForConstraint(
7655     SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
7656     SelectionDAG &DAG) const {
7657   // Currently only support length 1 constraints.
7658   if (Constraint.length() == 1) {
7659     switch (Constraint[0]) {
7660     case 'I':
7661       // Validate & create a 12-bit signed immediate operand.
7662       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
7663         uint64_t CVal = C->getSExtValue();
7664         if (isInt<12>(CVal))
7665           Ops.push_back(
7666               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
7667       }
7668       return;
7669     case 'J':
7670       // Validate & create an integer zero operand.
7671       if (auto *C = dyn_cast<ConstantSDNode>(Op))
7672         if (C->getZExtValue() == 0)
7673           Ops.push_back(
7674               DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
7675       return;
7676     case 'K':
7677       // Validate & create a 5-bit unsigned immediate operand.
7678       if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
7679         uint64_t CVal = C->getZExtValue();
7680         if (isUInt<5>(CVal))
7681           Ops.push_back(
7682               DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
7683       }
7684       return;
7685     default:
7686       break;
7687     }
7688   }
7689   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
7690 }
7691 
7692 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
7693                                                    Instruction *Inst,
7694                                                    AtomicOrdering Ord) const {
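  // Leading fences for the fence-based mapping of atomics: a seq_cst load is
  // preceded by a full fence and a release (or stronger) store by a release
  // fence. The matching trailing fence for acquire loads is emitted in
  // emitTrailingFence below.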
7695   if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
7696     return Builder.CreateFence(Ord);
7697   if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
7698     return Builder.CreateFence(AtomicOrdering::Release);
7699   return nullptr;
7700 }
7701 
7702 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
7703                                                     Instruction *Inst,
7704                                                     AtomicOrdering Ord) const {
7705   if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
7706     return Builder.CreateFence(AtomicOrdering::Acquire);
7707   return nullptr;
7708 }
7709 
7710 TargetLowering::AtomicExpansionKind
7711 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
7712   // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
7713   // point operations can't be used in an lr/sc sequence without breaking the
7714   // forward-progress guarantee.
7715   if (AI->isFloatingPointOperation())
7716     return AtomicExpansionKind::CmpXChg;
7717 
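  // The A extension has no byte or halfword AMOs, so i8/i16 atomicrmw is
  // expanded to a masked, word-sized LR/SC loop using the
  // riscv_masked_atomicrmw_* intrinsics.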
7718   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
7719   if (Size == 8 || Size == 16)
7720     return AtomicExpansionKind::MaskedIntrinsic;
7721   return AtomicExpansionKind::None;
7722 }
7723 
7724 static Intrinsic::ID
7725 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
7726   if (XLen == 32) {
7727     switch (BinOp) {
7728     default:
7729       llvm_unreachable("Unexpected AtomicRMW BinOp");
7730     case AtomicRMWInst::Xchg:
7731       return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
7732     case AtomicRMWInst::Add:
7733       return Intrinsic::riscv_masked_atomicrmw_add_i32;
7734     case AtomicRMWInst::Sub:
7735       return Intrinsic::riscv_masked_atomicrmw_sub_i32;
7736     case AtomicRMWInst::Nand:
7737       return Intrinsic::riscv_masked_atomicrmw_nand_i32;
7738     case AtomicRMWInst::Max:
7739       return Intrinsic::riscv_masked_atomicrmw_max_i32;
7740     case AtomicRMWInst::Min:
7741       return Intrinsic::riscv_masked_atomicrmw_min_i32;
7742     case AtomicRMWInst::UMax:
7743       return Intrinsic::riscv_masked_atomicrmw_umax_i32;
7744     case AtomicRMWInst::UMin:
7745       return Intrinsic::riscv_masked_atomicrmw_umin_i32;
7746     }
7747   }
7748 
7749   if (XLen == 64) {
7750     switch (BinOp) {
7751     default:
7752       llvm_unreachable("Unexpected AtomicRMW BinOp");
7753     case AtomicRMWInst::Xchg:
7754       return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
7755     case AtomicRMWInst::Add:
7756       return Intrinsic::riscv_masked_atomicrmw_add_i64;
7757     case AtomicRMWInst::Sub:
7758       return Intrinsic::riscv_masked_atomicrmw_sub_i64;
7759     case AtomicRMWInst::Nand:
7760       return Intrinsic::riscv_masked_atomicrmw_nand_i64;
7761     case AtomicRMWInst::Max:
7762       return Intrinsic::riscv_masked_atomicrmw_max_i64;
7763     case AtomicRMWInst::Min:
7764       return Intrinsic::riscv_masked_atomicrmw_min_i64;
7765     case AtomicRMWInst::UMax:
7766       return Intrinsic::riscv_masked_atomicrmw_umax_i64;
7767     case AtomicRMWInst::UMin:
7768       return Intrinsic::riscv_masked_atomicrmw_umin_i64;
7769     }
7770   }
7771 
  llvm_unreachable("Unexpected XLen");
7773 }
7774 
7775 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
7776     IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
7777     Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
7778   unsigned XLen = Subtarget.getXLen();
7779   Value *Ordering =
7780       Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
7781   Type *Tys[] = {AlignedAddr->getType()};
7782   Function *LrwOpScwLoop = Intrinsic::getDeclaration(
7783       AI->getModule(),
7784       getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
7785 
7786   if (XLen == 64) {
7787     Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
7788     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
7789     ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
7790   }
7791 
7792   Value *Result;
7793 
7794   // Must pass the shift amount needed to sign extend the loaded value prior
7795   // to performing a signed comparison for min/max. ShiftAmt is the number of
7796   // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
7797   // is the number of bits to left+right shift the value in order to
7798   // sign-extend.
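  // For example, with XLen=32, ValWidth=8 and ShiftAmt=16 the extra shift
  // amount is 32-8-16 = 8: shifting the loaded field left and then
  // arithmetically right by 8 sign-extends the 8-bit value within the word.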
7799   if (AI->getOperation() == AtomicRMWInst::Min ||
7800       AI->getOperation() == AtomicRMWInst::Max) {
7801     const DataLayout &DL = AI->getModule()->getDataLayout();
7802     unsigned ValWidth =
7803         DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
7804     Value *SextShamt =
7805         Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
7806     Result = Builder.CreateCall(LrwOpScwLoop,
7807                                 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
7808   } else {
7809     Result =
7810         Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
7811   }
7812 
7813   if (XLen == 64)
7814     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
7815   return Result;
7816 }
7817 
7818 TargetLowering::AtomicExpansionKind
7819 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
7820     AtomicCmpXchgInst *CI) const {
7821   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
7822   if (Size == 8 || Size == 16)
7823     return AtomicExpansionKind::MaskedIntrinsic;
7824   return AtomicExpansionKind::None;
7825 }
7826 
7827 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
7828     IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
7829     Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
7830   unsigned XLen = Subtarget.getXLen();
7831   Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
7832   Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
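  // The masked cmpxchg intrinsics operate on XLen-wide values, so on RV64 the
  // i32 operands are sign-extended before the call and the result truncated
  // back to i32 afterwards.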
7833   if (XLen == 64) {
7834     CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
7835     NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
7836     Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
7837     CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
7838   }
7839   Type *Tys[] = {AlignedAddr->getType()};
7840   Function *MaskedCmpXchg =
7841       Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
7842   Value *Result = Builder.CreateCall(
7843       MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
7844   if (XLen == 64)
7845     Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
7846   return Result;
7847 }
7848 
7849 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
7850   return false;
7851 }
7852 
7853 bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
7854                                                      EVT VT) const {
7855   VT = VT.getScalarType();
7856 
7857   if (!VT.isSimple())
7858     return false;
7859 
7860   switch (VT.getSimpleVT().SimpleTy) {
7861   case MVT::f16:
7862     return Subtarget.hasStdExtZfh();
7863   case MVT::f32:
7864     return Subtarget.hasStdExtF();
7865   case MVT::f64:
7866     return Subtarget.hasStdExtD();
7867   default:
7868     break;
7869   }
7870 
7871   return false;
7872 }
7873 
7874 Register RISCVTargetLowering::getExceptionPointerRegister(
7875     const Constant *PersonalityFn) const {
7876   return RISCV::X10;
7877 }
7878 
7879 Register RISCVTargetLowering::getExceptionSelectorRegister(
7880     const Constant *PersonalityFn) const {
7881   return RISCV::X11;
7882 }
7883 
7884 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions when a libcall argument
  // or return value is an f32 for the LP64 ABI.
7887   RISCVABI::ABI ABI = Subtarget.getTargetABI();
7888   if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
7889     return false;
7890 
7891   return true;
7892 }
7893 
bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
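  // The RISC-V integer calling convention sign-extends 32-bit values to XLen
  // on RV64, so request sign extension for i32 libcall arguments there.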
7895   if (Subtarget.is64Bit() && Type == MVT::i32)
7896     return true;
7897 
7898   return IsSigned;
7899 }
7900 
7901 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
7902                                                  SDValue C) const {
7903   // Check integral scalar types.
7904   if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
7907     if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
7908       return false;
7909     if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
7910       // Break the MUL to a SLLI and an ADD/SUB.
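      // e.g. x * 5 -> (x << 2) + x and x * 7 -> (x << 3) - x.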
7911       const APInt &Imm = ConstNode->getAPIntValue();
7912       if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
7913           (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
7914         return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size >= XLen.
7917       if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
7918         return false;
7919       // Break the MUL to two SLLI instructions and an ADD/SUB, if Imm needs
7920       // a pair of LUI/ADDI.
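      // e.g. x * 6144 (3 << 11) -> ((x << 2) - x) << 11, i.e. two shifts and
      // a subtract.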
7921       if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
7922         APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
        if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
            (1 - ImmS).isPowerOf2())
          return true;
7926       }
7927     }
7928   }
7929 
7930   return false;
7931 }
7932 
7933 bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
7934   if (!Subtarget.useRVVForFixedLengthVectors())
7935     return false;
7936 
7937   if (!VT.isFixedLengthVector())
7938     return false;
7939 
7940   // Don't use RVV for vectors we cannot scalarize if required.
7941   switch (VT.getVectorElementType().SimpleTy) {
7942   // i1 is supported but has different rules.
7943   default:
7944     return false;
7945   case MVT::i1:
7946     // Masks can only use a single register.
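    // A mask uses one bit per element, and a single vector register is only
    // guaranteed to hold the minimum configured VLEN bits.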
7947     if (VT.getVectorNumElements() > Subtarget.getMinRVVVectorSizeInBits())
7948       return false;
7949     break;
7950   case MVT::i8:
7951   case MVT::i16:
7952   case MVT::i32:
7953   case MVT::i64:
7954     break;
7955   case MVT::f16:
7956     if (!Subtarget.hasStdExtZfh())
7957       return false;
7958     break;
7959   case MVT::f32:
7960     if (!Subtarget.hasStdExtF())
7961       return false;
7962     break;
7963   case MVT::f64:
7964     if (!Subtarget.hasStdExtD())
7965       return false;
7966     break;
7967   }
7968 
7969   unsigned LMul = Subtarget.getLMULForFixedLengthVector(VT);
7970   // Don't use RVV for types that don't fit.
7971   if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
7972     return false;
7973 
7974   // TODO: Perhaps an artificial restriction, but worth having whilst getting
7975   // the base fixed length RVV support in place.
7976   if (!VT.isPow2VectorType())
7977     return false;
7978 
7979   return true;
7980 }
7981 
7982 bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
7983     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
7984     bool *Fast) const {
7985   if (!VT.isScalableVector())
7986     return false;
7987 
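  // Scalable vector accesses that are at least element-aligned are allowed
  // and treated as fast; only element alignment is required here.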
7988   EVT ElemVT = VT.getVectorElementType();
7989   if (Alignment >= ElemVT.getStoreSize()) {
7990     if (Fast)
7991       *Fast = true;
7992     return true;
7993   }
7994 
7995   return false;
7996 }
7997 
7998 bool RISCVTargetLowering::splitValueIntoRegisterParts(
7999     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
8000     unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
8001   bool IsABIRegCopy = CC.hasValue();
8002   EVT ValueVT = Val.getValueType();
8003   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Cast the f16 to i16, extend to i32, pad the upper bits with ones to
    // make a float NaN, and cast to f32.
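    // This NaN-boxes the value: a narrower FP value held in a wider FP
    // register must have its unused upper bits set to all ones.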
8006     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
8007     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
8008     Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
8009                       DAG.getConstant(0xFFFF0000, DL, MVT::i32));
8010     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
8011     Parts[0] = Val;
8012     return true;
8013   }
8014 
8015   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8016     LLVMContext &Context = *DAG.getContext();
8017     EVT ValueEltVT = ValueVT.getVectorElementType();
8018     EVT PartEltVT = PartVT.getVectorElementType();
8019     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8020     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8021     if (PartVTBitSize % ValueVTBitSize == 0) {
8022       // If the element types are different, bitcast to the same element type of
8023       // PartVT first.
8024       if (ValueEltVT != PartEltVT) {
8025         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
8027         EVT SameEltTypeVT =
8028             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8029         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
8030       }
8031       Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
8032                         Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8033       Parts[0] = Val;
8034       return true;
8035     }
8036   }
8037   return false;
8038 }
8039 
8040 SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
8041     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
8042     MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
8043   bool IsABIRegCopy = CC.hasValue();
8044   if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
8045     SDValue Val = Parts[0];
8046 
8047     // Cast the f32 to i32, truncate to i16, and cast back to f16.
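    // This drops the NaN-boxing bits added when the value was split into
    // parts.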
8048     Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
8049     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
8050     Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
8051     return Val;
8052   }
8053 
8054   if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
8055     LLVMContext &Context = *DAG.getContext();
8056     SDValue Val = Parts[0];
8057     EVT ValueEltVT = ValueVT.getVectorElementType();
8058     EVT PartEltVT = PartVT.getVectorElementType();
8059     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
8060     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
8061     if (PartVTBitSize % ValueVTBitSize == 0) {
8062       EVT SameEltTypeVT = ValueVT;
8063       // If the element types are different, convert it to the same element type
8064       // of PartVT.
8065       if (ValueEltVT != PartEltVT) {
8066         unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
8068         SameEltTypeVT =
8069             EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
8070       }
8071       Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
8072                         DAG.getConstant(0, DL, Subtarget.getXLenVT()));
8073       if (ValueEltVT != PartEltVT)
8074         Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
8075       return Val;
8076     }
8077   }
8078   return SDValue();
8079 }
8080 
8081 #define GET_REGISTER_MATCHER
8082 #include "RISCVGenAsmMatcher.inc"
8083 
8084 Register
8085 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
8086                                        const MachineFunction &MF) const {
8087   Register Reg = MatchRegisterAltName(RegName);
8088   if (Reg == RISCV::NoRegister)
8089     Reg = MatchRegisterName(RegName);
8090   if (Reg == RISCV::NoRegister)
8091     report_fatal_error(
8092         Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
8093   BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
8094   if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
8095     report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
8096                              StringRef(RegName) + "\"."));
8097   return Reg;
8098 }
8099 
8100 namespace llvm {
8101 namespace RISCVVIntrinsicsTable {
8102 
8103 #define GET_RISCVVIntrinsicsTable_IMPL
8104 #include "RISCVGenSearchableTables.inc"
8105 
8106 } // namespace RISCVVIntrinsicsTable
8107 
8108 } // namespace llvm
8109